siibra 1.0a11__py3-none-any.whl → 1.0a19__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of siibra might be problematic.

Files changed (77)
  1. siibra/VERSION +1 -1
  2. siibra/__init__.py +12 -2
  3. siibra/commons.py +4 -3
  4. siibra/configuration/__init__.py +1 -1
  5. siibra/configuration/configuration.py +1 -1
  6. siibra/configuration/factory.py +164 -117
  7. siibra/core/__init__.py +1 -1
  8. siibra/core/assignment.py +1 -1
  9. siibra/core/atlas.py +4 -3
  10. siibra/core/concept.py +18 -9
  11. siibra/core/parcellation.py +9 -3
  12. siibra/core/region.py +35 -65
  13. siibra/core/space.py +3 -1
  14. siibra/core/structure.py +1 -2
  15. siibra/exceptions.py +9 -1
  16. siibra/explorer/__init__.py +1 -1
  17. siibra/explorer/url.py +15 -0
  18. siibra/explorer/util.py +1 -1
  19. siibra/features/__init__.py +1 -1
  20. siibra/features/anchor.py +1 -1
  21. siibra/features/connectivity/__init__.py +1 -1
  22. siibra/features/connectivity/functional_connectivity.py +1 -1
  23. siibra/features/connectivity/regional_connectivity.py +5 -3
  24. siibra/features/connectivity/streamline_counts.py +1 -1
  25. siibra/features/connectivity/streamline_lengths.py +1 -1
  26. siibra/features/connectivity/tracing_connectivity.py +1 -1
  27. siibra/features/dataset/__init__.py +1 -1
  28. siibra/features/dataset/ebrains.py +1 -1
  29. siibra/features/feature.py +42 -15
  30. siibra/features/image/__init__.py +1 -1
  31. siibra/features/image/image.py +18 -13
  32. siibra/features/image/sections.py +1 -1
  33. siibra/features/image/volume_of_interest.py +1 -1
  34. siibra/features/tabular/__init__.py +1 -1
  35. siibra/features/tabular/bigbrain_intensity_profile.py +1 -1
  36. siibra/features/tabular/cell_density_profile.py +5 -3
  37. siibra/features/tabular/cortical_profile.py +5 -3
  38. siibra/features/tabular/gene_expression.py +2 -2
  39. siibra/features/tabular/layerwise_bigbrain_intensities.py +1 -1
  40. siibra/features/tabular/layerwise_cell_density.py +5 -3
  41. siibra/features/tabular/receptor_density_fingerprint.py +5 -3
  42. siibra/features/tabular/receptor_density_profile.py +5 -3
  43. siibra/features/tabular/regional_timeseries_activity.py +5 -3
  44. siibra/features/tabular/tabular.py +5 -3
  45. siibra/livequeries/__init__.py +1 -1
  46. siibra/livequeries/allen.py +9 -6
  47. siibra/livequeries/bigbrain.py +1 -1
  48. siibra/livequeries/ebrains.py +1 -1
  49. siibra/livequeries/query.py +1 -1
  50. siibra/locations/__init__.py +1 -1
  51. siibra/locations/boundingbox.py +51 -17
  52. siibra/locations/location.py +12 -4
  53. siibra/locations/point.py +10 -5
  54. siibra/locations/pointset.py +45 -11
  55. siibra/retrieval/__init__.py +1 -1
  56. siibra/retrieval/cache.py +1 -1
  57. siibra/retrieval/datasets.py +1 -1
  58. siibra/retrieval/exceptions/__init__.py +1 -1
  59. siibra/retrieval/repositories.py +1 -1
  60. siibra/retrieval/requests.py +1 -1
  61. siibra/vocabularies/__init__.py +1 -1
  62. siibra/volumes/__init__.py +1 -1
  63. siibra/volumes/parcellationmap.py +38 -18
  64. siibra/volumes/providers/__init__.py +1 -1
  65. siibra/volumes/providers/freesurfer.py +1 -1
  66. siibra/volumes/providers/gifti.py +1 -1
  67. siibra/volumes/providers/neuroglancer.py +7 -7
  68. siibra/volumes/providers/nifti.py +8 -4
  69. siibra/volumes/providers/provider.py +2 -2
  70. siibra/volumes/sparsemap.py +7 -4
  71. siibra/volumes/volume.py +114 -16
  72. {siibra-1.0a11.dist-info → siibra-1.0a19.dist-info}/METADATA +3 -3
  73. siibra-1.0a19.dist-info/RECORD +84 -0
  74. {siibra-1.0a11.dist-info → siibra-1.0a19.dist-info}/WHEEL +1 -1
  75. siibra-1.0a11.dist-info/RECORD +0 -84
  76. {siibra-1.0a11.dist-info → siibra-1.0a19.dist-info}/LICENSE +0 -0
  77. {siibra-1.0a11.dist-info → siibra-1.0a19.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -18,6 +18,7 @@ from . import location, point, boundingbox as _boundingbox

  from ..retrieval.requests import HttpRequest
  from ..commons import logger
+ from ..exceptions import SpaceWarpingFailedError

  from typing import List, Union, Tuple
  import numbers
@@ -149,7 +150,7 @@ class PointSet(location.Location):
          if spaceobj == self.space:
              return self
          if any(_ not in location.Location.SPACEWARP_IDS for _ in [self.space.id, spaceobj.id]):
-             raise ValueError(
+             raise SpaceWarpingFailedError(
                  f"Cannot convert coordinates between {self.space.id} and {spaceobj.id}"
              )

@@ -178,6 +179,10 @@ class PointSet(location.Location):
              ).data
              tgt_points.extend(list(response["target_points"]))

+         # TODO: consider using np.isnan(np.dot(arr, arr)). see https://stackoverflow.com/a/45011547
+         if np.any(np.isnan(response['target_points'])):
+             raise SpaceWarpingFailedError(f'Warping {str(self)} to {spaceobj.name} resulted in NaN')
+
          return self.__class__(coordinates=tuple(tgt_points), space=spaceobj, labels=self.labels)

      def transform(self, affine: np.ndarray, space=None):
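
The hunks above make failed warps raise a dedicated exception instead of a plain ValueError, including when the warping service returns NaN coordinates. A minimal usage sketch, assuming siibra exposes PointSet at the package level; the coordinates and space names are purely illustrative:

    import siibra
    from siibra.exceptions import SpaceWarpingFailedError

    # hypothetical points in MNI152 space; the coordinates are made up
    pts = siibra.PointSet(
        [(27.75, -32.0, 63.725), (25.0, -30.0, 60.0)],
        space="mni152",
    )

    try:
        # warping now raises SpaceWarpingFailedError instead of ValueError,
        # and also when the warping service returns NaN target points
        warped = pts.warp("bigbrain")
    except SpaceWarpingFailedError as err:
        print(f"Warping failed: {err}")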
@@ -243,15 +248,15 @@ class PointSet(location.Location):

      @property
      def boundingbox(self):
-         """Return the bounding box of these points.
-         TODO revisit the numerical margin of 1e-6, should not be necessary.
          """
-         XYZ = self.coordinates
-         sigma_min = max(self.sigma[i] for i in XYZ.argmin(0))
-         sigma_max = max(self.sigma[i] for i in XYZ.argmax(0))
+         Return the bounding box of these points.
+         """
+         coords = self.coordinates
+         sigma_min = max(self.sigma[i] for i in coords.argmin(0))
+         sigma_max = max(self.sigma[i] for i in coords.argmax(0))
          return _boundingbox.BoundingBox(
-             point1=XYZ.min(0) - max(sigma_min, 1e-6),
-             point2=XYZ.max(0) + max(sigma_max, 1e-6),
+             point1=coords.min(0),
+             point2=coords.max(0),
              space=self.space,
              sigma_mm=[sigma_min, sigma_max]
          )
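
The bounding box of a PointSet is now tight around the coordinates; the former 1e-6 numerical margin is gone and the point sigmas are carried via sigma_mm instead of padding the corners. A small sketch under the same assumptions as above (illustrative coordinates, PointSet exposed at the package level):

    import siibra

    pts = siibra.PointSet([(0.0, 0.0, 0.0), (10.0, 20.0, 30.0)], space="mni152")
    bbox = pts.boundingbox
    # minpoint/maxpoint now coincide with the coordinate extremes
    print(tuple(bbox.minpoint), tuple(bbox.maxpoint))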
@@ -276,7 +281,34 @@ class PointSet(location.Location):
          """Access the list of 3D point as an Nx4 array of homogeneous coordinates."""
          return np.c_[self.coordinates, np.ones(len(self))]

-     def find_clusters(self, min_fraction=1 / 200, max_fraction=1 / 8):
+     def find_clusters(
+         self,
+         min_fraction: float = 1 / 200,
+         max_fraction: float = 1 / 8
+     ) -> List[int]:
+         """
+         Find clusters using HDBSCAN (https://dl.acm.org/doi/10.1145/2733381)
+         implementation of scikit-learn (https://dl.acm.org/doi/10.5555/1953048.2078195).
+
+         Parameters
+         ----------
+         min_fraction: min cluster size as a fraction of total points in the PointSet
+         max_fraction: max cluster size as a fraction of total points in the PointSet
+
+         Returns
+         -------
+         List[int]
+             Returns the cluster labels found by skilearn.cluster.HDBSCAN.
+
+         Note
+         ----
+         Replaces the labels of the PointSet instance with these labels.
+
+         Raises
+         ------
+         RuntimeError
+             If a sklearn version without HDBSCAN is installed.
+         """
          if not _HAS_HDBSCAN:
              raise RuntimeError(
                  f"HDBSCAN is not available with your version {sklearn.__version__} "
@@ -289,7 +321,9 @@ class PointSet(location.Location):
              max_cluster_size=int(N * max_fraction),
          )
          if self.labels is not None:
-             logger.warn("Existing labels of PointSet will be overwritten with cluster labels.")
+             logger.warning(
+                 "Existing labels of PointSet will be overwritten with cluster labels."
+             )
          self.labels = clustering.fit_predict(points)
          return self.labels

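A sketch of how the newly typed find_clusters signature might be called. It assumes a scikit-learn version that ships HDBSCAN (otherwise the RuntimeError above is raised); the PointSet built from random coordinates is illustrative:

    import numpy as np
    import siibra

    # a hypothetical cloud of 500 points in MNI152 space
    rng = np.random.default_rng(0)
    pts = siibra.PointSet(rng.normal(scale=5.0, size=(500, 3)), space="mni152")

    # cluster labels are returned and also overwrite pts.labels
    labels = pts.find_clusters(min_fraction=1 / 200, max_fraction=1 / 8)
    print(set(labels))  # -1 marks noise points in HDBSCAN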
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
siibra/retrieval/cache.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2023
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -76,6 +76,7 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
          modality: str = None,
          publications: list = [],
          datasets: list = [],
+         prerelease: bool = False,
      ):
          """
          Constructs a new parcellation object.
@@ -120,12 +121,11 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
              description=description,
              publications=publications,
              datasets=datasets,
-             modality=modality
+             modality=modality,
+             prerelease=prerelease,
          )
          self._space_spec = space_spec
          self._parcellation_spec = parcellation_spec
-         if 'prerelease' in self.parcellation.name.lower():
-             self.name = f"[PRERELEASE] {self.name}"

          # Since the volumes might include 4D arrays, where the actual
          # volume index points to a z coordinate, we create subvolume
@@ -572,32 +572,52 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
              )]
          )

-     def compute_centroids(self) -> Dict[str, point.Point]:
+     def compute_centroids(self, split_components: bool = True) -> Dict[str, pointset.PointSet]:
          """
-         Compute a dictionary of the centroids of all regions in this map.
+         Compute a dictionary of all regions in this map to their centroids.
+         By default, the regional masks will be split to connected components
+         and each point in the PointSet corresponds to a region component.
+
+         Parameters
+         ----------
+         split_components: bool, default: True
+             If True, finds the spatial properties for each connected component
+             found by skimage.measure.label.

          Returns
          -------
          Dict[str, point.Point]
              Region names as keys and computed centroids as items.
          """
-         centroids = {}
-         maparr = None
+         centroids = dict()
          for regionname, indexlist in siibra_tqdm(
              self._indices.items(), unit="regions", desc="Computing centroids"
          ):
-             assert len(indexlist) == 1
-             index = indexlist[0]
-             if index.label == 0:
-                 continue
-             with QUIET:
-                 mapimg = self.fetch(index=index) # returns a mask of the region
-             maparr = np.asanyarray(mapimg.dataobj)
-             centroid_vox = np.mean(np.where(maparr == 1), axis=1)
              assert regionname not in centroids
-             centroids[regionname] = point.Point(
-                 np.dot(mapimg.affine, np.r_[centroid_vox, 1])[:3], space=self.space
+             # get the mask of the region in this map
+             with QUIET:
+                 if len(indexlist) >= 1:
+                     merged_volume = _volume.merge(
+                         [
+                             _volume.from_nifti(
+                                 self.fetch(index=index),
+                                 self.space,
+                                 f"{self.name} - {index}"
+                             )
+                             for index in indexlist
+                         ],
+                         labels=[1] * len(indexlist)
+                     )
+                     mapimg = merged_volume.fetch()
+                 elif len(indexlist) == 1:
+                     index = indexlist[0]
+                     mapimg = self.fetch(index=index) # returns a mask of the region
+             props = _volume.ComponentSpatialProperties.compute_from_image(
+                 img=mapimg,
+                 space=self.space,
+                 split_components=split_components,
              )
+             centroids[regionname] = pointset.from_points([c.centroid for c in props])
          return centroids

      def get_resampled_template(self, **fetch_kwargs) -> _volume.Volume:
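
compute_centroids now returns a PointSet per region, with one point per connected component by default. A sketch of the new return type, assuming siibra.get_map is available as in recent releases; the parcellation and space specifications are illustrative:

    import siibra

    # a labelled parcellation map; the exact specs are illustrative
    mpm = siibra.get_map("julich 3.0", "mni152", "labelled")

    centroids = mpm.compute_centroids(split_components=True)
    for regionname, component_centroids in list(centroids.items())[:3]:
        print(regionname, component_centroids)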
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,7 +26,7 @@ from ...commons import (
  from ...retrieval import requests, cache
  from ...locations import boundingbox as _boundingbox

- from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset
+ from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
  from neuroglancer_scripts.http_accessor import HttpAccessor
  from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh
  from io import BytesIO
@@ -243,7 +243,7 @@ class NeuroglancerVolume:
          self._scales_cached = None
          self._info = None
          self._transform_nm = None
-         self._io = None
+         self._io: PrecomputedIO = None

      @property
      def transform_nm(self):
@@ -266,7 +266,7 @@
          self._transform_nm = val

      @property
-     def io(self):
+     def io(self) -> PrecomputedIO:
          if self._io is None:
              accessor = HttpAccessor(self.url)
              self._io = get_IO_for_existing_dataset(accessor)
@@ -385,7 +385,7 @@ class NeuroglancerScale:

      color_warning_issued = False

-     def __init__(self, volume: NeuroglancerProvider, scaleinfo: dict):
+     def __init__(self, volume: NeuroglancerVolume, scaleinfo: dict):
          self.volume = volume
          self.chunk_sizes = np.array(scaleinfo["chunk_sizes"]).squeeze()
          self.encoding = scaleinfo["encoding"]
@@ -532,8 +532,8 @@
          # determine the remaining offset from the "chunk mosaic" to the
          # exact bounding box requested, to cut off undesired borders
          data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
-         x0, y0, z0 = (np.array(tuple(bbox_.minpoint)) - data_min).astype("int")
-         xd, yd, zd = np.array(bbox_.shape).astype("int")
+         x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
+         xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)  # TODO: consider 0.5 voxel shift
          offset = tuple(bbox_.minpoint)

          # build the nifti image
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -65,7 +65,7 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
      def fragments(self):
          return [k for k in self._img_loaders if k is not None]

-     def get_boundingbox(self, clip=True, background=0, **fetch_kwargs) -> "_boundingbox.BoundingBox":
+     def get_boundingbox(self, clip=True, background=0., **fetch_kwargs) -> "_boundingbox.BoundingBox":
          """
          Return the bounding box in physical coordinates of the union of
          fragments in this nifti volume.
@@ -188,19 +188,23 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
          result = loader()

          if voi is not None:
+             zoom_xyz = np.array(result.header.get_zooms())  # voxel dimensions in xyzt_units
              bb_vox = voi.transform(np.linalg.inv(result.affine))
-             (x0, y0, z0), (x1, y1, z1) = bb_vox.minpoint, bb_vox.maxpoint
+             x0, y0, z0 = np.floor(np.array(bb_vox.minpoint.coordinate) / zoom_xyz).astype(int)
+             x1, y1, z1 = np.ceil(np.array(bb_vox.maxpoint.coordinate) / zoom_xyz).astype(int)
              shift = np.identity(4)
              shift[:3, -1] = bb_vox.minpoint
              result = nib.Nifti1Image(
                  dataobj=result.dataobj[x0:x1, y0:y1, z0:z1],
                  affine=np.dot(result.affine, shift),
+                 dtype=result.header.get_data_dtype(),
              )

          if label is not None:
              result = nib.Nifti1Image(
                  (result.get_fdata() == label).astype('uint8'),
-                 result.affine
+                 result.affine,
+                 dtype='uint8'
              )

          return result
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -28,7 +28,7 @@ VolumeData = Union[Nifti1Image, Dict]

  class VolumeProvider(ABC):

-     _SUBCLASSES = []
+     _SUBCLASSES: List[VolumeProvider] = []

      def __init_subclass__(cls, srctype: str) -> None:
          cls.srctype = srctype
@@ -1,4 +1,4 @@
- # Copyright 2018-2022
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,6 +20,7 @@ from ..commons import MapIndex, logger, connected_components, siibra_tqdm
  from ..locations import boundingbox
  from ..retrieval import cache
  from ..retrieval.repositories import ZipfileConnector, GitlabConnector
+ from ..exceptions import InsufficientArgumentException, ExcessiveArgumentException

  from os import path, rename, makedirs
  from zipfile import ZipFile, ZIP_DEFLATED
@@ -226,7 +227,8 @@ class SparseMap(parcellationmap.Map):
          description: str = "",
          modality: str = None,
          publications: list = [],
-         datasets: list = []
+         datasets: list = [],
+         prerelease: bool = False,
      ):
          parcellationmap.Map.__init__(
              self,
@@ -241,6 +243,7 @@ class SparseMap(parcellationmap.Map):
              publications=publications,
              datasets=datasets,
              volumes=volumes,
+             prerelease=prerelease,
          )
          self._sparse_index_cached = None

@@ -390,7 +393,7 @@ class SparseMap(parcellationmap.Map):
              assert length == 1
          except AssertionError:
              if length > 1:
-                 raise parcellationmap.ExcessiveArgumentException(
+                 raise ExcessiveArgumentException(
                      "One and only one of region_or_index, region, index can be defined for fetch"
                  )
          # user can provide no arguments, which assumes one and only one volume present
@@ -416,7 +419,7 @@ class SparseMap(parcellationmap.Map):
              assert len(self) == 1
              volidx = 0
          except AssertionError:
-             raise parcellationmap.InsufficientArgumentException(
+             raise InsufficientArgumentException(
                  f"{self.__class__.__name__} provides {len(self)} volumes. "
                  "Specify 'region' or 'index' for fetch() to identify one."
              )
siibra/volumes/volume.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2023
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -20,9 +20,10 @@ from .. import logger
  from ..retrieval import requests
  from ..core import space as _space, structure
  from ..locations import location, point, pointset, boundingbox
- from ..commons import resample_img_to_img, siibra_tqdm
+ from ..commons import resample_img_to_img, siibra_tqdm, affine_scaling, connected_components
  from ..exceptions import NoMapAvailableError, SpaceWarpingFailedError

+ from dataclasses import dataclass
  from nibabel import Nifti1Image
  import numpy as np
  from typing import List, Dict, Union, Set, TYPE_CHECKING
@@ -36,6 +37,61 @@ if TYPE_CHECKING:
      TypeDataset = EbrainsDataset


+ @dataclass
+ class ComponentSpatialProperties:
+     """
+     Centroid and nonzero volume of an image.
+     """
+     centroid: point.Point
+     volume: int
+
+     @staticmethod
+     def compute_from_image(
+         img: Nifti1Image,
+         space: Union[str, "_space.Space"],
+         split_components: bool = True
+
+     ) -> List["ComponentSpatialProperties"]:
+         """
+         Find the center of an image in its (non-zero) voxel space and and its
+         volume.
+
+         Parameters
+         ----------
+         img: Nifti1Image
+         space: str, Space
+         split_components: bool, default: True
+             If True, finds the spatial properties for each connected component
+             found by skimage.measure.label.
+         """
+         scale = affine_scaling(img.affine)
+         if split_components:
+             iter_components = lambda img: connected_components(
+                 np.asanyarray(img.dataobj),
+                 connectivity=None
+             )
+         else:
+             iter_components = lambda img: [(0, np.asanyarray(img.dataobj))]
+
+         spatial_props: List[ComponentSpatialProperties] = []
+         for _, component in iter_components(img):
+             nonzero: np.ndarray = np.c_[np.nonzero(component)]
+             spatial_props.append(
+                 ComponentSpatialProperties(
+                     centroid=point.Point(
+                         np.dot(img.affine, np.r_[nonzero.mean(0), 1])[:3],
+                         space=space
+                     ),
+                     volume=nonzero.shape[0] * scale,
+                 )
+             )
+
+         # sort by volume
+         spatial_props.sort(key=lambda cmp: cmp.volume, reverse=True)
+
+         return spatial_props
+
+
  class Volume(location.Location):
      """
      A volume is a specific mesh or 3D array,
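
A sketch of using the new helper directly on a NIfTI mask. The file path is hypothetical, and it assumes the space argument accepts a space specification string as elsewhere in siibra:

    import nibabel as nib
    from siibra.volumes.volume import ComponentSpatialProperties

    # a hypothetical binary mask aligned to MNI152
    mask = nib.load("region_mask_mni152.nii.gz")

    props = ComponentSpatialProperties.compute_from_image(
        img=mask,
        space="mni152",
        split_components=True,
    )
    # components come back sorted by volume, largest first
    for p in props:
        print(p.centroid, p.volume)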
@@ -77,12 +133,14 @@ class Volume(location.Location):
          name: str = "",
          variant: str = None,
          datasets: List['TypeDataset'] = [],
+         bbox: "boundingbox.BoundingBox" = None
      ):
          self._name = name
          self._space_spec = space_spec
          self.variant = variant
          self._providers: Dict[str, _provider.VolumeProvider] = {}
          self.datasets = datasets
+         self._boundingbox = bbox
          for provider in providers:
              srctype = provider.srctype
              assert srctype not in self._providers
@@ -142,6 +200,9 @@ class Volume(location.Location):
          RuntimeError
              If the volume provider does not have a bounding box calculator.
          """
+         if self._boundingbox is not None and len(fetch_kwargs) == 0:
+             return self._boundingbox
+
          fmt = fetch_kwargs.get("format")
          if (fmt is not None) and (fmt not in self.formats):
              raise ValueError(
@@ -253,9 +314,8 @@ class Volume(location.Location):
              raise NotImplementedError("Filtering of points by pure mesh volumes not yet implemented.")

          # make sure the points are in the same physical space as this volume
-         warped = (
-             pointset.from_points([points]) if isinstance(points, point.Point) else points
-         ).warp(self.space)
+         as_pointset = pointset.from_points([points]) if isinstance(points, point.Point) else points
+         warped = as_pointset.warp(self.space)
          assert warped is not None, SpaceWarpingFailedError

          # get the voxel array of this volume
@@ -340,6 +400,8 @@ class Volume(location.Location):
          elif isinstance(other, boundingbox.BoundingBox):
              return self.get_boundingbox(clip=True, background=0.0, **fetch_kwargs).intersection(other)
          elif isinstance(other, Volume):
+             if self.space != other.space:
+                 raise NotImplementedError("Cannot intersect volumes from different spaces. Try comparing their boudning boxes.")
              format = fetch_kwargs.pop('format', 'image')
              v1 = self.fetch(format=format, **fetch_kwargs)
              v2 = other.fetch(format=format, **fetch_kwargs)
@@ -416,6 +478,20 @@ class Volume(location.Location):
                  f"volume are: {self.formats}"
              )

+         # ensure the voi is inside the template
+         voi = kwargs.get("voi", None)
+         if voi is not None and voi.space is not None:
+             assert isinstance(voi, boundingbox.BoundingBox)
+             tmplt_bbox = voi.space.get_template().get_boundingbox(clip=False)
+             intersection_bbox = voi.intersection(tmplt_bbox)
+             if intersection_bbox is None:
+                 raise RuntimeError(f"voi provided ({voi}) lies out side the voxel space of the {voi.space.name} template.")
+             if intersection_bbox != voi:
+                 logger.info(
+                     f"Since provided voi lies outside the template ({voi.space}) it is clipped as: {intersection_bbox}"
+                 )
+                 kwargs["voi"] = intersection_bbox
+
          result = None
          # try each possible format
          for fmt in possible_formats:
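
A sketch of the new volume-of-interest clipping during fetch. It assumes siibra.get_template and the BoundingBox constructor shown below behave as in recent releases; the space and coordinates are illustrative:

    import siibra
    from siibra.locations import BoundingBox

    template = siibra.get_template("mni152")

    # a hypothetical voi partly outside the MNI152 template
    voi = BoundingBox(point1=(-200.0, -40.0, 0.0), point2=(20.0, 40.0, 60.0), space="mni152")

    # the voi is clipped to the template bounding box before fetching (with an info log);
    # a voi entirely outside the template raises RuntimeError
    img = template.fetch(voi=voi)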
@@ -460,19 +536,37 @@

          return self._FETCH_CACHE[fetch_hash]

-     def fetch_connected_components(self, **kwargs):
+     def fetch_connected_components(self, **fetch_kwargs):
          """
-         Provide an iterator over masks of connected components in the volume
+         Provide an generator over masks of connected components in the volume
          """
-         img = self.fetch(**kwargs)
-         from skimage import measure
-         imgdata = np.asanyarray(img.dataobj).squeeze()
-         components = measure.label(imgdata > 0)
-         component_labels = np.unique(components)
-         assert component_labels[0] == 0
-         return (
-             (label, Nifti1Image((components == label).astype('uint8'), img.affine))
-             for label in component_labels[1:]
+         img = self.fetch(**fetch_kwargs)
+         assert isinstance(img, Nifti1Image), NotImplementedError(
+             f"Connected components for type {type(img)} is not yet implemeneted."
+         )
+         for label, component in connected_components(np.asanyarray(img.dataobj)):
+             yield (
+                 label,
+                 Nifti1Image(component, img.affine)
+             )
+
+     def compute_spatial_props(self, split_components: bool = True, **fetch_kwargs) -> List[ComponentSpatialProperties]:
+         """
+         Find the center of this volume in its (non-zero) voxel space and and its
+         volume.
+
+         Parameters
+         ----------
+         split_components: bool, default: True
+             If True, finds the spatial properties for each connected component
+             found by skimage.measure.label.
+         """
+         assert self.provides_image, NotImplementedError("Spatial properties can currently on be calculated for images.")
+         img = self.fetch(format="image", **fetch_kwargs)
+         return ComponentSpatialProperties.compute_from_image(
+             img=img,
+             space=self.space,
+             split_components=split_components
          )

      def draw_samples(self, N: int, sample_size: int = 100, e: float = 1, sigma_mm=None, invert=False, **kwargs):
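
A sketch of the generator-based fetch_connected_components and the new compute_spatial_props helper. It wraps a hypothetical NIfTI mask with from_nifti (which the compute_centroids hunk above also uses); the file path and the space lookup are illustrative assumptions:

    import nibabel as nib
    import siibra
    from siibra.volumes.volume import from_nifti

    nii = nib.load("region_mask_mni152.nii.gz")  # hypothetical binary mask
    vol = from_nifti(nii, siibra.spaces["mni152"], "example mask")

    # fetch_connected_components now yields (label, Nifti1Image) pairs lazily
    for label, component_img in vol.fetch_connected_components():
        print(label, component_img.shape)

    # per-component spatial properties, sorted by volume (largest first)
    props = vol.compute_spatial_props(split_components=True)
    print(props[0].centroid, props[0].volume)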
@@ -670,6 +764,10 @@ def merge(volumes: List[Volume], labels: List[int] = [], **fetch_kwargs) -> Volu
      -------
      Volume
      """
+     if len(volumes) == 1:
+         logger.debug("Only one volume supplied returning as is (kwargs are ignored).")
+         return volumes[0]
+
      assert len(volumes) > 1, "Need to supply at least two volumes to merge."
      if labels:
          assert len(volumes) == len(labels), "Need to supply as many labels as volumes."
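
merge() now tolerates a single-element list instead of failing its length assertion. Continuing the illustrative from_nifti sketch above:

    from siibra.volumes.volume import merge

    # with exactly one volume, merge returns it unchanged (fetch kwargs are ignored)
    merged = merge([vol])
    assert merged is vol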
@@ -1,6 +1,6 @@
  Metadata-Version: 2.1
  Name: siibra
- Version: 1.0a11
+ Version: 1.0a19
  Summary: siibra - Software interfaces for interacting with brain atlases
  Home-page: https://github.com/FZJ-INM1-BDA/siibra-python
  Author: Big Data Analytics Group, Forschungszentrum Juelich, Institute of Neuroscience and Medicine (INM-1)
@@ -25,8 +25,8 @@ Requires-Dist: requests
  Requires-Dist: neuroglancer-scripts
  Requires-Dist: nilearn
  Requires-Dist: filelock
- Requires-Dist: ebrains-drive >=0.6.0
- Requires-Dist: typing-extensions ; python_version < "3.8"
+ Requires-Dist: ebrains-drive>=0.6.0
+ Requires-Dist: typing-extensions; python_version < "3.8"

  |License| |PyPI version| |doi| |Python versions| |Documentation Status|