siibra 1.0a1__1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of siibra might be problematic. Click here for more details.

Files changed (84) hide show
  1. siibra/VERSION +1 -0
  2. siibra/__init__.py +164 -0
  3. siibra/commons.py +823 -0
  4. siibra/configuration/__init__.py +17 -0
  5. siibra/configuration/configuration.py +189 -0
  6. siibra/configuration/factory.py +589 -0
  7. siibra/core/__init__.py +16 -0
  8. siibra/core/assignment.py +110 -0
  9. siibra/core/atlas.py +239 -0
  10. siibra/core/concept.py +308 -0
  11. siibra/core/parcellation.py +387 -0
  12. siibra/core/region.py +1223 -0
  13. siibra/core/space.py +131 -0
  14. siibra/core/structure.py +111 -0
  15. siibra/exceptions.py +63 -0
  16. siibra/experimental/__init__.py +19 -0
  17. siibra/experimental/contour.py +61 -0
  18. siibra/experimental/cortical_profile_sampler.py +57 -0
  19. siibra/experimental/patch.py +98 -0
  20. siibra/experimental/plane3d.py +256 -0
  21. siibra/explorer/__init__.py +17 -0
  22. siibra/explorer/url.py +222 -0
  23. siibra/explorer/util.py +87 -0
  24. siibra/features/__init__.py +117 -0
  25. siibra/features/anchor.py +224 -0
  26. siibra/features/connectivity/__init__.py +33 -0
  27. siibra/features/connectivity/functional_connectivity.py +57 -0
  28. siibra/features/connectivity/regional_connectivity.py +494 -0
  29. siibra/features/connectivity/streamline_counts.py +27 -0
  30. siibra/features/connectivity/streamline_lengths.py +27 -0
  31. siibra/features/connectivity/tracing_connectivity.py +30 -0
  32. siibra/features/dataset/__init__.py +17 -0
  33. siibra/features/dataset/ebrains.py +90 -0
  34. siibra/features/feature.py +970 -0
  35. siibra/features/image/__init__.py +27 -0
  36. siibra/features/image/image.py +115 -0
  37. siibra/features/image/sections.py +26 -0
  38. siibra/features/image/volume_of_interest.py +88 -0
  39. siibra/features/tabular/__init__.py +24 -0
  40. siibra/features/tabular/bigbrain_intensity_profile.py +77 -0
  41. siibra/features/tabular/cell_density_profile.py +298 -0
  42. siibra/features/tabular/cortical_profile.py +322 -0
  43. siibra/features/tabular/gene_expression.py +257 -0
  44. siibra/features/tabular/layerwise_bigbrain_intensities.py +62 -0
  45. siibra/features/tabular/layerwise_cell_density.py +95 -0
  46. siibra/features/tabular/receptor_density_fingerprint.py +192 -0
  47. siibra/features/tabular/receptor_density_profile.py +110 -0
  48. siibra/features/tabular/regional_timeseries_activity.py +294 -0
  49. siibra/features/tabular/tabular.py +139 -0
  50. siibra/livequeries/__init__.py +19 -0
  51. siibra/livequeries/allen.py +352 -0
  52. siibra/livequeries/bigbrain.py +197 -0
  53. siibra/livequeries/ebrains.py +145 -0
  54. siibra/livequeries/query.py +49 -0
  55. siibra/locations/__init__.py +91 -0
  56. siibra/locations/boundingbox.py +454 -0
  57. siibra/locations/location.py +115 -0
  58. siibra/locations/point.py +344 -0
  59. siibra/locations/pointcloud.py +349 -0
  60. siibra/retrieval/__init__.py +27 -0
  61. siibra/retrieval/cache.py +233 -0
  62. siibra/retrieval/datasets.py +389 -0
  63. siibra/retrieval/exceptions/__init__.py +27 -0
  64. siibra/retrieval/repositories.py +769 -0
  65. siibra/retrieval/requests.py +659 -0
  66. siibra/vocabularies/__init__.py +45 -0
  67. siibra/vocabularies/gene_names.json +29176 -0
  68. siibra/vocabularies/receptor_symbols.json +210 -0
  69. siibra/vocabularies/region_aliases.json +460 -0
  70. siibra/volumes/__init__.py +23 -0
  71. siibra/volumes/parcellationmap.py +1279 -0
  72. siibra/volumes/providers/__init__.py +20 -0
  73. siibra/volumes/providers/freesurfer.py +113 -0
  74. siibra/volumes/providers/gifti.py +165 -0
  75. siibra/volumes/providers/neuroglancer.py +736 -0
  76. siibra/volumes/providers/nifti.py +266 -0
  77. siibra/volumes/providers/provider.py +107 -0
  78. siibra/volumes/sparsemap.py +468 -0
  79. siibra/volumes/volume.py +892 -0
  80. siibra-1.0.0a1.dist-info/LICENSE +201 -0
  81. siibra-1.0.0a1.dist-info/METADATA +160 -0
  82. siibra-1.0.0a1.dist-info/RECORD +84 -0
  83. siibra-1.0.0a1.dist-info/WHEEL +5 -0
  84. siibra-1.0.0a1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,892 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """A specific mesh or 3D array."""
16
+
17
+ from .providers import provider as _provider
18
+
19
+ from .. import logger
20
+ from ..retrieval import requests
21
+ from ..core import space as _space, structure
22
+ from ..locations import point, pointcloud, boundingbox
23
+ from ..commons import resample_img_to_img, siibra_tqdm, affine_scaling, connected_components
24
+ from ..exceptions import NoMapAvailableError, SpaceWarpingFailedError, EmptyPointCloudError
25
+
26
+ from dataclasses import dataclass
27
+ from nibabel import Nifti1Image
28
+ import numpy as np
29
+ from typing import List, Dict, Union, Set, TYPE_CHECKING
30
+ from time import sleep
31
+ import json
32
+ from skimage import feature as skimage_feature, filters
33
+ from functools import lru_cache
34
+
35
+ if TYPE_CHECKING:
36
+ from ..retrieval.datasets import EbrainsDataset
37
+ TypeDataset = EbrainsDataset
38
+
39
+
40
@dataclass
class ComponentSpatialProperties:
    """
    Centroid and nonzero volume of an image.
    """
    # center of mass of the nonzero voxels, in the physical space of the image
    centroid: point.Point
    # number of nonzero voxels scaled by the voxel volume; fractional values
    # are possible since the affine scaling is a float
    volume: float

    @staticmethod
    def compute_from_image(
        img: Nifti1Image,
        space: Union[str, "_space.Space"],
        split_components: bool = True
    ) -> List["ComponentSpatialProperties"]:
        """
        Find the center of an image in its (non-zero) voxel space and its
        volume.

        Parameters
        ----------
        img: Nifti1Image
        space: str, Space
        split_components: bool, default: True
            If True, finds the spatial properties for each connected component
            found by skimage.measure.label.

        Returns
        -------
        List[ComponentSpatialProperties]
            Sorted by volume, largest component first.
        """
        scale = affine_scaling(img.affine)
        arr = np.asanyarray(img.dataobj)
        if split_components:
            components = connected_components(arr, connectivity=None)
        else:
            # treat the whole array as a single pseudo-component
            components = [(0, arr)]

        spatial_props: List["ComponentSpatialProperties"] = []
        for _, component in components:
            nonzero: np.ndarray = np.c_[np.nonzero(component)]
            spatial_props.append(
                ComponentSpatialProperties(
                    # mean voxel coordinate mapped to physical space via the affine
                    centroid=point.Point(
                        np.dot(img.affine, np.r_[nonzero.mean(0), 1])[:3],
                        space=space
                    ),
                    volume=nonzero.shape[0] * scale,
                )
            )

        # sort by volume
        spatial_props.sort(key=lambda cmp: cmp.volume, reverse=True)

        return spatial_props
93
+
94
+
95
class Volume(structure.BrainStructure):
    """
    A volume is a specific mesh or 3D array,
    which can be accessible via multiple providers in different formats.
    """

    # source formats that fetch() returns as a Nifti1Image
    IMAGE_FORMATS = [
        "nii",
        "zip/nii",
        "neuroglancer/precomputed"
    ]

    # source formats that fetch() returns as a mesh dictionary
    MESH_FORMATS = [
        "neuroglancer/precompmesh",
        "neuroglancer/precompmesh/surface",
        "gii-mesh",
        "gii-label",
        "freesurfer-annot",
        "zip/freesurfer-annot",
    ]

    # fetch priority order: image formats are tried before mesh formats
    SUPPORTED_FORMATS = IMAGE_FORMATS + MESH_FORMATS

    # aliases accepted by fetch(format=...) in place of concrete format names
    _FORMAT_LOOKUP = {
        "image": IMAGE_FORMATS,
        "mesh": MESH_FORMATS,
        "surface": MESH_FORMATS,
        "nifti": ["nii", "zip/nii"],
        "nii": ["nii", "zip/nii"]
    }

    # NOTE: class-level cache shared by ALL Volume instances, keyed by a hash
    # of (volume, format, serialized kwargs); see fetch()
    _FETCH_CACHE = {}  # we keep a cache of the most recently fetched volumes
    _FETCH_CACHE_MAX_ENTRIES = 3
128
+
129
+ def __init__(
130
+ self,
131
+ space_spec: dict,
132
+ providers: List['_provider.VolumeProvider'],
133
+ name: str = "",
134
+ variant: str = None,
135
+ datasets: List['TypeDataset'] = [],
136
+ bbox: "boundingbox.BoundingBox" = None
137
+ ):
138
+ self._name = name
139
+ self._space_spec = space_spec
140
+ self.variant = variant
141
+ self._providers: Dict[str, _provider.VolumeProvider] = {}
142
+ self.datasets = datasets
143
+ self._boundingbox = bbox
144
+ for provider in providers:
145
+ srctype = provider.srctype
146
+ assert srctype not in self._providers
147
+ self._providers[srctype] = provider
148
+ if len(self._providers) == 0:
149
+ logger.debug(f"No provider for volume {name}")
150
+
151
    def __hash__(self):
        # explicitly inherit the parent hash: defining __eq__ below would
        # otherwise implicitly set __hash__ to None and make Volume unhashable
        return super().__hash__()
153
+
154
+ def __eq__(self, other: 'Volume'):
155
+ return (
156
+ isinstance(other, Volume)
157
+ and self.name == other.name
158
+ and self.space == other.space
159
+ and self.variant == other.variant
160
+ and self._providers == other._providers
161
+ )
162
+
163
    @property
    def name(self):
        """The name of this volume. Allows derived classes to implement a lazy name specification."""
        return self._name
167
+
168
+ @property
169
+ def providers(self):
170
+ def concat(url: Union[str, Dict[str, str]], concat: str):
171
+ if isinstance(url, str):
172
+ return url + concat
173
+ return {key: url[key] + concat for key in url}
174
+ return {
175
+ srctype: concat(prov._url, f" {prov.label}" if hasattr(prov, "label") else "")
176
+ for srctype, prov in self._providers.items()
177
+ }
178
+
179
    @lru_cache(2)
    def get_boundingbox(self, clip: bool = True, background: float = 0.0, **fetch_kwargs) -> "boundingbox.BoundingBox":
        """
        Obtain the bounding box in physical coordinates of this volume.

        Parameters
        ----------
        clip : bool, default: True
            Whether to clip the background of the volume.
        background : float, default: 0.0
            The background value to clip.
            Note
            ----
            To use it, clip must be True.
        fetch_kwargs:
            keyword arguments that are used for fetching volumes,
            such as voi or resolution_mm. Currently, only possible for
            Neuroglancer volumes except for `format`.

        Raises
        ------
        RuntimeError
            If the volume provider does not have a bounding box calculator.
        """
        # NOTE(review): lru_cache on an instance method keeps instances alive
        # for the cache's lifetime and requires fetch_kwargs values to be
        # hashable — consider an explicit per-instance cache.
        if self._boundingbox is not None and len(fetch_kwargs) == 0:
            return self._boundingbox

        if not self.provides_image:
            raise NotImplementedError("Bounding box calculation of meshes is not implemented yet.")

        if clip:  # clipping requires fetching the image
            img = self.fetch(**fetch_kwargs)
            assert isinstance(img, Nifti1Image)
            return boundingbox.from_array(
                array=np.asanyarray(img.dataobj),
                background=background,
            ).transform(img.affine, space=self.space)

        # if clipping is not required, providers might have methods of creating
        # bounding boxes without fetching the image
        fmt = fetch_kwargs.get("format")
        if (fmt is not None) and (fmt not in self.formats):
            raise ValueError(
                f"Requested format {fmt} is not available as provider of "
                "this volume. See `volume.formats` for possible options."
            )
        providers = [self._providers[fmt]] if fmt else self._providers.values()
        for provider in providers:
            try:
                # always holds here: the clip=True branch returned above
                assert clip is False
                bbox = provider.get_boundingbox(
                    background=background, **fetch_kwargs
                )
                if bbox.space is None:  # provider does not know the space!
                    bbox._space_cached = self.space
                    bbox.minpoint._space_cached = self.space
                    bbox.maxpoint._space_cached = self.space
            except NotImplementedError:
                # this provider cannot compute a bounding box; try the next one
                continue
            return bbox
        raise RuntimeError(f"No bounding box specified by any volume provider of {str(self)}")
240
+
241
+ @property
242
+ def formats(self) -> Set[str]:
243
+ return {fmt for fmt in self._providers}
244
+
245
+ @property
246
+ def provides_mesh(self):
247
+ return any(f in self.MESH_FORMATS for f in self.formats)
248
+
249
+ @property
250
+ def provides_image(self):
251
+ return any(f in self.IMAGE_FORMATS for f in self.formats)
252
+
253
+ @property
254
+ def fragments(self) -> Dict[str, List[str]]:
255
+ result = {}
256
+ for srctype, p in self._providers.items():
257
+ t = 'mesh' if srctype in self.MESH_FORMATS else 'image'
258
+ for fragment_name in p.fragments:
259
+ if t in result:
260
+ result[t].append(fragment_name)
261
+ else:
262
+ result[t] = [fragment_name]
263
+ return result
264
+
265
    @property
    def space(self):
        """
        The reference space of this volume, resolved from the space spec.

        The "@id" key takes precedence over "name"; if neither is present,
        an 'Unspecified space' instance is returned instead of None.
        """
        for key in ["@id", "name"]:
            if key in self._space_spec:
                return _space.Space.get_instance(self._space_spec[key])
        return _space.Space(None, "Unspecified space", species=_space.Species.UNSPECIFIED_SPECIES)
271
+
272
+ @property
273
+ def species(self):
274
+ s = self.space
275
+ return None if s is None else s.species
276
+
277
    def __str__(self):
        # "<ClassName> <name> in space <space name>"; the name and space
        # fragments are omitted when they are falsy
        return (
            f"{self.__class__.__name__} {f'{self.name}' if self.name else ''}"
            f"{f' in space {self.space.name}' if self.space else ''}"
        )
282
+
283
    def __repr__(self):
        # unambiguous developer representation showing the raw space spec
        # and the provider mapping
        return (
            f"<{self.__class__.__name__}(space_spec={self._space_spec}, "
            f"name='{self.name}', providers={self._providers})>"
        )
288
+
289
+ def evaluate_points(
290
+ self,
291
+ points: Union['point.Point', 'pointcloud.PointCloud'],
292
+ outside_value: Union[int, float] = 0,
293
+ **fetch_kwargs
294
+ ) -> np.ndarray:
295
+ """
296
+ Evaluate the image at the positions of the given points.
297
+
298
+ Note
299
+ ----
300
+ Uses nearest neighbor interpolation. Other interpolation schemes are not
301
+ yet implemented.
302
+
303
+ Note
304
+ ----
305
+ If points are not on the same space as the map, they will be warped to
306
+ the space of the volume.
307
+
308
+ Parameters
309
+ ----------
310
+ points: PointCloud
311
+ outside_value: int, float. Default: 0
312
+ fetch_kwargs: dict
313
+ Any additional arguments are passed to the `fetch()` call for
314
+ retrieving the image data.
315
+
316
+ Returns
317
+ -------
318
+ values: numpy.ndarray
319
+ The values of the volume at the voxels points correspond to.
320
+
321
+ Raises
322
+ ------
323
+ SpaceWarpingFailedError
324
+ If warping of the points fails.
325
+ """
326
+ if not self.provides_image:
327
+ raise NotImplementedError("Filtering of points by pure mesh volumes not yet implemented.")
328
+
329
+ # make sure the points are in the same physical space as this volume
330
+ as_pointcloud = pointcloud.from_points([points]) if isinstance(points, point.Point) else points
331
+ warped = as_pointcloud.warp(self.space)
332
+ assert warped is not None, SpaceWarpingFailedError
333
+
334
+ # get the voxel array of this volume
335
+ img = self.fetch(format='image', **fetch_kwargs)
336
+ arr = np.asanyarray(img.dataobj)
337
+
338
+ # transform the points to the voxel space of the volume for extracting values
339
+ phys2vox = np.linalg.inv(img.affine)
340
+ voxels = warped.transform(phys2vox, space=None)
341
+ XYZ = voxels.coordinates.astype('int')
342
+
343
+ # temporarily set all outside voxels to (0,0,0) so that the index access doesn't fail
344
+ inside = np.all((XYZ < arr.shape) & (XYZ > 0), axis=1)
345
+ XYZ[~inside, :] = 0
346
+
347
+ # read out the values
348
+ X, Y, Z = XYZ.T
349
+ values = arr[X, Y, Z]
350
+
351
+ # fix the outside voxel values, which might have an inconsistent value now
352
+ values[~inside] = outside_value
353
+
354
+ return values
355
+
356
    def _points_inside(
        self,
        points: Union['point.Point', 'pointcloud.PointCloud'],
        keep_labels: bool = True,
        outside_value: Union[int, float] = 0,
        **fetch_kwargs
    ) -> 'pointcloud.PointCloud':
        """
        Reduce a pointcloud to the points which fall inside nonzero pixels of
        this map.

        Parameters
        ----------
        points: PointCloud
        keep_labels: bool
            If False, the returned PointCloud will be labeled with their indices
            in the original PointCloud.
        outside_value: int, float. Default: 0
            Value that evaluate_points() reports for points outside the volume.
        fetch_kwargs: dict
            Any additional arguments are passed to the `fetch()` call for
            retrieving the image data.

        Returns
        -------
        PointCloud
            A new PointCloud containing only the points inside the volume.
            Labels reflect the indices of the original points if `keep_labels`
            is False.
        """
        ptset = pointcloud.from_points([points]) if isinstance(points, point.Point) else points
        # a point is 'inside' when the volume evaluates to something other
        # than the designated outside value at its location
        values = self.evaluate_points(ptset, outside_value=outside_value, **fetch_kwargs)
        inside = list(np.where(values != outside_value)[0])
        return pointcloud.from_points(
            [ptset[i] for i in inside],
            newlabels=None if keep_labels else inside
        )
392
+
393
    def intersection(self, other: structure.BrainStructure, **fetch_kwargs) -> structure.BrainStructure:
        """
        Compute the intersection of a location with this volume. This will
        fetch actual image data. Any additional arguments are passed to fetch.

        Returns None when the intersection is empty;
        BrainStructure.intersects relies on that convention.
        """
        if isinstance(other, (pointcloud.PointCloud, point.Point)):
            try:
                points_inside = self._points_inside(other, keep_labels=False, **fetch_kwargs)
            except EmptyPointCloudError:
                return None  # BrainStructure.intersects checks for not None
            if isinstance(other, point.Point):  # preserve the type
                return points_inside[0]
            return points_inside
        elif isinstance(other, boundingbox.BoundingBox):
            return self.get_boundingbox(clip=True, background=0.0, **fetch_kwargs).intersection(other)
        elif isinstance(other, Volume):
            if self.space != other.space:
                raise NotImplementedError("Cannot intersect volumes from different spaces. Try comparing their boudning boxes.")
            format = fetch_kwargs.pop('format', 'image')
            v1 = self.fetch(format=format, **fetch_kwargs)
            v2 = other.fetch(format=format, **fetch_kwargs)
            arr1 = np.asanyarray(v1.dataobj)
            # resample the second image onto the voxel grid of the first
            # before the voxel-wise comparison
            arr2 = np.asanyarray(resample_img_to_img(v2, v1).dataobj)
            pointwise_min = np.minimum(arr1, arr2)
            if np.any(pointwise_min):
                return from_array(
                    data=pointwise_min,
                    affine=v1.affine,
                    space=self.space,
                    name=f"Intersection between {self} and {other} computed as their pointwise minimum"
                )
            else:
                return None
        else:  # other BrainStructures should have intersection with locations implemented.
            try:
                return other.intersection(self)
            except NoMapAvailableError:
                return None
431
+
432
    def fetch(
        self,
        format: str = None,
        **kwargs
    ):
        """
        Fetch a volumetric or surface representation from one of the providers.

        Parameters
        ----------
        format: str, default=None
            Requested format. If `None`, the first supported format matching in
            `self.formats` is tried, starting with volumetric formats.
            It can be explicitly specified as:
            - 'surface' or 'mesh' to fetch a surface format
            - 'volumetric' or 'voxel' to fetch a volumetric format
            - supported format types, see SUPPORTED_FORMATS. This includes
            'nii', 'zip/nii', 'neuroglancer/precomputed', 'gii-mesh',
            'neuroglancer/precompmesh', 'gii-label'

        Returns
        -------
        An image (Nifti1Image) or mesh (Dict['verts': ndarray, 'faces': ndarray, 'labels': ndarray]),
        or None if no provider could deliver any of the candidate formats.
        """
        # serialize kwargs into the cache key; results are cached in the
        # class-level _FETCH_CACHE shared by all Volume instances.
        # NOTE(review): assumes all kwarg values are hashable — confirm
        kwargs_serialized = json.dumps({k: hash(v) for k, v in kwargs.items()}, sort_keys=True)

        if "resolution_mm" in kwargs and format is None:
            if 'neuroglancer/precomputed' not in self.formats:
                raise ValueError("'resolution_mm' is only available for volumes with 'neuroglancer/precomputed' formats.")
            format = 'neuroglancer/precomputed'

        if format is None:
            # preserve fetch order in SUPPORTED_FORMATS
            possible_formats = [f for f in self.SUPPORTED_FORMATS if f in self.formats]
        elif format in self._FORMAT_LOOKUP:  # allow use of aliases
            possible_formats = [f for f in self._FORMAT_LOOKUP[format] if f in self.formats]
        elif format in self.SUPPORTED_FORMATS:
            possible_formats = [format] if format in self.formats else []
        else:
            possible_formats = []
        if len(possible_formats) == 0:
            raise ValueError(
                f"Invalid format requested: {format}. Possible values for this "
                f"volume are: {self.formats}"
            )

        # ensure the voi is inside the template
        voi = kwargs.get("voi", None)
        if voi is not None and voi.space is not None:
            assert isinstance(voi, boundingbox.BoundingBox)
            tmplt_bbox = voi.space.get_template().get_boundingbox(clip=False)
            intersection_bbox = voi.intersection(tmplt_bbox)
            if intersection_bbox is None:
                raise RuntimeError(f"voi provided ({voi}) lies out side the voxel space of the {voi.space.name} template.")
            if intersection_bbox != voi:
                logger.info(
                    f"Since provided voi lies outside the template ({voi.space}) it is clipped as: {intersection_bbox}"
                )
                kwargs["voi"] = intersection_bbox

        result = None
        # try each possible format
        for fmt in possible_formats:
            fetch_hash = hash((hash(self), hash(fmt), hash(kwargs_serialized)))
            # cached
            if fetch_hash in self._FETCH_CACHE:
                break
            # Repeat in case of too many requests only
            fwd_args = {k: v for k, v in kwargs.items() if k != "format"}
            for try_count in range(6):
                try:
                    if fmt in ["gii-label", "freesurfer-annot", "zip/freesurfer-annot"]:
                        # label formats also need the template's surface mesh;
                        # merge mesh and label dictionaries into one result
                        tpl = self.space.get_template(variant=kwargs.get('variant'))
                        mesh = tpl.fetch(**kwargs)
                        labels = self._providers[fmt].fetch(**fwd_args)
                        result = dict(**mesh, **labels)
                    else:
                        result = self._providers[fmt].fetch(**fwd_args)
                except requests.SiibraHttpRequestError as e:
                    if e.status_code == 429:  # too many requests
                        sleep(0.1)
                        logger.error(f"Cannot access {self._providers[fmt]}", exc_info=(try_count == 5))
                        continue
                    else:
                        break
                except Exception as e:
                    logger.info(e, exc_info=1)
                    break
                else:
                    break
            # update the cache if fetch is successful
            if result is not None:
                self._FETCH_CACHE[fetch_hash] = result
                while len(self._FETCH_CACHE) >= self._FETCH_CACHE_MAX_ENTRIES:
                    # remove oldest entry
                    self._FETCH_CACHE.pop(next(iter(self._FETCH_CACHE)))
                break
        else:
            # unsuccessful: do not poison the cache if none fetched
            logger.error(f"Could not fetch any formats from {possible_formats}.")
            return None

        return self._FETCH_CACHE[fetch_hash]
535
+
536
+ def fetch_connected_components(self, **fetch_kwargs):
537
+ """
538
+ Provide an generator over masks of connected components in the volume
539
+ """
540
+ img = self.fetch(**fetch_kwargs)
541
+ assert isinstance(img, Nifti1Image), NotImplementedError(
542
+ f"Connected components for type {type(img)} is not yet implemeneted."
543
+ )
544
+ for label, component in connected_components(np.asanyarray(img.dataobj)):
545
+ yield (
546
+ label,
547
+ Nifti1Image(component, img.affine)
548
+ )
549
+
550
    def compute_spatial_props(self, split_components: bool = True, **fetch_kwargs) -> List[ComponentSpatialProperties]:
        """
        Find the center of this volume in its (non-zero) voxel space and its
        volume.

        Parameters
        ----------
        split_components: bool, default: True
            If True, finds the spatial properties for each connected component
            found by skimage.measure.label.
        fetch_kwargs:
            Additional arguments passed to `fetch()`; `format` defaults to 'image'.

        Returns
        -------
        List[ComponentSpatialProperties]
            Sorted by component volume, largest first.
        """
        assert self.provides_image, NotImplementedError("Spatial properties can currently on be calculated for images.")
        img = self.fetch(format=fetch_kwargs.pop("format", "image"), **fetch_kwargs)
        return ComponentSpatialProperties.compute_from_image(
            img=img,
            space=self.space,
            split_components=split_components
        )
568
+
569
    def draw_samples(self, N: int, sample_size: int = 100, e: float = 1, sigma_mm=None, invert=False, **kwargs):
        """
        Draw samples from the volume, by interpreting its values as an
        unnormalized empirical probability distribution.
        Any keyword arguments are passed over to fetch()

        Parameters
        ----------
        N: int
            Number of samples to draw.
        sample_size: int, default: 100
            Number of candidate points drawn per rejection-sampling round.
        e: float, default: 1
            Exponent applied to the normalized intensities before sampling.
        sigma_mm: optional
            Sigma value assigned to every returned point.
        invert: bool, default: False
            If True, sample from one minus the normalized intensity.
        """
        if not self.provides_image:
            raise NotImplementedError(
                "Drawing samples is so far only implemented for image-type volumes, "
                f"not {self.__class__.__name__}."
            )
        img = self.fetch(**kwargs)
        array = np.asanyarray(img.dataobj)
        samples = []
        # normalize intensities to [0, 1] so they act as acceptance probabilities
        P = (array - array.min()) / (array.max() - array.min())
        if invert:
            P = 1 - P
        P = P**e
        # rejection sampling: draw random voxel coordinates, accept each with
        # probability proportional to P at that voxel
        while True:
            pts = (np.random.rand(sample_size, 3) * max(P.shape))
            inside = np.all(pts < P.shape, axis=1)
            # NOTE(review): the first axis is unpacked as Y — assumes this
            # matches the intended index order of P; confirm
            Y, X, Z = np.split(pts[inside, :].astype('int'), 3, axis=1)
            T = np.random.rand(1)
            choice = np.where(P[Y, X, Z] >= T)[0]
            samples.extend(list(pts[inside, :][choice, :]))
            if len(samples) > N:
                break
        voxels = pointcloud.PointCloud(
            np.random.permutation(samples)[:N, :],
            space=None
        )
        # NOTE(review): the result is assigned space 'mni152' regardless of
        # self.space — looks like a hard-coded assumption; confirm
        result = voxels.transform(img.affine, space='mni152')
        result.sigma_mm = [sigma_mm for _ in result]
        return result
603
+
604
    def find_peaks(self, mindist=5, sigma_mm=0, **kwargs):
        """
        Find local peaks in the volume.
        Additional keyword arguments are passed over to fetch()

        Parameters
        ----------
        mindist: int, default: 5
            Minimum distance between peaks in voxels (forwarded to
            skimage.feature.peak_local_max as min_distance).
        sigma_mm: default: 0
            Sigma value assigned to every returned point.

        Returns
        -------
        PointCloud
            Peak locations in the physical space of this volume, labeled by
            their index.
        """
        if not self.provides_image:
            raise NotImplementedError(
                "Finding peaks is so far only implemented for image-type volumes, "
                f"not {self.__class__.__name__}."
            )
        img = self.fetch(**kwargs)
        array = np.asanyarray(img.dataobj)
        voxels = skimage_feature.peak_local_max(array, min_distance=mindist)
        points = pointcloud.PointCloud(voxels, space=None, labels=list(range(len(voxels)))).transform(img.affine, space=self.space)
        points.sigma_mm = [sigma_mm for _ in points]
        return points
620
+
621
+
622
class FilteredVolume(Volume):
    """
    A volume prescribed to always fetch a fixed label, fragment, and/or
    threshold from its parent volume.
    """

    def __init__(
        self,
        parent_volume: Volume,
        label: int = None,
        fragment: str = None,
        threshold: float = None,
    ):
        """
        A prescribed Volume to fetch specified label and fragment.
        If threshold is defined, a mask of the values above the threshold.

        Parameters
        ----------
        parent_volume : Volume
        label : int, default: None
            Get the mask of value equal to label.
        fragment : str, default None
            If a volume is fragmented, get a specified one.
        threshold : float, default None
            Provide a float value to threshold the image.
        """
        name = parent_volume.name
        # compare against None rather than truthiness, so that valid falsy
        # specs (label=0, threshold=0.0) are reflected in the name as well,
        # consistent with the `is not None` check in fetch()
        if label is not None:
            name += f" - label: {label}"
        if fragment is not None:
            name += f" - fragment: {fragment}"
        if threshold is not None:
            name += f" - threshold: {threshold}"
        Volume.__init__(
            self,
            space_spec=parent_volume._space_spec,
            providers=list(parent_volume._providers.values()),
            name=name
        )
        self.fragment = fragment
        self.label = label
        self.threshold = threshold

    def fetch(
        self,
        format: str = None,
        **kwargs
    ):
        """
        Fetch the volume, enforcing the prescribed fragment and label, and
        applying the threshold mask when one was set.

        Returns
        -------
        Nifti1Image or mesh dict; a binary uint8 mask when a threshold is set.
        """
        if "fragment" in kwargs:
            assert kwargs.get("fragment") == self.fragment, f"This is a filtered volume that can only fetch fragment '{self.fragment}'."
        else:
            kwargs["fragment"] = self.fragment
        if "label" in kwargs:
            assert kwargs.get("label") == self.label, f"This is a filtered volume that can only fetch label '{self.label}' only."
        else:
            kwargs["label"] = self.label

        result = super().fetch(format=format, **kwargs)

        if self.threshold is not None:
            # a threshold spec excludes a label spec
            assert self.label is None
            if not isinstance(result, Nifti1Image):
                raise NotImplementedError("Cannot threshold meshes.")
            imgdata = np.asanyarray(result.dataobj)
            return Nifti1Image(
                dataobj=(imgdata > self.threshold).astype("uint8"),
                affine=result.affine,
                dtype="uint8"
            )

        return result

    def get_boundingbox(
        self,
        clip: bool = True,
        background: float = 0.0,
        **fetch_kwargs
    ) -> "boundingbox.BoundingBox":
        # NOTE: since some providers enable different simplified ways to create a
        # bounding box without fetching the image, the correct kwargs must be
        # forwarded since FilteredVolumes enforce their specs to be fetched.
        return super().get_boundingbox(
            clip=clip,
            background=background,
            **fetch_kwargs
        )
705
+
706
+
707
class Subvolume(Volume):
    """
    Wrapper class for exposing a z level of a 4D volume to be used like a 3D volume.
    """

    def __init__(self, parent_volume: Volume, z: int):
        """Wrap `parent_volume`, restricting each of its providers to z-level `z`."""
        wrapped_providers = [
            _provider.SubvolumeProvider(p, z=z)
            for p in parent_volume._providers.values()
        ]
        Volume.__init__(
            self,
            space_spec=parent_volume._space_spec,
            providers=wrapped_providers,
            name=parent_volume.name + f" - z: {z}"
        )
722
+
723
+
724
def from_file(filename: str, space: str, name: str) -> Volume:
    """Builds a nifti volume from a filename."""
    from ..core.concept import get_registry
    from .providers.nifti import NiftiProvider
    space_obj = get_registry("Space").get(space)
    # fall back to the filename when no explicit name is given
    volume_name = filename if name is None else name
    return Volume(
        space_spec={"@id": space_obj.id},
        providers=[NiftiProvider(filename)],
        name=volume_name,
    )
734
+
735
+
736
def from_nifti(nifti: Nifti1Image, space: str, name: str) -> Volume:
    """Builds a nifti volume from a Nifti image."""
    from ..core.concept import get_registry
    from .providers.nifti import NiftiProvider
    space_obj = get_registry("Space").get(space)
    # the provider accepts a (data array, affine) tuple in place of a file
    data_and_affine = (np.asanyarray(nifti.dataobj), nifti.affine)
    return Volume(
        space_spec={"@id": space_obj.id},
        providers=[NiftiProvider(data_and_affine)],
        name=name
    )
746
+
747
+
748
def from_array(
    data: np.ndarray,
    affine: np.ndarray,
    space: Union[str, Dict[str, str]],
    name: str
) -> Volume:
    """Builds a siibra volume from an array and an affine matrix."""
    if len(name) == 0:
        raise ValueError("Please provide a non-empty string for `name`")
    from ..core.concept import get_registry
    from .providers.nifti import NiftiProvider
    # a dict space spec is resolved via its first value
    if isinstance(space, dict):
        space_spec = next(iter(space.values()))
    else:
        space_spec = space
    space_obj = get_registry("Space").get(space_spec)
    return Volume(
        space_spec={"@id": space_obj.id},
        providers=[NiftiProvider((data, affine))],
        name=name,
    )
766
+
767
+
768
def from_pointcloud(
    points: pointcloud.PointCloud,
    label: int = None,
    target: Volume = None,
    normalize=True,
    **kwargs
) -> Volume:
    """
    Get the kernel density estimate as a volume from the points using their
    average uncertainty on target volume.

    Parameters
    ----------
    points: pointcloud.PointCloud
    label: int, default: None
        If None, finds the KDE for all points. Otherwise, selects the points
        labelled with this integer value.
    target: Volume, default: None
        If None, the template of the space points are defined on will be used.
    normalize: bool, default: True
        If True, the resulting array is normalized to sum to 1.
    kwargs:
        Additional arguments passed to the target volume's `fetch()`.

    Raises
    ------
    AssertionError
        If no points with the requested label are found in the set.
    """
    if target is None:
        target = points.space.get_template()
    targetimg = target.fetch(**kwargs)
    # work in the voxel grid of the target image
    voxels = points.transform(np.linalg.inv(targetimg.affine), space=None)

    if (label is None) or (points.labels is None):
        selection = [True for _ in points]
    else:
        assert label in points.labels, f"No points with the label {label} in the set: {set(points.labels)}"
        selection = points.labels == label

    # build a voxel-wise histogram of the selected points
    voxelcount_img = np.zeros_like(targetimg.get_fdata())
    unique_coords, counts = np.unique(
        np.array(voxels.as_list(), dtype='int')[selection, :],
        axis=0,
        return_counts=True
    )
    voxelcount_img[tuple(unique_coords.T)] = counts

    # TODO: consider how to handle pointclouds with varied sigma_mm
    sigmas = np.array(points.sigma_mm)[selection]
    bandwidth = np.mean(sigmas)
    if len(np.unique(sigmas)) > 1:
        logger.warning(f"KDE of pointcloud uses average bandwith {bandwidth} instead of the points' individual sigmas.")

    # smooth the histogram with a Gaussian of the average sigma
    filtered_arr = filters.gaussian(voxelcount_img, bandwidth)
    if normalize:
        filtered_arr /= filtered_arr.sum()

    return from_array(
        data=filtered_arr,
        affine=targetimg.affine,
        space=target.space,
        name=f'KDE map of {points}{f"labelled {label}" if label else ""}'
    )
829
+
830
+
831
def merge(volumes: List[Volume], labels: List[int] = None, **fetch_kwargs) -> Volume:
    """
    Merge a list of nifti volumes in the same space into a single volume.

    Note
    ----
    In case of voxel conflicts, volumes later in the list override the
    values written by earlier ones.

    Parameters
    ----------
    volumes : List[Volume]
    labels : List[int], optional
        Supply new labels to replace existing values per volume.

    Returns
    -------
    Volume
    """
    # None sentinel instead of a mutable default argument; behavior is
    # unchanged for callers passing an explicit list (or nothing)
    labels = [] if labels is None else labels

    if len(volumes) == 1:
        logger.debug("Only one volume supplied returning as is (kwargs are ignored).")
        return volumes[0]

    assert len(volumes) > 1, "Need to supply at least two volumes to merge."
    if labels:
        assert len(volumes) == len(labels), "Need to supply as many labels as volumes."

    space = volumes[0].space
    assert all(v.space == space for v in volumes), "Cannot merge volumes from different spaces."

    # choose the merged dtype: int32 when relabelling, uint8 when any input
    # is a FilteredVolume (assumed to fetch binary masks — TODO confirm),
    # otherwise inherit the first volume's dtype
    if len(labels) > 0:
        dtype = 'int32'
    elif FilteredVolume in {type(v) for v in volumes}:
        dtype = 'uint8'
    else:
        dtype = volumes[0].fetch().dataobj.dtype
    template_img = space.get_template().fetch(**fetch_kwargs)
    merged_array = np.zeros(template_img.shape, dtype=dtype)

    for i, vol in siibra_tqdm(
        enumerate(volumes),
        unit=" volume",
        desc=f"Resampling volumes to {space.name} and merging",
        total=len(volumes),
        disable=len(volumes) < 3
    ):
        img = vol.fetch(**fetch_kwargs)
        # resample each volume onto the template grid before writing it in
        resampled_arr = np.asanyarray(
            resample_img_to_img(img, template_img).dataobj
        )
        nonzero_voxels = resampled_arr > 0
        if labels:
            merged_array[nonzero_voxels] = labels[i]
        else:
            merged_array[nonzero_voxels] = resampled_arr[nonzero_voxels]

    return from_array(
        data=merged_array,
        affine=template_img.affine,
        space=space,
        name=f"Resampled and merged volumes: {','.join([v.name for v in volumes])}"
    )