siibra 1.0.1a0__py3-none-any.whl → 1.0.1a2__py3-none-any.whl

This diff shows the changes between two package versions that have been publicly released to a supported registry. It is provided for informational purposes only and reflects the packages exactly as they appear in their public registries.

Potentially problematic release.



Files changed (82)
  1. siibra/VERSION +1 -1
  2. siibra/__init__.py +11 -20
  3. siibra/commons.py +17 -14
  4. siibra/configuration/__init__.py +1 -1
  5. siibra/configuration/configuration.py +6 -6
  6. siibra/configuration/factory.py +10 -9
  7. siibra/core/__init__.py +2 -2
  8. siibra/core/assignment.py +2 -1
  9. siibra/core/atlas.py +4 -4
  10. siibra/core/concept.py +7 -5
  11. siibra/core/parcellation.py +10 -10
  12. siibra/core/region.py +82 -73
  13. siibra/core/space.py +5 -7
  14. siibra/core/structure.py +4 -4
  15. siibra/exceptions.py +6 -2
  16. siibra/explorer/__init__.py +1 -1
  17. siibra/explorer/url.py +2 -2
  18. siibra/explorer/util.py +1 -1
  19. siibra/features/__init__.py +1 -1
  20. siibra/features/anchor.py +4 -6
  21. siibra/features/connectivity/__init__.py +1 -1
  22. siibra/features/connectivity/functional_connectivity.py +1 -1
  23. siibra/features/connectivity/regional_connectivity.py +12 -15
  24. siibra/features/connectivity/streamline_counts.py +1 -1
  25. siibra/features/connectivity/streamline_lengths.py +1 -1
  26. siibra/features/connectivity/tracing_connectivity.py +1 -1
  27. siibra/features/dataset/__init__.py +1 -1
  28. siibra/features/dataset/ebrains.py +2 -2
  29. siibra/features/feature.py +31 -28
  30. siibra/features/image/__init__.py +5 -3
  31. siibra/features/image/image.py +4 -6
  32. siibra/features/image/sections.py +82 -3
  33. siibra/features/image/volume_of_interest.py +1 -9
  34. siibra/features/tabular/__init__.py +2 -2
  35. siibra/features/tabular/bigbrain_intensity_profile.py +3 -2
  36. siibra/features/tabular/cell_density_profile.py +10 -11
  37. siibra/features/tabular/cortical_profile.py +9 -9
  38. siibra/features/tabular/gene_expression.py +7 -6
  39. siibra/features/tabular/layerwise_bigbrain_intensities.py +5 -4
  40. siibra/features/tabular/layerwise_cell_density.py +5 -7
  41. siibra/features/tabular/receptor_density_fingerprint.py +47 -19
  42. siibra/features/tabular/receptor_density_profile.py +2 -3
  43. siibra/features/tabular/regional_timeseries_activity.py +9 -9
  44. siibra/features/tabular/tabular.py +10 -9
  45. siibra/livequeries/__init__.py +1 -1
  46. siibra/livequeries/allen.py +23 -25
  47. siibra/livequeries/bigbrain.py +252 -55
  48. siibra/livequeries/ebrains.py +14 -11
  49. siibra/livequeries/query.py +5 -5
  50. siibra/locations/__init__.py +19 -10
  51. siibra/locations/boundingbox.py +10 -13
  52. siibra/{experimental/plane3d.py → locations/experimental.py} +117 -17
  53. siibra/locations/location.py +11 -13
  54. siibra/locations/point.py +10 -19
  55. siibra/locations/pointcloud.py +59 -23
  56. siibra/retrieval/__init__.py +1 -1
  57. siibra/retrieval/cache.py +2 -1
  58. siibra/retrieval/datasets.py +23 -17
  59. siibra/retrieval/exceptions/__init__.py +1 -1
  60. siibra/retrieval/repositories.py +14 -15
  61. siibra/retrieval/requests.py +32 -30
  62. siibra/vocabularies/__init__.py +2 -3
  63. siibra/volumes/__init__.py +5 -4
  64. siibra/volumes/parcellationmap.py +55 -20
  65. siibra/volumes/providers/__init__.py +1 -1
  66. siibra/volumes/providers/freesurfer.py +7 -7
  67. siibra/volumes/providers/gifti.py +5 -5
  68. siibra/volumes/providers/neuroglancer.py +25 -28
  69. siibra/volumes/providers/nifti.py +7 -7
  70. siibra/volumes/providers/provider.py +4 -3
  71. siibra/volumes/sparsemap.py +8 -7
  72. siibra/volumes/volume.py +33 -40
  73. {siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/METADATA +21 -8
  74. siibra-1.0.1a2.dist-info/RECORD +80 -0
  75. {siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/WHEEL +1 -1
  76. siibra/experimental/__init__.py +0 -19
  77. siibra/experimental/contour.py +0 -61
  78. siibra/experimental/cortical_profile_sampler.py +0 -57
  79. siibra/experimental/patch.py +0 -98
  80. siibra-1.0.1a0.dist-info/RECORD +0 -84
  81. {siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/LICENSE +0 -0
  82. {siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/top_level.txt +0 -0
siibra/volumes/providers/neuroglancer.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2024
+ # Copyright 2018-2025
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,8 +13,20 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from . import provider as _provider
+ from io import BytesIO
+ import os
+ from typing import Union, Dict, Tuple
+ import json
+
+ import numpy as np
+ import nibabel as nib
+ from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
+ from neuroglancer_scripts.http_accessor import HttpAccessor
+ from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh

+ from . import provider as _provider
+ from ...retrieval import requests, cache
+ from ...locations import boundingbox as _boundingbox
  from ...commons import (
  logger,
  MapType,
@@ -23,18 +35,6 @@ from ...commons import (
  QUIET,
  resample_img_to_img
  )
- from ...retrieval import requests, cache
- from ...locations import boundingbox as _boundingbox
-
- from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
- from neuroglancer_scripts.http_accessor import HttpAccessor
- from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh
- from io import BytesIO
- import nibabel as nib
- import os
- import numpy as np
- from typing import Union, Dict, Tuple
- import json


  def shift_ng_transfrom(
@@ -52,7 +52,7 @@ def shift_ng_transfrom(
  Parameters
  ----------
  transform_nm: np.ndarray
- Transform array created for dispalying an image correctly from
+ Transform array created for displaying an image correctly from
  neuroglancer precomputed format in neuroglancer viewer.
  max_resolution_nm: np.ndarray
  The voxel resolution of the highest level of resolution.
@@ -174,7 +174,7 @@ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/preco
  Parameters
  ----------
  fetch_kwargs:
- key word arguments that are used for fetchin volumes,
+ key word arguments that are used for fetching volumes,
  such as voi or resolution_mm.
  """
  bbox = None
@@ -422,7 +422,7 @@ class NeuroglancerScale:
  return self.res_nm / 1e6

  def resolves(self, resolution_mm):
- """Test wether the resolution of this scale is sufficient to provide the given resolution."""
+ """Test whether the resolution of this scale is sufficient to provide the given resolution."""
  return all(r / 1e6 <= resolution_mm for r in self.res_nm)

  def __lt__(self, other):
@@ -529,14 +529,7 @@ class NeuroglancerScale:
  if voi is None:
  bbox_ = _boundingbox.BoundingBox((0, 0, 0), self.size, space=None)
  else:
- bbox_ = voi.transform(np.linalg.inv(self.affine))
-
- for dim in range(3):
- if bbox_.shape[dim] < 1:
- logger.warning(
- f"Bounding box in voxel space will be enlarged to by {self.res_mm[dim]} along axis {dim}."
- )
- bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + self.res_mm[dim]
+ bbox_ = voi.transform(np.linalg.inv(self.affine), space=None)

  # extract minimum and maximum the chunk indices to be loaded
  gx0, gy0, gz0 = self._point_to_lower_chunk_idx(tuple(bbox_.minpoint))
@@ -559,8 +552,12 @@
  # exact bounding box requested, to cut off undesired borders
  data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
  x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
- xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
+ xd, yd, zd = np.ceil(bbox_.maxpoint).astype(int) - np.floor(bbox_.minpoint).astype(int)
  offset = tuple(bbox_.minpoint)
+ if voi is not None:
+ logger.debug(
+ f"Input: {voi.minpoint.coordinate}, {voi.maxpoint.coordinate}.\nVoxel space: {bbox_.minpoint.coordinate}, {bbox_.maxpoint.coordinate}"
+ )

  # build the nifti image
  trans = np.identity(4)[[2, 1, 0, 3], :] # zyx -> xyz
@@ -594,7 +591,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
  elif isinstance(resource, dict):
  self._meshes = {n: self._fragmentinfo(u) for n, u in resource.items()}
  else:
- raise ValueError(f"Resource specificaton not understood for {self.__class__.__name__}: {resource}")
+ raise ValueError(f"Resource specification not understood for {self.__class__.__name__}: {resource}")

  @property
  def _url(self) -> Union[str, Dict[str, str]]:
@@ -635,7 +632,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
  result[name] = (f"{spec['url']}/{mesh_key}/{fragment_names[0]}", transform)
  else:
  # only one mesh was configures, so we might still
- # see muliple fragments under the mesh url
+ # see multiple fragments under the mesh url
  for fragment_name in fragment_names:
  result[fragment_name] = (f"{spec['url']}/{mesh_key}/{fragment_name}", transform)

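The NeuroglancerProvider hunks above note that `fetch_kwargs` such as `voi` or `resolution_mm` are forwarded when fetching neuroglancer/precomputed volumes, and that a volume of interest is now transformed into voxel space with `space=None`. A minimal, hypothetical sketch of fetching such a volume of interest at reduced resolution; `siibra.get_template`, the `"bigbrain"` specifier, and the example coordinates are assumptions based on the siibra documentation, while only the `voi`/`resolution_mm` keywords are taken from the diff:

```python
import siibra

# Template volume of the BigBrain space (assumed helper and specifier).
bigbrain_template = siibra.get_template("bigbrain")

# A small bounding box in physical (mm) coordinates of that space.
voi = siibra.locations.BoundingBox((-10, -10, -10), (10, 10, 10), space="bigbrain")

# fetch_kwargs from the diff: restrict fetching to the VOI and request a coarser resolution.
chunk = bigbrain_template.fetch(voi=voi, resolution_mm=0.32)
print(chunk.shape)  # Nifti1Image cropped to the bounding box
```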
siibra/volumes/providers/nifti.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2024
+ # Copyright 2018-2025
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,17 +13,17 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.

- from . import provider as _provider
+ import os
+ from typing import Union, Dict, Tuple
+
+ import numpy as np
+ import nibabel as nib

+ from . import provider as _provider
  from ...commons import logger, resample_img_to_img
  from ...retrieval import requests
  from ...locations import pointcloud, boundingbox as _boundingbox

- from typing import Union, Dict, Tuple
- import nibabel as nib
- import os
- import numpy as np
-

  class NiftiProvider(_provider.VolumeProvider, srctype="nii"):

siibra/volumes/providers/provider.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2024
+ # Copyright 2018-2025
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,12 +17,13 @@ from __future__ import annotations

  from abc import ABC, abstractmethod
  from typing import TYPE_CHECKING, Union, Dict, List
- from nibabel import Nifti1Image
  import json
+
+ from nibabel import Nifti1Image
+
  if TYPE_CHECKING:
  from ...locations.boundingbox import BoundingBox

- # TODO add mesh primitive. Check nibabel implementation? Use trimesh? Do we want to add yet another dependency?
  VolumeData = Union[Nifti1Image, Dict]


siibra/volumes/sparsemap.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2024
+ # Copyright 2018-2025
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -13,19 +13,20 @@
  # See the License for the specific language governing permissions and
  # limitations under the License.
  """Represents lists of probabilistic brain region maps."""
- from . import parcellationmap, volume as _volume

+ from os import path, makedirs
+ from typing import Dict, List
+
+ import numpy as np
+ from nilearn import image
+
+ from . import parcellationmap, volume as _volume
  from .providers import provider
  from ..commons import MapIndex, logger, connected_components, siibra_tqdm
  from ..locations import boundingbox
  from ..retrieval.cache import CACHE
  from ..retrieval.requests import HttpRequest, FileLoader

- from os import path, makedirs
- from typing import Dict, List
- from nilearn import image
- import numpy as np
-

  class SparseIndex:

siibra/volumes/volume.py CHANGED
@@ -1,4 +1,4 @@
- # Copyright 2018-2024
+ # Copyright 2018-2025
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -14,27 +14,26 @@
  # limitations under the License.
  """A specific mesh or 3D array."""

- from .providers import provider as _provider
-
- from .. import logger
- from ..retrieval import requests
- from ..core import space as _space, structure
- from ..locations import point, pointcloud, boundingbox
- from ..commons import resample_img_to_img, siibra_tqdm, affine_scaling, connected_components
- from ..exceptions import NoMapAvailableError, SpaceWarpingFailedError, EmptyPointCloudError
-
- from dataclasses import dataclass
- from nibabel import Nifti1Image
- import numpy as np
  from typing import List, Dict, Union, Set, TYPE_CHECKING
+ from dataclasses import dataclass
  from time import sleep
  import json
- from skimage import feature as skimage_feature, filters
  from functools import lru_cache

+ import numpy as np
+ from nibabel import Nifti1Image
+ from skimage import feature as skimage_feature, filters
+
+ from . import providers as _providers
+ from ..commons import resample_img_to_img, siibra_tqdm, affine_scaling, connected_components, logger
+ from ..exceptions import NoMapAvailableError, SpaceWarpingFailedError, EmptyPointCloudError
+ from ..retrieval import requests
+ from ..core import space as _space, structure
+ from ..core.concept import get_registry
+ from ..locations import point, pointcloud, boundingbox
+
  if TYPE_CHECKING:
- from ..retrieval.datasets import EbrainsDataset
- TypeDataset = EbrainsDataset
+ from ..retrieval.datasets import EbrainsDataset as TypeDataset


  @dataclass
@@ -129,7 +128,7 @@ class Volume(structure.BrainStructure):
  def __init__(
  self,
  space_spec: dict,
- providers: List['_provider.VolumeProvider'],
+ providers: List[_providers.provider.VolumeProvider],
  name: str = "",
  variant: str = None,
  datasets: List['TypeDataset'] = [],
@@ -138,7 +137,7 @@
  self._name = name
  self._space_spec = space_spec
  self.variant = variant
- self._providers: Dict[str, _provider.VolumeProvider] = {}
+ self._providers: Dict[str, _providers.provider.VolumeProvider] = {}
  self.datasets = datasets
  self._boundingbox = bbox
  for provider in providers:
@@ -191,7 +190,7 @@
  ----
  To use it, clip must be True.
  fetch_kwargs:
- key word arguments that are used for fetchin volumes,
+ key word arguments that are used for fetching volumes,
  such as voi or resolution_mm. Currently, only possible for
  Neuroglancer volumes except for `format`.

@@ -206,7 +205,7 @@
  if not self.provides_image:
  raise NotImplementedError("Bounding box calculation of meshes is not implemented yet.")

- if clip: # clippin requires fetching the image
+ if clip: # clipping requires fetching the image
  img = self.fetch(**fetch_kwargs)
  assert isinstance(img, Nifti1Image)
  return boundingbox.from_array(
@@ -214,7 +213,7 @@
  background=background,
  ).transform(img.affine, space=self.space)

- # if clipping is not required, providers migth have methods of creating
+ # if clipping is not required, providers might have methods of creating
  # bounding boxes without fetching the image
  fmt = fetch_kwargs.get("format")
  if (fmt is not None) and (fmt not in self.formats):
@@ -365,7 +364,7 @@
  this map.


- Paramaters
+ Parameters
  ----------
  points: PointCloud
  keep_labels: bool
@@ -407,7 +406,7 @@
  return self.get_boundingbox(clip=True, background=0.0, **fetch_kwargs).intersection(other)
  elif isinstance(other, Volume):
  if self.space != other.space:
- raise NotImplementedError("Cannot intersect volumes from different spaces. Try comparing their boudning boxes.")
+ raise NotImplementedError("Cannot intersect volumes from different spaces. Try comparing their bounding boxes.")
  format = fetch_kwargs.pop('format', 'image')
  v1 = self.fetch(format=format, **fetch_kwargs)
  v2 = other.fetch(format=format, **fetch_kwargs)
@@ -461,7 +460,7 @@
  format = 'neuroglancer/precomputed'

  if format is None:
- # preseve fetch order in SUPPORTED_FORMATS
+ # preserve fetch order in SUPPORTED_FORMATS
  possible_formats = [f for f in self.SUPPORTED_FORMATS if f in self.formats]
  elif format in self._FORMAT_LOOKUP: # allow use of aliases
  possible_formats = [f for f in self._FORMAT_LOOKUP[format] if f in self.formats]
@@ -519,7 +518,7 @@
  break
  else:
  break
- # udpate the cache if fetch is successful
+ # update the cache if fetch is successful
  if result is not None:
  self._FETCH_CACHE[fetch_hash] = result
  while len(self._FETCH_CACHE) >= self._FETCH_CACHE_MAX_ENTRIES:
@@ -539,7 +538,7 @@
  """
  img = self.fetch(**fetch_kwargs)
  assert isinstance(img, Nifti1Image), NotImplementedError(
- f"Connected components for type {type(img)} is not yet implemeneted."
+ f"Connected components for type {type(img)} is not yet implemented."
  )
  for label, component in connected_components(np.asanyarray(img.dataobj)):
  yield (
@@ -569,7 +568,7 @@
  def draw_samples(self, N: int, sample_size: int = 100, e: float = 1, sigma_mm=None, invert=False, **kwargs):
  """
  Draw samples from the volume, by interpreting its values as an
- unnormalized empirical probability distribtution.
+ unnormalized empirical probability distributions.
  Any keyword arguments are passed over to fetch()
  """
  if not self.provides_image:
@@ -714,7 +713,7 @@ class Subvolume(Volume):
  self,
  space_spec=parent_volume._space_spec,
  providers=[
- _provider.SubvolumeProvider(p, z=z)
+ _providers.provider.SubvolumeProvider(p, z=z)
  for p in parent_volume._providers.values()
  ],
  name=parent_volume.name + f" - z: {z}"
@@ -722,25 +721,21 @@


  def from_file(filename: str, space: str, name: str) -> Volume:
- """ Builds a nifti volume from a filename. """
- from ..core.concept import get_registry
- from .providers.nifti import NiftiProvider
+ """Builds a nifti volume from a filename."""
  spaceobj = get_registry("Space").get(space)
  return Volume(
  space_spec={"@id": spaceobj.id},
- providers=[NiftiProvider(filename)],
+ providers=[_providers.NiftiProvider(filename)],
  name=filename if name is None else name,
  )


  def from_nifti(nifti: Nifti1Image, space: str, name: str) -> Volume:
  """Builds a nifti volume from a Nifti image."""
- from ..core.concept import get_registry
- from .providers.nifti import NiftiProvider
  spaceobj = get_registry("Space").get(space)
  return Volume(
  space_spec={"@id": spaceobj.id},
- providers=[NiftiProvider((np.asanyarray(nifti.dataobj), nifti.affine))],
+ providers=[_providers.NiftiProvider((np.asanyarray(nifti.dataobj), nifti.affine))],
  name=name
  )

@@ -754,13 +749,11 @@ def from_array(
  """Builds a siibra volume from an array and an affine matrix."""
  if len(name) == 0:
  raise ValueError("Please provide a non-empty string for `name`")
- from ..core.concept import get_registry
- from .providers.nifti import NiftiProvider
  spacespec = next(iter(space.values())) if isinstance(space, dict) else space
  spaceobj = get_registry("Space").get(spacespec)
  return Volume(
  space_spec={"@id": spaceobj.id},
- providers=[NiftiProvider((data, affine))],
+ providers=[_providers.NiftiProvider((data, affine))],
  name=name,
  )

@@ -814,7 +807,7 @@ def from_pointcloud(
  sigmas = np.array(points.sigma_mm)[selection]
  bandwidth = np.mean(sigmas)
  if len(np.unique(sigmas)) > 1:
- logger.warning(f"KDE of pointcloud uses average bandwith {bandwidth} instead of the points' individual sigmas.")
+ logger.warning(f"KDE of pointcloud uses average bandwidth {bandwidth} instead of the points' individual sigmas.")

  filtered_arr = filters.gaussian(voxelcount_img, bandwidth)
  if normalize:
@@ -841,7 +834,7 @@ def merge(volumes: List[Volume], labels: List[int] = [], **fetch_kwargs) -> Volu
  ----------
  volumes : List[Volume]
  labels : List[int], optional
- Supply new labels to replace exisiting values per volume.
+ Supply new labels to replace existing values per volume.

  Returns
  -------
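The last hunks above touch the module-level constructors `from_file`, `from_nifti` and `from_array`, which now resolve `NiftiProvider` through the package-level `_providers` import instead of function-local imports; their signatures are unchanged. A minimal sketch of building a volume this way, assuming the constructors are importable from `siibra.volumes.volume` and that `"mni152"` is accepted as a space specifier (both assumptions; only the parameter names and the non-empty `name` requirement come from the diff):

```python
import numpy as np
from siibra.volumes import volume  # assumed import path for the constructors shown above

# Small dummy array with an identity affine.
data = np.zeros((10, 10, 10), dtype="uint8")
data[3:7, 3:7, 3:7] = 1

# `name` must be a non-empty string, otherwise from_array raises ValueError.
vol = volume.from_array(
    data=data,
    affine=np.eye(4),
    space="mni152",   # assumed specifier; resolved internally via get_registry("Space")
    name="demo volume",
)
print(vol)
```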
{siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/METADATA CHANGED
@@ -1,6 +1,6 @@
- Metadata-Version: 2.1
+ Metadata-Version: 2.2
  Name: siibra
- Version: 1.0.1a0
+ Version: 1.0.1a2
  Summary: siibra - Software interfaces for interacting with brain atlases
  Home-page: https://github.com/FZJ-INM1-BDA/siibra-python
  Author: Big Data Analytics Group, Forschungszentrum Juelich, Institute of Neuroscience and Medicine (INM-1)
@@ -27,6 +27,15 @@ Requires-Dist: nilearn
  Requires-Dist: typing-extensions; python_version < "3.8"
  Requires-Dist: filelock
  Requires-Dist: ebrains-drive>=0.6.0
+ Dynamic: author
+ Dynamic: author-email
+ Dynamic: classifier
+ Dynamic: description
+ Dynamic: description-content-type
+ Dynamic: home-page
+ Dynamic: requires-dist
+ Dynamic: requires-python
+ Dynamic: summary

  |License| |PyPI version| |doi| |Python versions| |Documentation Status|

@@ -34,7 +43,7 @@ Requires-Dist: ebrains-drive>=0.6.0
  siibra - Software interface for interacting with brain atlases
  ==============================================================

- Copyright 2020-2024, Forschungszentrum Jülich GmbH
+ Copyright 2018-2024, Forschungszentrum Jülich GmbH

  *Authors: Big Data Analytics Group, Institute of Neuroscience and
  Medicine (INM-1), Forschungszentrum Jülich GmbH*
@@ -50,6 +59,14 @@ It aims to facilitate programmatic and reproducible incorporation of brain parce
  It supports both discretely labelled and statistical (probabilistic) parcellation maps, which can be used to assign brain regions to spatial locations and image signals, to retrieve region-specific neuroscience datasets from multiple online repositories, and to sample information from high-resolution image data.
  The datasets anchored to brain regions address features of molecular, cellular and architecture as well as connectivity, and are complemented with live queries to external repositories as well as dynamic extraction from "big" image volumes such as the 20 micrometer BigBrain model.

+ ``siibra`` hides much of the complexity that would be required to collect and interact with the individual parcellations, templates and data repositories.
+ By encapsulating many aspects of interacting with different maps and reference templates spaces, it also minimizes common errors like misinterpretation of coordinates from different reference spaces, confusing label indices of brain regions, or using inconsistent versions of parcellation maps.
+ It aims to provide a safe way of using maps defined across multiple spatial scales for reproducible analysis.
+
+ .. intro-end
+
+ .. about-start
+
  ``siibra`` was developed in the frame of the `Human Brain Project <https://humanbrainproject.eu>`__ for accessing the `EBRAINS
  human brain atlas <https://ebrains.eu/service/human-brain-atlas>`__.
  It stores most of its contents as sustainable and open datasets in the `EBRAINS Knowledge Graph <https://kg.ebrains.eu>`__, and is designed to support the `OpenMINDS metadata standards <https://github.com/HumanBrainProject/openMINDS_SANDS>`__.
@@ -58,11 +75,7 @@ In fact, the viewer is a good resource for exploring ``siibra``\ ’s core funct
  Feature queries in ``siibra`` are parameterized by data modality and anatomical location, while the latter could be a brain region, brain parcellation, or location in reference space.
  Beyond the explorative focus of ``siibra-explorer``, the Python library supports a range of data analysis functions suitable for typical neuroscience workflows.

- ``siibra`` hides much of the complexity that would be required to collect and interact with the individual parcellations, templates and data repositories.
- By encapsulating many aspects of interacting with different maps and reference templates spaces, it also minimizes common errors like misinterpretation of coordinates from different reference spaces, confusing label indices of brain regions, or using inconsistent versions of parcellation maps.
- It aims to provide a safe way of using maps defined across multiple spatial scales for reproducible analysis.
-
- .. intro-end
+ .. about-end

  .. getting-started-start

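The README section changed above describes feature queries that are parameterized by data modality and anatomical location. A hypothetical sketch of such a query, following the patterns in the siibra documentation; the region specifiers and the modality string are assumptions and may differ between siibra versions:

```python
import siibra

# Anatomical location: a region of the Julich-Brain parcellation
# ("julich 3.0" and "v1 left" are fuzzily matched specifiers, assumed here).
v1_left = siibra.get_region("julich 3.0", "v1 left")

# Feature query parameterized by modality and location.
fingerprints = siibra.features.get(v1_left, "ReceptorDensityFingerprint")
for fp in fingerprints:
    print(fp.name)
```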
siibra-1.0.1a2.dist-info/RECORD ADDED
@@ -0,0 +1,80 @@
+ siibra/VERSION,sha256=tnNU724QbIsJd2y5y9A2yYp48XeoqEJ_sfDYRgMGdrg,14
+ siibra/__init__.py,sha256=1uWhsE93KG4N9wiWoMdEokUXxfoRcyznXDktjAGhpEI,4496
+ siibra/commons.py,sha256=i4qS4CPteESu2NlchZuLjJrc23PYc0_WMiPr7WMi9ro,27646
+ siibra/exceptions.py,sha256=6MlXOadwXcCsceOE4lmy4fLJyAaBCCVvJF6BZlMYjU8,1371
+ siibra/configuration/__init__.py,sha256=ArqQ_B8C_O61KA4Fk3ho8ksckbjLu-COOlPGiXyf8LE,752
+ siibra/configuration/configuration.py,sha256=FhJ2MF925BeI3KHk8r68-1PnMzIqTfsZEM8ZpvbtqzQ,7263
+ siibra/configuration/factory.py,sha256=WPOP0hugX47cTWMGsXo99B4RER14pGllJDUvcC3kMko,22628
+ siibra/core/__init__.py,sha256=zW887SH2txImUfvU80k83NbxxnlHlbdzBjdryY-3-48,766
+ siibra/core/assignment.py,sha256=7TL3cV2uU8XHatpOkrt5uMju04HfDtcgRR7HM3B1chU,3820
+ siibra/core/atlas.py,sha256=Iwqgcf8sG8_iL4mlw_4Nzi7HWVR-wxYLESqnIS0CFTw,8549
+ siibra/core/concept.py,sha256=rLqgJ72Mt1Mc8mlh-bGYOyq65rV7Vc8vCZGs-amJp1w,10891
+ siibra/core/parcellation.py,sha256=JzrnoB8G0XupffP8mnwE3oHYmHjo2Mzn4-pXnZ2R6Ow,14480
+ siibra/core/region.py,sha256=036Fjck1H6wGSubJomjEHUN1I-XMPSPqx3_78MEYbG8,44108
+ siibra/core/space.py,sha256=X7FyKgdhao3ezSWQZ0MAjDxlQh305S-4a4D630RaM-c,4588
+ siibra/core/structure.py,sha256=M2li4PPiJf27dOc3b2ycCfHf7Ad1AWxBYc9OpSFazJM,4498
+ siibra/explorer/__init__.py,sha256=XBAeYm4W3HlbWsKtt8gOwqE_FinIEY7RdA6Rg4Y275A,781
+ siibra/explorer/url.py,sha256=ja5i-VkEMYwqhlQ-K5tEfnlYTcgMpPFYJCK7IV0d3Us,7069
+ siibra/explorer/util.py,sha256=ul82TQZAULdupr4tJBACdkjlHm2mt8LJ9UpwNWGHYhE,2083
+ siibra/features/__init__.py,sha256=FER6DMnkPhXSV1XMZWibZdyBwVhIgWYSUGYMEYEKb9c,3970
+ siibra/features/anchor.py,sha256=Umu_Ljkr656h7jvgp43Wi6thMFEycxz_Lf8Bj_QOTZA,9129
+ siibra/features/feature.py,sha256=mpQ0M7sMYJEqOkx91rvaDlV_W5iRi_-z4S3eJLAZR8M,35283
+ siibra/features/connectivity/__init__.py,sha256=FkPf0vyrLo3ERxrDbsRHUd7FUgJyajD87NiiXIiXhmY,1161
+ siibra/features/connectivity/functional_connectivity.py,sha256=9lQoOXv8lZUnyMduAbWABwDIkQC0QTI8V23yx0NjOBg,2122
+ siibra/features/connectivity/regional_connectivity.py,sha256=V401G_EOTIDVz3IJJR4d2xOxLiWmIb9qQAMFD46qPlM,18283
+ siibra/features/connectivity/streamline_counts.py,sha256=JaAYf6-1S8NYhkE4lhshCSY__EQ5BFcL2i_XXdFfgrM,1064
+ siibra/features/connectivity/streamline_lengths.py,sha256=QeuoW_ZDVa9dxCguaemj4Cq9CCPB8ur8_alhATto2-w,1067
+ siibra/features/connectivity/tracing_connectivity.py,sha256=rkYgD8mOZzDp0STo5djhDqOaEdz-9j5EuLffXE0F01A,1083
+ siibra/features/dataset/__init__.py,sha256=qRV_P0335b4LnSMiONRpSC4elGckp0FXmtJz_QQuVLA,748
+ siibra/features/dataset/ebrains.py,sha256=zA_GSIJzeJgClY5KrsfAJgrdjcM5N2Z_mz442UR_1vc,2544
+ siibra/features/image/__init__.py,sha256=_Vf6AgIEiYTmtYiKdM7v6YRQco3XWnrEL8vLDhU1sqo,1015
+ siibra/features/image/image.py,sha256=heXBen5Sq3dVEcHSb4W4rs7n9nOXy3Nqp-eO0Vzjz4A,3583
+ siibra/features/image/sections.py,sha256=rZPoeZbu_cK8td5J4LVxZxsojjwTodrHe42P2de28_Y,3708
+ siibra/features/image/volume_of_interest.py,sha256=6rMMWekSAji7p0KFJMSVX7QuhbODfDBEIR-KNHGvnuM,2392
+ siibra/features/tabular/__init__.py,sha256=DdE1GXPEsHSNJ3pyNYZNWu5JxDfuI08DbrDF1iBrXIA,1203
+ siibra/features/tabular/bigbrain_intensity_profile.py,sha256=C7rP2QTJPyTz_EnRlgdFWPVaDcwJszTlm1ylHBydJOE,2709
+ siibra/features/tabular/cell_density_profile.py,sha256=ScpjD9W-w-lLxdG6q42Zfyqe8LW4JvaorIdnixuPzo4,10818
+ siibra/features/tabular/cortical_profile.py,sha256=x0TqZh8FzcVx13EwPtbt8kBYfF1dREWWuf9BhpWzj7o,12542
+ siibra/features/tabular/gene_expression.py,sha256=8n3aJgJfU_hyIZFnOmKg76GNCath5TMkH0tElWvZhTg,9828
+ siibra/features/tabular/layerwise_bigbrain_intensities.py,sha256=iFCNG641yCSuIXFTY30xgRQqLERDHMqJrAy3SdKkAJ4,2118
+ siibra/features/tabular/layerwise_cell_density.py,sha256=j5EznhcWjGb_hwbsQtIiQNWkbeUtEOS13vPeP-Zw2Xw,3740
+ siibra/features/tabular/receptor_density_fingerprint.py,sha256=CvFJv940whxzavhEQWnTjueDEq0ZoiuYVpqUpR8t-Ec,8065
+ siibra/features/tabular/receptor_density_profile.py,sha256=VW2Ry8ifQLfmiBDKqIdvaN7K1YzMENGU92Lnm4wA8P0,3724
+ siibra/features/tabular/regional_timeseries_activity.py,sha256=wuno4oI1I-dyxRr8-tLFj42iaD6dYZiPigCqVMhrG50,10027
+ siibra/features/tabular/tabular.py,sha256=wFfCeRwh45Bu7eUT9hBygqy4K010tf7n96t2ddA0tIk,5392
+ siibra/livequeries/__init__.py,sha256=hToUNmyFq1UW5CxScHyBB-mSZ7kUK_w1Cvrk1OkZQf0,875
+ siibra/livequeries/allen.py,sha256=QtKf5sYsSuF8NeqPvyw_DakEJrVYh47mfJVF1rMkJWg,14827
+ siibra/livequeries/bigbrain.py,sha256=hT-T6gKRoZQFPWvitm1hv-DoMkkGjz-NfqSx7khfLR0,15762
+ siibra/livequeries/ebrains.py,sha256=GBwpm9f_rVjZgF-SyGZ5PGMmL9e9OZzXhHs78uCNhYs,5952
+ siibra/livequeries/query.py,sha256=P_uUVFxv4KyOPQXh2WnDQxuaSSBK_dXPKEr8I0-4xSQ,1849
+ siibra/locations/__init__.py,sha256=yTJVMdv-pICm_gUYLQ09lDKSkZBfgUihM6LNv07e_yk,3546
+ siibra/locations/boundingbox.py,sha256=uCFQBiwsq39UUTz0xwTNIIl4RMXQRH3MDXO0t9pbY9Q,16369
+ siibra/locations/experimental.py,sha256=kd9yroLKpnY-N6hoMVgtHK4pVdgeGqhpad4GyxYy6LU,14484
+ siibra/locations/location.py,sha256=sdIZiUHGURpOo9JVMUH2w1dEmZAtMBbBXYED-ho_K5s,4383
+ siibra/locations/point.py,sha256=g8rjHCuHENTgkiJB7UGH0nWu4anbhf2GSlUfjhek67o,12533
+ siibra/locations/pointcloud.py,sha256=_9-4k9yLMiCu8phoxFO6bUsIuJFbRqw7BmBMQ0_FHmk,13979
+ siibra/retrieval/__init__.py,sha256=E-UA8rDQZFGkHmAcmit6siONo7G2mH_Y0xgLlR1RfvY,1062
+ siibra/retrieval/cache.py,sha256=uMWEi93VePSOSXaiU5PZAbUlWBYyO7gbpRxod4jO2Rc,7833
+ siibra/retrieval/datasets.py,sha256=JgnSc41TM0oGNAVn8zQjL84HML-feOBVy5bLxxHrEt8,11110
+ siibra/retrieval/repositories.py,sha256=mMmSAnLZJjDb-bi86FPKNQXDbIPKpA_kh7IjKlTPkxM,29935
+ siibra/retrieval/requests.py,sha256=VhAnD8mTK26YHVv0haWjfYcdnJuHg6uHhZD-TSs8Cfo,23094
+ siibra/retrieval/exceptions/__init__.py,sha256=sOuzPHh2xq1p55T0zAcrSW2B1wfwwYEXBOWIuCjGePE,875
+ siibra/vocabularies/__init__.py,sha256=aWXsCVmtz0ZtX5J-f_XeyeGSAj9XbuQQjKJz_rvPVtY,1287
+ siibra/vocabularies/gene_names.json,sha256=i-gnh753GyZtQfX_dWibNYr_d5ccDPHooOwsdeKUYqE,1647972
+ siibra/vocabularies/receptor_symbols.json,sha256=F6DZIArPCBmJV_lWGV-zDpBBH_GOJOZm67LBE4qzMa4,5722
+ siibra/vocabularies/region_aliases.json,sha256=T2w1wRlxPNTsPppXn0bzC70tNsb8mOjLsoHuxDSYm2w,8563
+ siibra/volumes/__init__.py,sha256=9eiVjgGTqq9BiFJaLVLABtTrhIcE2k3Cn51yC4EKplg,936
+ siibra/volumes/parcellationmap.py,sha256=-Yb9SINRyug-nT_pZkLGvZB3iNrpxFyjp1-PgdMTv2k,51329
+ siibra/volumes/sparsemap.py,sha256=PI-3dxORjCFyg_B03ByQpwdAT69GHMRrcLBgweHc0vM,17438
+ siibra/volumes/volume.py,sha256=wLmtqoXBDoPvE3WBO_Hc8uny3cdE9mB0KgdCq0LOTa0,32408
+ siibra/volumes/providers/__init__.py,sha256=AHZCjutCqO4mnHxyykVjqxlz85jDqFWcSjsa4ciwc1A,934
+ siibra/volumes/providers/freesurfer.py,sha256=l3zkLlE28EAEmg75tv9yp1YYiaHVkf4Zi8rKn9TUWVs,4893
+ siibra/volumes/providers/gifti.py,sha256=JGuixlSJTVjbDU_M5oMDCV8BAwIzuczhnI-qZ7LMQIc,6231
+ siibra/volumes/providers/neuroglancer.py,sha256=b3TiJ6yrx_akLFKgHRKZyHculzzRIqbZ7U3TMQHy6-k,28618
+ siibra/volumes/providers/nifti.py,sha256=aAzkmeDZaXRZ-dkAeEb2cSywNn9WzIz0z7yFtN6iNpU,10135
+ siibra/volumes/providers/provider.py,sha256=Vn02au_LKynO5SIfqLyjqzxCf7JD9Wm4i7yEFcTX0WU,3585
+ siibra-1.0.1a2.dist-info/LICENSE,sha256=xx0jnfkXJvxRnG63LTGOxlggYnIysveWIZ6H3PNdCrQ,11357
+ siibra-1.0.1a2.dist-info/METADATA,sha256=lndYng0x27sYef1UlLiD6evMTEYXFsCamHTuTWgfzcs,9111
+ siibra-1.0.1a2.dist-info/WHEEL,sha256=52BFRY2Up02UkjOa29eZOS2VxUrpPORXg1pkohGGUS8,91
+ siibra-1.0.1a2.dist-info/top_level.txt,sha256=NF0OSGLL0li2qyC7MaU0iBB5Y9S09_euPpvisD0-8Hg,7
+ siibra-1.0.1a2.dist-info/RECORD,,
{siibra-1.0.1a0.dist-info → siibra-1.0.1a2.dist-info}/WHEEL CHANGED
@@ -1,5 +1,5 @@
  Wheel-Version: 1.0
- Generator: setuptools (75.6.0)
+ Generator: setuptools (76.0.0)
  Root-Is-Purelib: true
  Tag: py3-none-any

siibra/experimental/__init__.py DELETED
@@ -1,19 +0,0 @@
- # Copyright 2018-2024
- # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
-
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from .plane3d import Plane3D
- from .contour import Contour
- from .cortical_profile_sampler import CorticalProfileSampler
- from .patch import Patch
siibra/experimental/contour.py DELETED
@@ -1,61 +0,0 @@
- # Copyright 2018-2024
- # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
-
- # Licensed under the Apache License, Version 2.0 (the "License");
- # you may not use this file except in compliance with the License.
- # You may obtain a copy of the License at
-
- # http://www.apache.org/licenses/LICENSE-2.0
-
- # Unless required by applicable law or agreed to in writing, software
- # distributed under the License is distributed on an "AS IS" BASIS,
- # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- # See the License for the specific language governing permissions and
- # limitations under the License.
-
- from ..locations import point, pointcloud, boundingbox
-
- import numpy as np
-
-
- class Contour(pointcloud.PointCloud):
- """
- A PointCloud that represents a contour line.
- The only difference is that the point order is relevant,
- and consecutive points are thought as being connected by an edge.
-
- In fact, PointCloud assumes order as well, but no connections between points.
- """
-
- def __init__(self, coordinates, space=None, sigma_mm=0, labels: list = None):
- pointcloud.PointCloud.__init__(self, coordinates, space, sigma_mm, labels)
-
- def crop(self, voi: boundingbox.BoundingBox):
- """
- Crop the contour with a volume of interest.
- Since the contour might be split from the cropping,
- returns a set of contour segments.
- """
- segments = []
-
- # set the contour point labels to a linear numbering
- # so we can use them after the intersection to detect splits.
- old_labels = self.labels
- self.labels = list(range(len(self)))
- cropped = self.intersection(voi)
-
- if cropped is not None and not isinstance(cropped, point.Point):
- assert isinstance(cropped, pointcloud.PointCloud)
- # Identifiy contour splits are by discontinuouities ("jumps")
- # of their labels, which denote positions in the original contour
- jumps = np.diff([self.labels.index(lb) for lb in cropped.labels])
- splits = [0] + list(np.where(jumps > 1)[0] + 1) + [len(cropped)]
- for i, j in zip(splits[:-1], splits[1:]):
- segments.append(
- self.__class__(cropped.coordinates[i:j, :], space=cropped.space)
- )
-
- # reset labels of the input contour points.
- self.labels = old_labels
-
- return segments