siibra 1.0a19__py3-none-any.whl → 1.0.1a1__py3-none-any.whl
This diff compares the content of publicly released package versions as they appear in their respective public registries, and is provided for informational purposes only.
Potentially problematic release: this version of siibra might be problematic.
- siibra/VERSION +1 -1
- siibra/__init__.py +7 -7
- siibra/commons.py +8 -53
- siibra/configuration/__init__.py +1 -1
- siibra/configuration/configuration.py +1 -1
- siibra/configuration/factory.py +11 -21
- siibra/core/__init__.py +1 -1
- siibra/core/assignment.py +1 -1
- siibra/core/atlas.py +21 -15
- siibra/core/concept.py +3 -3
- siibra/core/parcellation.py +69 -54
- siibra/core/region.py +178 -158
- siibra/core/space.py +1 -1
- siibra/core/structure.py +2 -2
- siibra/exceptions.py +13 -1
- siibra/experimental/__init__.py +1 -1
- siibra/experimental/contour.py +8 -8
- siibra/experimental/cortical_profile_sampler.py +1 -1
- siibra/experimental/patch.py +3 -3
- siibra/experimental/plane3d.py +12 -12
- siibra/explorer/__init__.py +1 -1
- siibra/explorer/url.py +2 -2
- siibra/explorer/util.py +1 -1
- siibra/features/__init__.py +1 -1
- siibra/features/anchor.py +14 -15
- siibra/features/connectivity/__init__.py +1 -1
- siibra/features/connectivity/functional_connectivity.py +1 -1
- siibra/features/connectivity/regional_connectivity.py +4 -4
- siibra/features/connectivity/streamline_counts.py +1 -1
- siibra/features/connectivity/streamline_lengths.py +1 -1
- siibra/features/connectivity/tracing_connectivity.py +1 -1
- siibra/features/dataset/__init__.py +1 -1
- siibra/features/dataset/ebrains.py +1 -1
- siibra/features/feature.py +24 -26
- siibra/features/image/__init__.py +1 -1
- siibra/features/image/image.py +2 -2
- siibra/features/image/sections.py +1 -1
- siibra/features/image/volume_of_interest.py +1 -1
- siibra/features/tabular/__init__.py +1 -1
- siibra/features/tabular/bigbrain_intensity_profile.py +2 -2
- siibra/features/tabular/cell_density_profile.py +98 -64
- siibra/features/tabular/cortical_profile.py +3 -3
- siibra/features/tabular/gene_expression.py +1 -1
- siibra/features/tabular/layerwise_bigbrain_intensities.py +1 -1
- siibra/features/tabular/layerwise_cell_density.py +4 -23
- siibra/features/tabular/receptor_density_fingerprint.py +13 -10
- siibra/features/tabular/receptor_density_profile.py +1 -1
- siibra/features/tabular/regional_timeseries_activity.py +4 -4
- siibra/features/tabular/tabular.py +7 -5
- siibra/livequeries/__init__.py +1 -1
- siibra/livequeries/allen.py +42 -19
- siibra/livequeries/bigbrain.py +21 -12
- siibra/livequeries/ebrains.py +1 -1
- siibra/livequeries/query.py +2 -3
- siibra/locations/__init__.py +11 -11
- siibra/locations/boundingbox.py +30 -29
- siibra/locations/location.py +1 -1
- siibra/locations/point.py +7 -7
- siibra/locations/{pointset.py → pointcloud.py} +36 -33
- siibra/retrieval/__init__.py +1 -1
- siibra/retrieval/cache.py +1 -1
- siibra/retrieval/datasets.py +4 -4
- siibra/retrieval/exceptions/__init__.py +1 -1
- siibra/retrieval/repositories.py +13 -30
- siibra/retrieval/requests.py +25 -8
- siibra/vocabularies/__init__.py +1 -1
- siibra/volumes/__init__.py +2 -2
- siibra/volumes/parcellationmap.py +119 -91
- siibra/volumes/providers/__init__.py +1 -1
- siibra/volumes/providers/freesurfer.py +3 -3
- siibra/volumes/providers/gifti.py +1 -1
- siibra/volumes/providers/neuroglancer.py +67 -41
- siibra/volumes/providers/nifti.py +12 -26
- siibra/volumes/providers/provider.py +1 -1
- siibra/volumes/sparsemap.py +125 -246
- siibra/volumes/volume.py +150 -61
- {siibra-1.0a19.dist-info → siibra-1.0.1a1.dist-info}/METADATA +26 -4
- siibra-1.0.1a1.dist-info/RECORD +84 -0
- {siibra-1.0a19.dist-info → siibra-1.0.1a1.dist-info}/WHEEL +1 -1
- siibra-1.0a19.dist-info/RECORD +0 -84
- {siibra-1.0a19.dist-info → siibra-1.0.1a1.dist-info}/LICENSE +0 -0
- {siibra-1.0a19.dist-info → siibra-1.0.1a1.dist-info}/top_level.txt +0 -0
siibra/volumes/providers/neuroglancer.py:

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2025
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -37,6 +37,37 @@ from typing import Union, Dict, Tuple
 import json
 
 
+def shift_ng_transfrom(
+    transform_nm: np.ndarray, scale_resolution_nm: np.ndarray, max_resolution_nm: np.ndarray
+) -> np.ndarray:
+    """
+    Helper method to get nifti standard affine.
+
+    transfrorm.json stored with neuroglancer precomputed images and meshes
+    are meant to be used for neuroglancer viewers and hence they are not
+    representative of the affine in other tools. This method shifts back
+    half a voxel in each axis.
+    (see https://neuroglancer-scripts.readthedocs.io/en/latest/neuroglancer-info.html#different-conventions-for-coordinate-transformations)
+
+    Parameters
+    ----------
+    transform_nm: np.ndarray
+        Transform array created for displaying an image correctly from
+        neuroglancer precomputed format in neuroglancer viewer.
+    max_resolution_nm: np.ndarray
+        The voxel resolution of the highest level of resolution.
+
+    Returns
+    -------
+    np.ndarray
+        Standard affine in nm
+    """
+    scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
+    affine = np.dot(transform_nm, scaling)
+    affine[:3, 3] += (max_resolution_nm * 0.5)
+    return affine
+
+
 class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):
 
     def __init__(self, url: Union[str, Dict[str, str]]):
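To see concretely what the new helper does, here is a small self-contained sketch with hypothetical values (an identity viewer transform and 20 µm isotropic voxels); the helper body is copied from the hunk above so the snippet runs on its own:

    import numpy as np

    # copied from the hunk above so the example is self-contained
    def shift_ng_transfrom(transform_nm, scale_resolution_nm, max_resolution_nm):
        scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
        affine = np.dot(transform_nm, scaling)
        affine[:3, 3] += (max_resolution_nm * 0.5)
        return affine

    # hypothetical dataset: identity viewer transform, 20000 nm (20 um) voxels
    res_nm = np.array([20000.0, 20000.0, 20000.0])
    affine = shift_ng_transfrom(np.eye(4), scale_resolution_nm=res_nm, max_resolution_nm=res_nm)
    print(affine[:3, 3])    # [10000. 10000. 10000.] -> origin moved half a voxel (in nm)
    print(np.diag(affine))  # [20000. 20000. 20000.     1.] -> voxel-to-nm scaling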
@@ -128,28 +159,22 @@ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):
             label = None
         if label is not None:
             result = nib.Nifti1Image(
-                (result.
-                result.affine
+                (np.asanyarray(result.dataobj) == label).astype('uint8'),
+                result.affine,
+                dtype='uint8',
             )
 
         return result
 
-    def get_boundingbox(self,
+    def get_boundingbox(self, **fetch_kwargs) -> "_boundingbox.BoundingBox":
         """
         Return the bounding box in physical coordinates of the union of
         fragments in this neuroglancer volume.
 
         Parameters
         ----------
-        clip: bool, default: True
-            Whether to clip the background of the volume.
-        background: float, default: 0.0
-            The background value to clip.
-            Note
-            ----
-            To use it, clip must be True.
         fetch_kwargs:
-            key word arguments that are used for
+            key word arguments that are used for fetching volumes,
             such as voi or resolution_mm.
         """
         bbox = None
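The rewritten call builds a binary label mask from the raw data array (the removed lines are truncated in this render, so the exact old expression is not recoverable). A minimal sketch of the same idiom on toy data, with illustrative names rather than siibra's actual fetch path:

    import numpy as np
    import nibabel as nib

    # toy labelled volume standing in for a fetched parcellation image
    data = np.zeros((4, 4, 4), dtype='int16')
    data[1:3, 1:3, 1:3] = 2
    img = nib.Nifti1Image(data, np.eye(4))

    label = 2
    # np.asanyarray(img.dataobj) reads the stored array without the
    # float64 cast that img.get_fdata() would apply
    mask = (np.asanyarray(img.dataobj) == label).astype('uint8')
    mask_img = nib.Nifti1Image(mask, img.affine, dtype='uint8')
    print(mask_img.get_fdata().sum())  # 8.0 -> eight voxels carry the label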
@@ -159,23 +184,17 @@ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):
                     f"N-D Neuroglancer volume has shape {frag.shape}, but "
                     f"bounding box considers only {frag.shape[:3]}"
                 )
-
-
-
-
-                ).transform(img.affine)  # use the affine of the image matching fetch_kwargs
+            resolution_mm = fetch_kwargs.get("resolution_mm")
+            if resolution_mm is None:
+                affine = frag.affine
+                shape = frag.shape[:3]
             else:
-
-
-
-
-
-                affine = scale.affine
-                shape = scale.size[:3]
-                next_bbox = _boundingbox.BoundingBox(
-                    (0, 0, 0), shape, space=None
-                ).transform(affine)
+                scale = frag._select_scale(resolution_mm=resolution_mm)
+                affine = scale.affine
+                shape = scale.size[:3]
+            next_bbox = _boundingbox.BoundingBox(
+                (0, 0, 0), shape, space=None
+            ).transform(affine)
             bbox = next_bbox if bbox is None else bbox.union(next_bbox)
         return bbox
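The rewritten branch derives the box from the grid alone: a (0, 0, 0)-to-shape box in voxel space, mapped to physical coordinates by the chosen affine (the fragment's own affine, or that of the scale matching `resolution_mm`). A plain-numpy sketch of that mapping with hypothetical values; siibra's `BoundingBox.transform` encapsulates this step:

    import numpy as np

    # hypothetical grid: 10x12x8 voxels, 2 mm isotropic, arbitrary origin
    shape = np.array([10, 12, 8])
    affine = np.diag([2.0, 2.0, 2.0, 1.0])
    affine[:3, 3] = [-10.0, -12.0, -8.0]

    # map the two opposite voxel-grid corners to physical space; with an
    # axis-aligned affine these two corners already bound the volume
    corners = np.stack([[0, 0, 0, 1], [*shape, 1]], axis=1)
    phys = (affine @ corners)[:3]
    print(phys.min(axis=1), phys.max(axis=1))  # [-10. -12.  -8.] [10. 12.  8.]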
@@ -246,7 +265,11 @@ class NeuroglancerVolume:
         self._io: PrecomputedIO = None
 
     @property
-    def transform_nm(self):
+    def transform_nm(self) -> np.ndarray:
+        """
+        This is the transformation matrix created to cater neuroglancer viewer
+        for a neuroglancer precomputed images.
+        """
         if self._transform_nm is not None:
             return self._transform_nm
         try:
@@ -326,7 +349,7 @@ class NeuroglancerVolume:
     ):
         # the caller has to make sure voi is defined in the correct reference space
         scale = self._select_scale(resolution_mm=resolution_mm, bbox=voi, max_bytes=max_bytes)
-        return scale.fetch(voi=voi)
+        return scale.fetch(voi=voi, **kwargs)
 
     def get_shape(self, resolution_mm=None, max_bytes: float = MAX_BYTES):
         scale = self._select_scale(resolution_mm=resolution_mm, max_bytes=max_bytes)
@@ -399,7 +422,7 @@ class NeuroglancerScale:
         return self.res_nm / 1e6
 
     def resolves(self, resolution_mm):
-        """Test
+        """Test whether the resolution of this scale is sufficient to provide the given resolution."""
         return all(r / 1e6 <= resolution_mm for r in self.res_nm)
 
     def __lt__(self, other):
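The completed docstring matches the check on the following line: a scale resolves a requested resolution when each of its voxel dimensions, converted from nm to mm, is at least as fine. A one-off check for a hypothetical 20 µm scale:

    # the same test as the method body, on a hypothetical 20 um scale
    res_nm = (20000.0, 20000.0, 20000.0)  # 0.02 mm voxels

    def resolves(resolution_mm):
        return all(r / 1e6 <= resolution_mm for r in res_nm)

    print(resolves(0.05), resolves(0.01))  # True False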
@@ -445,10 +468,13 @@ class NeuroglancerScale:
 
     @property
     def affine(self):
-
-
-
-
+        affine_ = shift_ng_transfrom(
+            transform_nm=self.volume.transform_nm,
+            scale_resolution_nm=self.res_nm,
+            max_resolution_nm=self.volume.scales[0].res_nm[0],
+        )
+        affine_[:3, :] /= 1e6
+        return affine_
 
     def _point_to_lower_chunk_idx(self, xyz):
         return (
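The new property assembles the affine in nanometres with `shift_ng_transfrom` and then divides the top three rows by 1e6, leaving the homogeneous bottom row intact, so the result is expressed in millimetres. A toy check of that unit conversion with assumed values:

    import numpy as np

    # hypothetical affine in nm: 20000 nm voxels, 1 mm (1e6 nm) offset
    affine_nm = np.diag([20000.0, 20000.0, 20000.0, 1.0])
    affine_nm[:3, 3] = 1e6

    affine_mm = affine_nm.copy()
    affine_mm[:3, :] /= 1e6    # same rescaling as the property's last step
    print(np.diag(affine_mm))  # [0.02 0.02 0.02 1.  ] -> mm voxel size
    print(affine_mm[:3, 3])    # [1. 1. 1.] -> mm offset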
@@ -508,9 +534,9 @@ class NeuroglancerScale:
         for dim in range(3):
             if bbox_.shape[dim] < 1:
                 logger.warning(
-                    f"Bounding box in voxel space will be enlarged to
+                    f"Bounding box in voxel space will be enlarged to by {self.res_mm[dim]} along axis {dim}."
                 )
-                bbox_.maxpoint[dim] = bbox_.maxpoint[dim] +
+                bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + self.res_mm[dim]
 
         # extract minimum and maximum the chunk indices to be loaded
         gx0, gy0, gz0 = self._point_to_lower_chunk_idx(tuple(bbox_.minpoint))
@@ -533,7 +559,7 @@ class NeuroglancerScale:
         # exact bounding box requested, to cut off undesired borders
         data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
         x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
-        xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
+        xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
         offset = tuple(bbox_.minpoint)
 
         # build the nifti image
@@ -552,7 +578,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
 
     @staticmethod
     def _fragmentinfo(url: str) -> Dict[str, Union[str, np.ndarray, Dict]]:
-        """
+        """Prepare basic mesh fragment information from url."""
         return {
             "url": url,
             "transform_nm": np.array(requests.HttpRequest(f"{url}/transform.json").data),
@@ -568,7 +594,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
         elif isinstance(resource, dict):
             self._meshes = {n: self._fragmentinfo(u) for n, u in resource.items()}
         else:
-            raise ValueError(f"Resource
+            raise ValueError(f"Resource specification not understood for {self.__class__.__name__}: {resource}")
 
     @property
     def _url(self) -> Union[str, Dict[str, str]]:
@@ -609,7 +635,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
             result[name] = (f"{spec['url']}/{mesh_key}/{fragment_names[0]}", transform)
         else:
             # only one mesh was configures, so we might still
-            # see
+            # see multiple fragments under the mesh url
             for fragment_name in fragment_names:
                 result[fragment_name] = (f"{spec['url']}/{mesh_key}/{fragment_name}", transform)
siibra/volumes/providers/nifti.py:

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2025
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -17,7 +17,7 @@ from . import provider as _provider
 
 from ...commons import logger, resample_img_to_img
 from ...retrieval import requests
-from ...locations import
+from ...locations import pointcloud, boundingbox as _boundingbox
 
 from typing import Union, Dict, Tuple
 import nibabel as nib
@@ -65,28 +65,16 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
     def fragments(self):
         return [k for k in self._img_loaders if k is not None]
 
-    def get_boundingbox(self,
+    def get_boundingbox(self, **fetch_kwargs) -> "_boundingbox.BoundingBox":
         """
         Return the bounding box in physical coordinates of the union of
         fragments in this nifti volume.
 
         Parameters
         ----------
-        clip : bool, default: True
-            Whether to clip the background of the volume.
-        background : float, default: 0.0
-            The background value to clip.
-            Note
-            ----
-            To use it, clip must be True.
         fetch_kwargs:
             Not used
         """
-        if fetch_kwargs:
-            logger.warning(
-                "`volume.fetch()` keyword arguments supplied. Nifti volumes"
-                " cannot pass them for bounding box calculation."
-            )
         bbox = None
         for loader in self._img_loaders.values():
             img = loader()
@@ -95,19 +83,17 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
                     f"N-D NIfTI volume has shape {img.shape}, but "
                     f"bounding box considers only {img.shape[:3]}"
                 )
-
-
-
-
-            else:
-                shape = img.shape[:3]
-                next_bbox = _boundingbox.BoundingBox(
-                    (0, 0, 0), shape, space=None
-                ).transform(img.affine)
+            shape = img.shape[:3]
+            next_bbox = _boundingbox.BoundingBox(
+                (0, 0, 0), shape, space=None
+            ).transform(img.affine)
             bbox = next_bbox if bbox is None else bbox.union(next_bbox)
         return bbox
 
     def _merge_fragments(self) -> nib.Nifti1Image:
+        """
+        Merge all fragments this volume contains into one Nifti1Image.
+        """
         bbox = self.get_boundingbox(clip=False, background=0.0)
         num_conflicts = 0
         result = None
@@ -243,7 +229,7 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
 
         Returns:
         --------
-
+            PointCloud
         """
 
         from skimage.feature.peak import peak_local_max
@@ -257,7 +243,7 @@ class NiftiProvider(_provider.VolumeProvider, srctype="nii"):
             min_distance=dist,
         )
         return (
-
+            pointcloud.PointCloud(
                 [np.dot(img.affine, [x, y, z, 1])[:3] for x, y, z in voxels],
                 space=self.space,
            ),
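This hunk pairs with the siibra/locations/{pointset.py → pointcloud.py} rename in the file list above: peak extraction now returns a `PointCloud` of physical coordinates, each voxel index mapped through the image affine. A hedged usage sketch, assuming the public constructor accepts the same arguments as the internal call in this hunk:

    import numpy as np
    from siibra.locations import pointcloud  # renamed from `pointset` in this release

    # hypothetical peak voxels and affine; the provider derives these from the image
    affine = np.eye(4)
    voxels = [(10, 20, 30), (11, 21, 31)]
    points = pointcloud.PointCloud(
        [np.dot(affine, [x, y, z, 1])[:3] for x, y, z in voxels],
        space=None,  # the provider passes its own reference space here
    )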