siibra 1.0a1__1-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of siibra might be problematic. Click here for more details.
- siibra/VERSION +1 -0
- siibra/__init__.py +164 -0
- siibra/commons.py +823 -0
- siibra/configuration/__init__.py +17 -0
- siibra/configuration/configuration.py +189 -0
- siibra/configuration/factory.py +589 -0
- siibra/core/__init__.py +16 -0
- siibra/core/assignment.py +110 -0
- siibra/core/atlas.py +239 -0
- siibra/core/concept.py +308 -0
- siibra/core/parcellation.py +387 -0
- siibra/core/region.py +1223 -0
- siibra/core/space.py +131 -0
- siibra/core/structure.py +111 -0
- siibra/exceptions.py +63 -0
- siibra/experimental/__init__.py +19 -0
- siibra/experimental/contour.py +61 -0
- siibra/experimental/cortical_profile_sampler.py +57 -0
- siibra/experimental/patch.py +98 -0
- siibra/experimental/plane3d.py +256 -0
- siibra/explorer/__init__.py +17 -0
- siibra/explorer/url.py +222 -0
- siibra/explorer/util.py +87 -0
- siibra/features/__init__.py +117 -0
- siibra/features/anchor.py +224 -0
- siibra/features/connectivity/__init__.py +33 -0
- siibra/features/connectivity/functional_connectivity.py +57 -0
- siibra/features/connectivity/regional_connectivity.py +494 -0
- siibra/features/connectivity/streamline_counts.py +27 -0
- siibra/features/connectivity/streamline_lengths.py +27 -0
- siibra/features/connectivity/tracing_connectivity.py +30 -0
- siibra/features/dataset/__init__.py +17 -0
- siibra/features/dataset/ebrains.py +90 -0
- siibra/features/feature.py +970 -0
- siibra/features/image/__init__.py +27 -0
- siibra/features/image/image.py +115 -0
- siibra/features/image/sections.py +26 -0
- siibra/features/image/volume_of_interest.py +88 -0
- siibra/features/tabular/__init__.py +24 -0
- siibra/features/tabular/bigbrain_intensity_profile.py +77 -0
- siibra/features/tabular/cell_density_profile.py +298 -0
- siibra/features/tabular/cortical_profile.py +322 -0
- siibra/features/tabular/gene_expression.py +257 -0
- siibra/features/tabular/layerwise_bigbrain_intensities.py +62 -0
- siibra/features/tabular/layerwise_cell_density.py +95 -0
- siibra/features/tabular/receptor_density_fingerprint.py +192 -0
- siibra/features/tabular/receptor_density_profile.py +110 -0
- siibra/features/tabular/regional_timeseries_activity.py +294 -0
- siibra/features/tabular/tabular.py +139 -0
- siibra/livequeries/__init__.py +19 -0
- siibra/livequeries/allen.py +352 -0
- siibra/livequeries/bigbrain.py +197 -0
- siibra/livequeries/ebrains.py +145 -0
- siibra/livequeries/query.py +49 -0
- siibra/locations/__init__.py +91 -0
- siibra/locations/boundingbox.py +454 -0
- siibra/locations/location.py +115 -0
- siibra/locations/point.py +344 -0
- siibra/locations/pointcloud.py +349 -0
- siibra/retrieval/__init__.py +27 -0
- siibra/retrieval/cache.py +233 -0
- siibra/retrieval/datasets.py +389 -0
- siibra/retrieval/exceptions/__init__.py +27 -0
- siibra/retrieval/repositories.py +769 -0
- siibra/retrieval/requests.py +659 -0
- siibra/vocabularies/__init__.py +45 -0
- siibra/vocabularies/gene_names.json +29176 -0
- siibra/vocabularies/receptor_symbols.json +210 -0
- siibra/vocabularies/region_aliases.json +460 -0
- siibra/volumes/__init__.py +23 -0
- siibra/volumes/parcellationmap.py +1279 -0
- siibra/volumes/providers/__init__.py +20 -0
- siibra/volumes/providers/freesurfer.py +113 -0
- siibra/volumes/providers/gifti.py +165 -0
- siibra/volumes/providers/neuroglancer.py +736 -0
- siibra/volumes/providers/nifti.py +266 -0
- siibra/volumes/providers/provider.py +107 -0
- siibra/volumes/sparsemap.py +468 -0
- siibra/volumes/volume.py +892 -0
- siibra-1.0.0a1.dist-info/LICENSE +201 -0
- siibra-1.0.0a1.dist-info/METADATA +160 -0
- siibra-1.0.0a1.dist-info/RECORD +84 -0
- siibra-1.0.0a1.dist-info/WHEEL +5 -0
- siibra-1.0.0a1.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,736 @@
|
|
|
1
|
+
# Copyright 2018-2024
|
|
2
|
+
# Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
|
|
3
|
+
|
|
4
|
+
# Licensed under the Apache License, Version 2.0 (the "License");
|
|
5
|
+
# you may not use this file except in compliance with the License.
|
|
6
|
+
# You may obtain a copy of the License at
|
|
7
|
+
|
|
8
|
+
# http://www.apache.org/licenses/LICENSE-2.0
|
|
9
|
+
|
|
10
|
+
# Unless required by applicable law or agreed to in writing, software
|
|
11
|
+
# distributed under the License is distributed on an "AS IS" BASIS,
|
|
12
|
+
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
|
13
|
+
# See the License for the specific language governing permissions and
|
|
14
|
+
# limitations under the License.
|
|
15
|
+
|
|
16
|
+
from . import provider as _provider
|
|
17
|
+
|
|
18
|
+
from ...commons import (
|
|
19
|
+
logger,
|
|
20
|
+
MapType,
|
|
21
|
+
merge_meshes,
|
|
22
|
+
SIIBRA_MAX_FETCH_SIZE_GIB,
|
|
23
|
+
QUIET,
|
|
24
|
+
resample_img_to_img
|
|
25
|
+
)
|
|
26
|
+
from ...retrieval import requests, cache
|
|
27
|
+
from ...locations import boundingbox as _boundingbox
|
|
28
|
+
|
|
29
|
+
from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
|
|
30
|
+
from neuroglancer_scripts.http_accessor import HttpAccessor
|
|
31
|
+
from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh
|
|
32
|
+
from io import BytesIO
|
|
33
|
+
import nibabel as nib
|
|
34
|
+
import os
|
|
35
|
+
import numpy as np
|
|
36
|
+
from typing import Union, Dict, Tuple
|
|
37
|
+
import json
|
|
38
|
+
|
|
39
|
+
|
|
40
|
+
def shift_ng_transfrom(
    transform_nm: np.ndarray, scale_resolution_nm: np.ndarray, max_resolution_nm: np.ndarray
) -> np.ndarray:
    """
    Helper method to get nifti standard affine.

    transform.json files stored with neuroglancer precomputed images and
    meshes are meant to be used for neuroglancer viewers and hence they are
    not representative of the affine in other tools. This method shifts back
    half a voxel in each axis.
    (see https://neuroglancer-scripts.readthedocs.io/en/latest/neuroglancer-info.html#different-conventions-for-coordinate-transformations)

    Parameters
    ----------
    transform_nm: np.ndarray
        Transform array created for displaying an image correctly from
        neuroglancer precomputed format in neuroglancer viewer.
    scale_resolution_nm: np.ndarray
        The per-axis voxel resolution (in nm) of the scale for which the
        affine is requested.
    max_resolution_nm: np.ndarray
        The voxel resolution of the highest level of resolution.

    Returns
    -------
    np.ndarray
        Standard affine in nm
    """
    # scale voxel indices to nanometers before applying the viewer transform
    scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
    affine = np.dot(transform_nm, scaling)
    # undo neuroglancer's corner-based voxel convention: shift the origin by
    # half a voxel of the highest-resolution scale
    affine[:3, 3] += (max_resolution_nm * 0.5)
    return affine
|
|
69
|
+
|
|
70
|
+
|
|
71
|
+
class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):
    """
    Provides image data from a neuroglancer precomputed source, which may
    consist of a single volume or multiple named fragment volumes.
    """

    def __init__(self, url: Union[str, Dict[str, str]]):
        """
        Parameters
        ----------
        url: str or dict of str
            Either the url of one precomputed volume, or a dictionary
            mapping fragment names to urls of fragment volumes.
        """
        _provider.VolumeProvider.__init__(self)
        self._init_url = url
        # TODO duplicated code to giftimesh
        if isinstance(url, str):  # one single image to load
            self._fragments = {None: NeuroglancerVolume(url)}
        elif isinstance(url, dict):  # assuming multiple fragment images
            self._fragments = {n: NeuroglancerVolume(u) for n, u in url.items()}
        else:
            raise ValueError(f"Invalid url specified for {self.__class__.__name__}: {url}")

    @property
    def _url(self) -> Union[str, Dict[str, str]]:
        return self._init_url

    def fetch(
        self,
        fragment: str = None,
        resolution_mm: float = None,
        voi: _boundingbox.BoundingBox = None,
        **kwargs
    ) -> nib.Nifti1Image:
        """
        Fetch 3D image data from neuroglancer volume.

        Parameters
        ----------
        fragment: str, optional
            The name of a fragment volume to fetch, if any. For example,
            some volumes are split into left and right hemisphere fragments.
            See :func:`~siibra.volumes.Volume.fragments`
        resolution_mm: float, default: None (i.e, lowest)
            Desired resolution in millimeters.
            Tip
            ---
            Set to -1 to get the highest resolution. (might need to set max_bytes)
        voi: BoundingBox
            optional specification of a volume of interest to fetch.
        max_bytes: float: Default: NeuroglancerVolume.MAX_BYTES
            Maximum allowable size (in bytes) for downloading the image. siibra
            will attempt to find the highest resolution image with a size less
            than this value.
        """

        result = None

        # A map index may be supplied via kwargs; it can determine both the
        # fragment to fetch and the label to mask with.
        index = kwargs.pop('index', None)
        if index is not None:
            if fragment is not None:
                assert fragment == index.fragment
            fragment = index.fragment

        if len(self._fragments) > 1:
            if fragment is None:
                logger.info(
                    f"Merging fragments [{', '.join(self._fragments.keys())}]. "
                    f"You can select one using `fragment` kwarg."
                )
                result = self._merge_fragments(resolution_mm=resolution_mm, voi=voi, **kwargs)
            else:
                # case-insensitive substring match against configured fragment names
                matched_names = [n for n in self._fragments if fragment.lower() in n.lower()]
                if len(matched_names) != 1:
                    raise ValueError(
                        f"Requested fragment '{fragment}' could not be matched uniquely "
                        f"to [{', '.join(self._fragments)}]"
                    )
                else:
                    result = self._fragments[matched_names[0]].fetch(
                        resolution_mm=resolution_mm, voi=voi, **kwargs
                    )
        else:
            assert len(self._fragments) > 0
            fragment_name, ngvol = next(iter(self._fragments.items()))
            if fragment is not None:
                assert fragment.lower() in fragment_name.lower()
            result = ngvol.fetch(
                resolution_mm=resolution_mm, voi=voi, **kwargs
            )

        # if a label is specified (directly or via the index), mask the image.
        if result is not None:
            if 'label' in kwargs:
                label = kwargs['label']
            elif index is not None:
                # NOTE: this previously tested `'index' in kwargs`, which was
                # always False because the index had been popped above - so the
                # index's label was silently ignored. Fixed to use the popped
                # index object.
                label = index.label
            else:
                label = None
            if label is not None:
                result = nib.Nifti1Image(
                    (np.asanyarray(result.dataobj) == label).astype('uint8'),
                    result.affine,
                    dtype='uint8',
                )

        return result

    def get_boundingbox(self, **fetch_kwargs) -> "_boundingbox.BoundingBox":
        """
        Return the bounding box in physical coordinates of the union of
        fragments in this neuroglancer volume.

        Parameters
        ----------
        fetch_kwargs:
            key word arguments that are used for fetching volumes,
            such as voi or resolution_mm.
        """
        bbox = None
        for frag in self._fragments.values():
            if len(frag.shape) > 3:
                logger.warning(
                    f"N-D Neuroglancer volume has shape {frag.shape}, but "
                    f"bounding box considers only {frag.shape[:3]}"
                )
            resolution_mm = fetch_kwargs.get("resolution_mm")
            if resolution_mm is None:
                # default: use the scale-0 geometry of the fragment
                affine = frag.affine
                shape = frag.shape[:3]
            else:
                # use the geometry of the scale matching the requested resolution
                scale = frag._select_scale(resolution_mm=resolution_mm)
                affine = scale.affine
                shape = scale.size[:3]
            next_bbox = _boundingbox.BoundingBox(
                (0, 0, 0), shape, space=None
            ).transform(affine)
            bbox = next_bbox if bbox is None else bbox.union(next_bbox)
        return bbox

    def _merge_fragments(
        self,
        resolution_mm: float = None,
        voi: _boundingbox.BoundingBox = None,
        **kwargs
    ) -> nib.Nifti1Image:
        """
        Fetch all fragment volumes and resample them into one common image,
        warning if fragments write conflicting nonzero voxels.
        """
        with QUIET:
            bbox = self.get_boundingbox(
                clip=False,
                background=0,
                resolution_mm=resolution_mm,
                voi=voi
            )

        # Determine the download size limit once, outside the loop, so every
        # fragment uses the same limit. Accept the documented 'max_bytes'
        # spelling as well as the legacy 'maxbytes'.
        max_bytes = kwargs.pop("max_bytes", kwargs.pop("maxbytes", NeuroglancerVolume.MAX_BYTES))

        num_conflicts = 0
        result = None
        for frag_vol in self._fragments.values():
            frag_scale = frag_vol._select_scale(
                resolution_mm=resolution_mm,
                bbox=voi,
                max_bytes=max_bytes
            )
            img = frag_scale.fetch(voi=voi)
            if result is None:
                # build the empty result image with its own affine and voxel space
                s0 = np.identity(4)
                s0[:3, -1] = list(bbox.minpoint.transform(np.linalg.inv(img.affine)))
                result_affine = np.dot(img.affine, s0)  # adjust global bounding box offset to get global affine
                voxdims = np.asanyarray(np.ceil(
                    bbox.transform(np.linalg.inv(result_affine)).shape  # transform to the voxel space
                ), dtype="int")
                result_arr = np.zeros(voxdims, dtype=img.dataobj.dtype)
                result = nib.Nifti1Image(dataobj=result_arr, affine=result_affine)

            # resample to merge template and update it
            resampled_img = resample_img_to_img(source_img=img, target_img=result)
            arr = np.asanyarray(resampled_img.dataobj)
            nonzero_voxels = arr != 0
            num_conflicts += np.count_nonzero(result_arr[nonzero_voxels])
            result_arr[nonzero_voxels] = arr[nonzero_voxels]

        if num_conflicts > 0:
            num_voxels = np.count_nonzero(result_arr)
            logger.warning(
                f"Merging fragments required to overwrite {num_conflicts} "
                f"conflicting voxels ({num_conflicts / num_voxels * 100.:2.3f}%)."
            )

        return result
|
|
250
|
+
|
|
251
|
+
|
|
252
|
+
class NeuroglancerVolume:
    """
    A single neuroglancer precomputed image volume with one or more
    resolution scales. Metadata ('info' and 'transform.json') is fetched
    lazily from `self.url` on first access.
    """

    USE_CACHE = False  # Whether to keep fetched data in local cache
    MAX_BYTES = SIIBRA_MAX_FETCH_SIZE_GIB * 1024 ** 3  # Number of bytes at which an image array is considered too large to fetch

    def __init__(self, url: str):
        """
        Parameters
        ----------
        url: str
            Base url of a neuroglancer precomputed dataset; '<url>/info'
            and '<url>/transform.json' are fetched lazily from it.
        """
        # TODO do we still need VolumeProvider.__init__ ? given it's not a subclass of VolumeProvider?
        _provider.VolumeProvider.__init__(self)
        assert isinstance(url, str)
        self.url = url
        # lazily populated by _bootstrap() / the transform_nm property
        self._scales_cached = None
        self._info = None
        self._transform_nm = None
        self._io: PrecomputedIO = None

    @property
    def transform_nm(self) -> np.ndarray:
        """
        This is the transformation matrix created to cater neuroglancer viewer
        for a neuroglancer precomputed images.
        Fetched from '<url>/transform.json'; falls back to identity (with a
        warning) if the file is not available. Cached after the first access.
        """
        if self._transform_nm is not None:
            return self._transform_nm
        try:
            res = requests.HttpRequest(f"{self.url}/transform.json").get()
        except requests.SiibraHttpRequestError:
            res = None
        if res is not None:
            self._transform_nm = np.array(res)
            return self._transform_nm

        self._transform_nm = np.identity(4)
        logger.warning(f"No transform.json found at {self.url}, using identity.")
        return self._transform_nm

    @transform_nm.setter
    def transform_nm(self, val):
        # allows overriding the remote transform with a custom one
        self._transform_nm = val

    @property
    def io(self) -> PrecomputedIO:
        """Lazily constructed neuroglancer-scripts IO accessor for this dataset."""
        if self._io is None:
            accessor = HttpAccessor(self.url)
            self._io = get_IO_for_existing_dataset(accessor)
        return self._io

    @property
    def map_type(self):
        """MapType.LABELLED for segmentations, MapType.STATISTICAL otherwise (from the 'type' field of the info file)."""
        if self._info is None:
            self._bootstrap()
        return (
            MapType.LABELLED
            if self._info.get("type") == "segmentation"
            else MapType.STATISTICAL
        )

    @map_type.setter
    def map_type(self, val):
        # the map type is derived from the info file; a setter value is ignored
        if val is not None:
            logger.debug(
                "NeuroglancerVolume can determine its own maptype from self._info.get('type')"
            )

    def _bootstrap(self):
        """Fetch the 'info' file and build the sorted list of resolution scales."""
        self._info = requests.HttpRequest(f"{self.url}/info", func=lambda b: json.loads(b.decode())).get()
        # sorted by NeuroglancerScale.__lt__, i.e. highest resolution first
        self._scales_cached = sorted(
            [NeuroglancerScale(self, i) for i in self._info["scales"]]
        )

    @property
    def dtype(self):
        """Numpy dtype of the voxel data, from the 'data_type' field of the info file."""
        if self._info is None:
            self._bootstrap()
        return np.dtype(self._info["data_type"])

    @property
    def scales(self):
        """Sorted list of NeuroglancerScale objects, bootstrapping metadata on first access."""
        if self._scales_cached is None:
            self._bootstrap()
        return self._scales_cached

    @property
    def shape(self):
        # return the shape of the scale 0 array
        return self.scales[0].size

    @property
    def affine(self):
        # return the affine matrix of the scale 0 data
        return self.scales[0].affine

    def fetch(
        self,
        resolution_mm: float = None,
        voi: _boundingbox.BoundingBox = None,
        max_bytes: float = MAX_BYTES,
        **kwargs
    ):
        """
        Fetch image data as a Nifti1Image, choosing the best scale for the
        requested resolution that stays below `max_bytes`.
        Remaining kwargs are forwarded to NeuroglancerScale.fetch().
        """
        # the caller has to make sure voi is defined in the correct reference space
        scale = self._select_scale(resolution_mm=resolution_mm, bbox=voi, max_bytes=max_bytes)
        return scale.fetch(voi=voi, **kwargs)

    def get_shape(self, resolution_mm=None, max_bytes: float = MAX_BYTES):
        """Return the voxel array size of the scale matching the given resolution."""
        scale = self._select_scale(resolution_mm=resolution_mm, max_bytes=max_bytes)
        return scale.size

    def is_float(self):
        """True if the voxel data type is a floating point type."""
        return self.dtype.kind == "f"

    def _select_scale(
        self,
        resolution_mm: float,
        max_bytes: float = MAX_BYTES,
        bbox: _boundingbox.BoundingBox = None
    ) -> 'NeuroglancerScale':
        """
        Choose the scale that best matches the requested resolution while
        keeping the estimated download size within `max_bytes`.

        Parameters
        ----------
        resolution_mm: float
            None selects the lowest available resolution; a negative value
            selects the highest; otherwise the coarsest scale that still
            resolves the requested value is used.
        max_bytes: float
            Upper bound for the estimated fetch size; coarser scales are
            substituted (with a warning) until the estimate fits.
        bbox: BoundingBox, optional
            Bounding box used for the size estimate.

        Raises
        ------
        RuntimeError
            If even the coarsest scale exceeds `max_bytes`.
        """
        if resolution_mm is None:
            suitable = self.scales
        elif resolution_mm < 0:
            suitable = [self.scales[0]]
        else:
            suitable = sorted(s for s in self.scales if s.resolves(resolution_mm))

        if len(suitable) > 0:
            # coarsest of the scales that still resolve the request
            scale = suitable[-1]
        else:
            # no scale resolves the request; fall back to the highest resolution
            scale = self.scales[0]
            xyz_res = ['{:.6f}'.format(r).rstrip('0') for r in scale.res_mm]
            if all(r.startswith(str(resolution_mm)) for r in xyz_res):
                logger.info(f"Closest resolution to requested is {', '.join(xyz_res)} mm.")
            else:
                logger.warning(
                    f"Requested resolution {resolution_mm} is not available. "
                    f"Falling back to the highest possible resolution of "
                    f"{', '.join(xyz_res)} mm."
                )

        # step down through coarser scales until the estimated size fits
        scale_changed = False
        while scale._estimate_nbytes(bbox) > max_bytes:
            scale = scale.next()
            scale_changed = True
            if scale is None:
                raise RuntimeError(
                    f"Fetching bounding box {bbox} is infeasible "
                    f"relative to the limit of {max_bytes / 1024**3}GiB."
                )
        if scale_changed:
            logger.warning(
                f"Resolution was reduced to {scale.res_mm} to provide a "
                f"feasible volume size of {max_bytes / 1024**3} GiB. Set `max_bytes` to"
                f" fetch in the resolution requested."
            )
        return scale
|
|
404
|
+
|
|
405
|
+
|
|
406
|
+
class NeuroglancerScale:
    """One scale of a NeuroglancerVolume."""

    # class-level default; set on the instance once the first warning was shown
    color_warning_issued = False

    def __init__(self, volume: "NeuroglancerVolume", scaleinfo: dict):
        """
        Parameters
        ----------
        volume: NeuroglancerVolume
            The volume this scale belongs to.
        scaleinfo: dict
            One entry of the "scales" list of a precomputed 'info' file.
        """
        self.volume = volume
        self.chunk_sizes = np.array(scaleinfo["chunk_sizes"]).squeeze()
        self.encoding = scaleinfo["encoding"]
        self.key = scaleinfo["key"]
        self.res_nm = np.array(scaleinfo["resolution"]).squeeze()
        self.size = scaleinfo["size"]
        self.voxel_offset = np.array(scaleinfo["voxel_offset"])

    @property
    def res_mm(self):
        """Voxel resolution in millimeters."""
        return self.res_nm / 1e6

    def resolves(self, resolution_mm):
        """Test whether the resolution of this scale is sufficient to provide the given resolution."""
        return all(r / 1e6 <= resolution_mm for r in self.res_nm)

    def __lt__(self, other):
        """Sort scales by resolution (strictly finer on all three axes)."""
        return all(self.res_nm[i] < other.res_nm[i] for i in range(3))

    def __repr__(self):
        return str(self)

    def __str__(self):
        return f"{self.__class__.__name__} {self.key}"

    def _estimate_nbytes(self, bbox: "_boundingbox.BoundingBox" = None):
        """Estimate the size of the image array to be fetched in bytes, given a bounding box."""
        if bbox is None:
            bbox_ = _boundingbox.BoundingBox((0, 0, 0), self.size, space=None)
        else:
            bbox_ = bbox.transform(np.linalg.inv(self.affine))
        result = self.volume.dtype.itemsize * bbox_.volume
        logger.debug(
            f"Approximate size for fetching resolution "
            f"({', '.join(map('{:.6f}'.format, self.res_mm))}) mm "
            f"is {result / 1024**3:.5f} GiB."
        )
        return result

    def next(self):
        """Returns the next scale in this volume, or None if this is the last."""
        my_index = self.volume.scales.index(self)
        # NOTE: the bound was previously `len(self.volume.scales)`, which
        # raised IndexError on the last scale instead of returning None as
        # documented (and as _select_scale expects).
        if my_index < len(self.volume.scales) - 1:
            return self.volume.scales[my_index + 1]
        else:
            return None

    def prev(self):
        """Returns the previous scale in this volume, or None if this is the first."""
        # (removed a leftover debug print of the scale index)
        my_index = self.volume.scales.index(self)
        if my_index > 0:
            return self.volume.scales[my_index - 1]
        else:
            return None

    @property
    def affine(self):
        """Nifti-style affine (in mm) of this scale, derived from the viewer transform."""
        # NOTE(review): only the x-component of the highest scale's resolution
        # is passed as max_resolution_nm, so the half-voxel shift is applied
        # isotropically - confirm this is intended for anisotropic volumes.
        affine_ = shift_ng_transfrom(
            transform_nm=self.volume.transform_nm,
            scale_resolution_nm=self.res_nm,
            max_resolution_nm=self.volume.scales[0].res_nm[0],
        )
        affine_[:3, :] /= 1e6  # nm -> mm
        return affine_

    def _point_to_lower_chunk_idx(self, xyz):
        """Chunk grid index containing the given voxel point, rounded down."""
        return (
            np.floor((np.array(xyz) - self.voxel_offset) / self.chunk_sizes)
            .astype("int")
            .ravel()
        )

    def _point_to_upper_chunk_idx(self, xyz):
        """Chunk grid index just beyond the given voxel point, rounded up."""
        return (
            np.ceil((np.array(xyz) - self.voxel_offset) / self.chunk_sizes)
            .astype("int")
            .ravel()
        )

    def _read_chunk(self, gx, gy, gz, channel: int = None):
        """
        Read one chunk at chunk-grid position (gx, gy, gz) and return its
        zyx voxel array for the requested color channel (default: first).
        """
        if any(v < 0 for v in (gx, gy, gz)):
            raise RuntimeError('Negative tile index observed - you have likely requested fetch() with a voi specification ranging outside the actual data.')
        if self.volume.USE_CACHE:
            cachefile = cache.CACHE.build_filename(
                "{}_{}_{}_{}_{}".format(self.volume.url, self.key, gx, gy, gz),
                suffix=".npy",
            )
            if os.path.isfile(cachefile):
                return np.load(cachefile)

        # chunk voxel range, clipped at the volume boundary
        x0 = gx * self.chunk_sizes[0]
        y0 = gy * self.chunk_sizes[1]
        z0 = gz * self.chunk_sizes[2]
        x1, y1, z1 = np.minimum(self.chunk_sizes + [x0, y0, z0], self.size)
        chunk_czyx = self.volume.io.read_chunk(self.key, (x0, x1, y0, y1, z0, z1))
        if channel is None:
            channel = 0
            if chunk_czyx.shape[0] > 1 and not self.color_warning_issued:
                logger.warning(
                    f"The volume has {chunk_czyx.shape[0]} color channels. "
                    "Returning the first channel now but you can specify one "
                    "with 'channel' keyword."
                )
                self.color_warning_issued = True
        elif channel + 1 > chunk_czyx.shape[0]:
            raise ValueError(f"There are only {chunk_czyx.shape[0]} color channels.")
        chunk_zyx = chunk_czyx[channel]

        if self.volume.USE_CACHE:
            np.save(cachefile, chunk_zyx)
        return chunk_zyx

    def fetch(self, voi: "_boundingbox.BoundingBox" = None, **kwargs):
        """
        Fetch the (sub)volume of this scale as a Nifti1Image, optionally
        restricted to a bounding box in physical coordinates.
        A 'channel' kwarg selects the color channel for multi-channel data.
        """

        # define the bounding box in this scale's voxel space
        if voi is None:
            bbox_ = _boundingbox.BoundingBox((0, 0, 0), self.size, space=None)
        else:
            bbox_ = voi.transform(np.linalg.inv(self.affine))

        # guarantee at least one voxel extent along every axis
        for dim in range(3):
            if bbox_.shape[dim] < 1:
                logger.warning(
                    f"Bounding box in voxel space will be enlarged by {self.res_mm[dim]} along axis {dim}."
                )
                bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + self.res_mm[dim]

        # extract minimum and maximum the chunk indices to be loaded
        gx0, gy0, gz0 = self._point_to_lower_chunk_idx(tuple(bbox_.minpoint))
        gx1, gy1, gz1 = self._point_to_upper_chunk_idx(tuple(bbox_.maxpoint))

        # create requested data volume, and fill it with the required chunk data
        shape_zyx = np.array([gz1 - gz0, gy1 - gy0, gx1 - gx0]) * self.chunk_sizes[::-1]
        data_zyx = np.zeros(shape_zyx, dtype=self.volume.dtype)
        for gx in range(gx0, gx1):
            x0 = (gx - gx0) * self.chunk_sizes[0]
            for gy in range(gy0, gy1):
                y0 = (gy - gy0) * self.chunk_sizes[1]
                for gz in range(gz0, gz1):
                    z0 = (gz - gz0) * self.chunk_sizes[2]
                    chunk = self._read_chunk(gx, gy, gz, kwargs.get("channel"))
                    z1, y1, x1 = np.array([z0, y0, x0]) + chunk.shape
                    data_zyx[z0:z1, y0:y1, x0:x1] = chunk

        # determine the remaining offset from the "chunk mosaic" to the
        # exact bounding box requested, to cut off undesired borders
        data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
        x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
        xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
        offset = tuple(bbox_.minpoint)

        # build the nifti image
        trans = np.identity(4)[[2, 1, 0, 3], :]  # zyx -> xyz
        shift = np.c_[np.identity(4)[:, :3], np.r_[offset, 1]]
        return nib.Nifti1Image(
            data_zyx[z0: z0 + zd, y0: y0 + yd, x0: x0 + xd],
            np.dot(self.affine, np.dot(shift, trans)),
        )
|
|
572
|
+
|
|
573
|
+
|
|
574
|
+
class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompmesh"):
    """
    A surface mesh provided as neuroglancer precomputed mesh.
    """

    @staticmethod
    def _fragmentinfo(url: str) -> Dict[str, Union[str, np.ndarray, Dict]]:
        """Prepare basic mesh fragment information from url."""
        return {
            "url": url,
            "transform_nm": np.array(requests.HttpRequest(f"{url}/transform.json").data),
            "info": requests.HttpRequest(url=f"{url}/info", func=requests.DECODERS['.json']).data
        }

    # TODO check resource typing?
    def __init__(self, resource: Union[str, dict], volume=None):
        """
        Parameters
        ----------
        resource: str or dict
            Either the url of a single precomputed mesh, or a dictionary
            mapping fragment names to urls of fragment meshes.
        volume: optional
            The volume object this mesh belongs to, if any.
        """
        self.volume = volume
        self._init_url = resource
        if isinstance(resource, str):
            self._meshes = {None: self._fragmentinfo(resource)}
        elif isinstance(resource, dict):
            self._meshes = {n: self._fragmentinfo(u) for n, u in resource.items()}
        else:
            raise ValueError(f"Resource specification not understood for {self.__class__.__name__}: {resource}")

    @property
    def _url(self) -> Union[str, Dict[str, str]]:
        return self._init_url

    def get_boundingbox(self, clip=False, background=0.0, **fetch_kwargs) -> '_boundingbox.BoundingBox':
        """
        Bounding box calculation is not yet implemented for meshes.
        """
        raise NotImplementedError(
            f"Bounding box access to {self.__class__.__name__} objects not yet implemented."
        )

    def _get_fragment_info(self, meshindex: int) -> Dict[str, Tuple[str, np.ndarray]]:
        """
        Extract available fragment urls with their transforms for the given
        mesh index. Returns a dict mapping fragment names to
        (url, transform_nm) tuples.
        """
        result = {}

        for name, spec in self._meshes.items():
            mesh_key = spec.get('info', {}).get('mesh')
            meshurl = f"{spec['url']}/{mesh_key}/{str(meshindex)}:0"
            transform = spec.get('transform_nm')
            try:
                meshinfo = requests.HttpRequest(url=meshurl, func=requests.DECODERS['.json']).data
            except requests.SiibraHttpRequestError:
                # mesh not available under this url; skip it
                continue
            fragment_names = meshinfo.get('fragments')

            if len(fragment_names) == 0:
                raise RuntimeError(f"No fragments found at {meshurl}")
            elif len(self._meshes) > 1:
                # multiple meshes were configured, so we expect only one fragment under each mesh url
                if len(fragment_names) > 1:
                    raise RuntimeError(
                        f"{self.__class__.__name__} was configured with multiple mesh fragments "
                        f"({', '.join(self._meshes.keys())}), but unexpectedly even more fragmentations "
                        f"were found at {spec['url']}."
                    )
                result[name] = (f"{spec['url']}/{mesh_key}/{fragment_names[0]}", transform)
            else:
                # only one mesh was configured, so we might still
                # see multiple fragments under the mesh url
                for fragment_name in fragment_names:
                    result[fragment_name] = (f"{spec['url']}/{mesh_key}/{fragment_name}", transform)

        return result

    def _fetch_fragment(self, url: str, transform_nm: np.ndarray):
        """Download one precomputed mesh fragment and return its vertices/faces in millimeter space."""
        r = requests.HttpRequest(url, func=lambda b: BytesIO(b))
        (vertices_vox, triangles_vox) = read_precomputed_mesh(r.data)
        vertices, triangles = affine_transform_mesh(vertices_vox, triangles_vox, transform_nm)
        vertices /= 1e6  # nm -> mm
        return {'verts': vertices, 'faces': triangles}

    def fetch(self, label: int, fragment: str = None):
        """
        Fetches a particular mesh. Each mesh is a dictionary with keys:

        Parameters
        ----------
        label: int
            Label of the volume
        fragment: str, default: None
            A fragment name can be specified to choose from multiple fragments.

        Note
        ----
        If not specified, multiple fragments will be merged into one mesh.
        In such a case, the verts and faces arrays of different fragments
        are appended to one another.
        Returns
        -------
        dict
            - 'verts': An Nx3 array of vertex coordinates (in millimeter)
            - 'faces': an MX3 array containing connection data of vertices
            - 'name': Name of the of the mesh variant
        """

        # extract fragment information for the requested mesh
        fragment_infos = self._get_fragment_info(label)

        if fragment is None:

            # no fragment specified, return merged fragment meshes
            if len(fragment_infos) == 1:
                url, transform = next(iter(fragment_infos.values()))
                return self._fetch_fragment(url, transform)
            else:
                logger.info(
                    f"Fragments [{', '.join(fragment_infos.keys())}] are merged during fetch(). "
                    "You can select one of them using the 'fragment' parameter."
                )
                return merge_meshes([self._fetch_fragment(u, t) for u, t in fragment_infos.values()])

        else:

            # match fragment to available fragments, case-insensitively
            # (previously only the query was lowercased, so mixed-case
            # fragment names could never match)
            matched = [
                info for name, info in fragment_infos.items()
                if fragment.lower() in name.lower()
            ]
            if len(matched) == 1:
                url, transform = matched[0]
                return self._fetch_fragment(url, transform)
            else:
                raise ValueError(
                    f"The requested mesh fragment name '{fragment}' could not be resolved. "
                    f"Valid names are: {', '.join(fragment_infos.keys())}"
                )
|
|
706
|
+
|
|
707
|
+
|
|
708
|
+
class NeuroglancerSurfaceMesh(NeuroglancerMesh, srctype="neuroglancer/precompmesh/surface"):
    """
    Only shadows NeuroglancerMesh for the special surface srctype,
    which provides a mesh url plus a mesh index for identifying the surface.
    Behaves like NeuroglancerMesh otherwise.

    TODO this class might be replaced by implementing a default label index for the parent class.
    """
    def __init__(self, spec: str, **kwargs):
        """
        Parameters
        ----------
        spec: str
            String of the form "<url> <labelindex>"; the url and label index
            are split out and passed on to the parent class.
        """
        # Here we expect a string of the form "<url> <labelindex>",
        # and use this to set the url and label index in the parent class.
        assert ' ' in spec
        url, labelindex, *args = spec.split(' ')
        assert labelindex.isnumeric()
        self.label = int(labelindex)
        NeuroglancerMesh.__init__(self, resource=url, **kwargs)

    @property
    def fragments(self):
        """
        Returns the set of fragment names available
        for the mesh with this surface's label index.
        """
        # NOTE: a dead `meshindex=1` parameter was removed here; a property
        # cannot receive arguments, so it could never have been supplied.
        return set(self._get_fragment_info(self.label))

    def fetch(self, **kwargs):
        """Fetch the surface mesh for this instance's label index; see NeuroglancerMesh.fetch."""
        kwargs.setdefault('fragment', None)
        return super().fetch(label=self.label, **kwargs)
|