siibra 0.5a2__py3-none-any.whl → 1.0.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to a supported registry. It is provided for informational purposes only and reflects the changes between the two versions as they appear in that registry.

Potentially problematic release: this version of siibra has been flagged by the registry as possibly problematic.

Files changed (83)
  1. siibra/VERSION +1 -1
  2. siibra/__init__.py +20 -12
  3. siibra/commons.py +145 -90
  4. siibra/configuration/__init__.py +1 -1
  5. siibra/configuration/configuration.py +22 -17
  6. siibra/configuration/factory.py +177 -128
  7. siibra/core/__init__.py +1 -8
  8. siibra/core/{relation_qualification.py → assignment.py} +17 -14
  9. siibra/core/atlas.py +66 -35
  10. siibra/core/concept.py +81 -39
  11. siibra/core/parcellation.py +83 -67
  12. siibra/core/region.py +569 -263
  13. siibra/core/space.py +7 -39
  14. siibra/core/structure.py +111 -0
  15. siibra/exceptions.py +63 -0
  16. siibra/experimental/__init__.py +19 -0
  17. siibra/experimental/contour.py +61 -0
  18. siibra/experimental/cortical_profile_sampler.py +57 -0
  19. siibra/experimental/patch.py +98 -0
  20. siibra/experimental/plane3d.py +256 -0
  21. siibra/explorer/__init__.py +16 -0
  22. siibra/explorer/url.py +112 -52
  23. siibra/explorer/util.py +31 -9
  24. siibra/features/__init__.py +73 -8
  25. siibra/features/anchor.py +75 -196
  26. siibra/features/connectivity/__init__.py +1 -1
  27. siibra/features/connectivity/functional_connectivity.py +2 -2
  28. siibra/features/connectivity/regional_connectivity.py +99 -10
  29. siibra/features/connectivity/streamline_counts.py +1 -1
  30. siibra/features/connectivity/streamline_lengths.py +1 -1
  31. siibra/features/connectivity/tracing_connectivity.py +1 -1
  32. siibra/features/dataset/__init__.py +1 -1
  33. siibra/features/dataset/ebrains.py +3 -3
  34. siibra/features/feature.py +219 -110
  35. siibra/features/image/__init__.py +1 -1
  36. siibra/features/image/image.py +21 -13
  37. siibra/features/image/sections.py +1 -1
  38. siibra/features/image/volume_of_interest.py +1 -1
  39. siibra/features/tabular/__init__.py +1 -1
  40. siibra/features/tabular/bigbrain_intensity_profile.py +24 -13
  41. siibra/features/tabular/cell_density_profile.py +111 -69
  42. siibra/features/tabular/cortical_profile.py +82 -16
  43. siibra/features/tabular/gene_expression.py +117 -6
  44. siibra/features/tabular/layerwise_bigbrain_intensities.py +7 -9
  45. siibra/features/tabular/layerwise_cell_density.py +9 -24
  46. siibra/features/tabular/receptor_density_fingerprint.py +11 -6
  47. siibra/features/tabular/receptor_density_profile.py +12 -15
  48. siibra/features/tabular/regional_timeseries_activity.py +74 -18
  49. siibra/features/tabular/tabular.py +17 -8
  50. siibra/livequeries/__init__.py +1 -7
  51. siibra/livequeries/allen.py +139 -77
  52. siibra/livequeries/bigbrain.py +104 -128
  53. siibra/livequeries/ebrains.py +7 -4
  54. siibra/livequeries/query.py +1 -2
  55. siibra/locations/__init__.py +32 -25
  56. siibra/locations/boundingbox.py +153 -127
  57. siibra/locations/location.py +45 -80
  58. siibra/locations/point.py +97 -83
  59. siibra/locations/pointcloud.py +349 -0
  60. siibra/retrieval/__init__.py +1 -1
  61. siibra/retrieval/cache.py +107 -13
  62. siibra/retrieval/datasets.py +9 -14
  63. siibra/retrieval/exceptions/__init__.py +2 -1
  64. siibra/retrieval/repositories.py +147 -53
  65. siibra/retrieval/requests.py +64 -29
  66. siibra/vocabularies/__init__.py +2 -2
  67. siibra/volumes/__init__.py +7 -9
  68. siibra/volumes/parcellationmap.py +396 -253
  69. siibra/volumes/providers/__init__.py +20 -0
  70. siibra/volumes/providers/freesurfer.py +113 -0
  71. siibra/volumes/{gifti.py → providers/gifti.py} +29 -18
  72. siibra/volumes/{neuroglancer.py → providers/neuroglancer.py} +204 -92
  73. siibra/volumes/{nifti.py → providers/nifti.py} +64 -44
  74. siibra/volumes/providers/provider.py +107 -0
  75. siibra/volumes/sparsemap.py +159 -260
  76. siibra/volumes/volume.py +720 -152
  77. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/METADATA +25 -28
  78. siibra-1.0.0a1.dist-info/RECORD +84 -0
  79. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/WHEEL +1 -1
  80. siibra/locations/pointset.py +0 -198
  81. siibra-0.5a2.dist-info/RECORD +0 -74
  82. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/LICENSE +0 -0
  83. {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
- # Copyright 2018-2021
+ # Copyright 2018-2024
  # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH

  # Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,28 +12,66 @@
  # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  # See the License for the specific language governing permissions and
  # limitations under the License.
- """Handles reading and preparing neuroglancer files."""

- from . import volume
-
- from ..commons import logger, MapType, merge_meshes, NEUROGLANCER_MAX_GIB
- from ..retrieval import requests, cache
- from ..locations import boundingbox as _boundingbox
-
- from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset
- from neuroglancer_scripts.accessor import get_accessor_for_url
+ from . import provider as _provider
+
+ from ...commons import (
+     logger,
+     MapType,
+     merge_meshes,
+     SIIBRA_MAX_FETCH_SIZE_GIB,
+     QUIET,
+     resample_img_to_img
+ )
+ from ...retrieval import requests, cache
+ from ...locations import boundingbox as _boundingbox
+
+ from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
+ from neuroglancer_scripts.http_accessor import HttpAccessor
  from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh
  from io import BytesIO
  import nibabel as nib
  import os
  import numpy as np
  from typing import Union, Dict, Tuple
+ import json


- class NeuroglancerProvider(volume.VolumeProvider, srctype="neuroglancer/precomputed"):
+ def shift_ng_transfrom(
+     transform_nm: np.ndarray, scale_resolution_nm: np.ndarray, max_resolution_nm: np.ndarray
+ ) -> np.ndarray:
+     """
+     Helper method to get nifti standard affine.
+
+     transfrorm.json stored with neuroglancer precomputed images and meshes
+     are meant to be used for neuroglancer viewers and hence they are not
+     representative of the affine in other tools. This method shifts back
+     half a voxel in each axis.
+     (see https://neuroglancer-scripts.readthedocs.io/en/latest/neuroglancer-info.html#different-conventions-for-coordinate-transformations)
+
+     Parameters
+     ----------
+     transform_nm: np.ndarray
+         Transform array created for dispalying an image correctly from
+         neuroglancer precomputed format in neuroglancer viewer.
+     max_resolution_nm: np.ndarray
+         The voxel resolution of the highest level of resolution.
+
+     Returns
+     -------
+     np.ndarray
+         Standard affine in nm
+     """
+     scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
+     affine = np.dot(transform_nm, scaling)
+     affine[:3, 3] += (max_resolution_nm * 0.5)
+     return affine
+
+
+ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):

      def __init__(self, url: Union[str, Dict[str, str]]):
-         volume.VolumeProvider.__init__(self)
+         _provider.VolumeProvider.__init__(self)
          self._init_url = url
          # TODO duplicated code to giftimesh
          if isinstance(url, str): # one single image to load
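
The new shift_ng_transfrom helper corrects for neuroglancer's corner-based coordinate convention by scaling the stored transform to the requested voxel size and shifting the origin by half a voxel of the finest scale. A standalone sketch of that computation, with made-up transform and resolutions that are illustrative only:

# Minimal sketch of the half-voxel correction, followed by the nm -> mm
# conversion done in NeuroglancerScale.affine. Toy numbers, not real data.
import numpy as np

transform_nm = np.eye(4)                          # content of transform.json, in nm
scale_resolution_nm = np.array([20000.0] * 3)     # voxel size of the fetched scale
max_resolution_nm = np.array([20000.0] * 3)       # voxel size of the finest scale

scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
affine_nm = transform_nm @ scaling
affine_nm[:3, 3] += max_resolution_nm * 0.5       # undo the corner-based convention

affine_mm = affine_nm.copy()
affine_mm[:3, :] /= 1e6                           # nanometres to millimetres
print(affine_mm)
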
@@ -63,10 +101,17 @@ class NeuroglancerProvider(volume.VolumeProvider, srctype="neuroglancer/precompu
              The name of a fragment volume to fetch, if any. For example,
              some volumes are split into left and right hemisphere fragments.
              See :func:`~siibra.volumes.Volume.fragments`
-         resolution_mm: float
-             Specify the resolution
+         resolution_mm: float, default: None (i.e, lowest)
+             Desired resolution in millimeters.
+             Tip
+             ---
+             Set to -1 to get the highest resolution. (might need to set max_bytes)
          voi: BoundingBox
              optional specification of a volume of interest to fetch.
+         max_bytes: float: Default: NeuroglancerVolume.MAX_BYTES
+             Maximum allowable size (in bytes) for downloading the image. siibra
+             will attempt to find the highest resolution image with a size less
+             than this value.
          """

          result = None
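
As a usage illustration of the parameters documented above, a hypothetical helper; `provider` stands for a NeuroglancerProvider instance and constructing one is not shown here:

def fetch_full_resolution(provider, budget_gib: float = 2.0, voi=None):
    """Hypothetical sketch: request the highest available resolution while
    raising the download cap, per the docstring above."""
    return provider.fetch(
        resolution_mm=-1,                   # -1 selects the highest resolution
        max_bytes=budget_gib * 1024 ** 3,   # raise the default size cap (bytes)
        voi=voi,                            # optional BoundingBox of interest
    )
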
@@ -79,10 +124,11 @@ class NeuroglancerProvider(volume.VolumeProvider, srctype="neuroglancer/precompu

          if len(self._fragments) > 1:
              if fragment is None:
-                 raise RuntimeError(
-                     f"Merging of fragments not yet implemented in {self.__class__.__name__}. "
-                     f"Specify one of [{', '.join(self._fragments.keys())}] using fetch(fragment=<name>). "
+                 logger.info(
+                     f"Merging fragments [{', '.join(self._fragments.keys())}]. "
+                     f"You can select one using `fragment` kwarg."
                  )
+                 result = self._merge_fragments(resolution_mm=resolution_mm, voi=voi, **kwargs)
              else:
                  matched_names = [n for n in self._fragments if fragment.lower() in n.lower()]
                  if len(matched_names) != 1:
@@ -113,81 +159,117 @@ class NeuroglancerProvider(volume.VolumeProvider, srctype="neuroglancer/precompu
          label = None
          if label is not None:
              result = nib.Nifti1Image(
-                 (result.get_fdata() == label).astype('uint8'),
-                 result.affine
+                 (np.asanyarray(result.dataobj) == label).astype('uint8'),
+                 result.affine,
+                 dtype='uint8',
              )

          return result

-     @property
-     def boundingbox(self):
+     def get_boundingbox(self, **fetch_kwargs) -> "_boundingbox.BoundingBox":
          """
-         Return the bounding box in physical coordinates
-         of the union of fragments in this neuroglancer volume.
+         Return the bounding box in physical coordinates of the union of
+         fragments in this neuroglancer volume.
+
+         Parameters
+         ----------
+         fetch_kwargs:
+             key word arguments that are used for fetchin volumes,
+             such as voi or resolution_mm.
          """
          bbox = None
          for frag in self._fragments.values():
-             next_bbox = _boundingbox.BoundingBox((0, 0, 0), frag.shape, space=None) \
-                 .transform(frag.affine)
+             if len(frag.shape) > 3:
+                 logger.warning(
+                     f"N-D Neuroglancer volume has shape {frag.shape}, but "
+                     f"bounding box considers only {frag.shape[:3]}"
+                 )
+             resolution_mm = fetch_kwargs.get("resolution_mm")
+             if resolution_mm is None:
+                 affine = frag.affine
+                 shape = frag.shape[:3]
+             else:
+                 scale = frag._select_scale(resolution_mm=resolution_mm)
+                 affine = scale.affine
+                 shape = scale.size[:3]
+             next_bbox = _boundingbox.BoundingBox(
+                 (0, 0, 0), shape, space=None
+             ).transform(affine)
              bbox = next_bbox if bbox is None else bbox.union(next_bbox)
          return bbox

-     def _merge_fragments(self) -> nib.Nifti1Image:
-         # TODO this only performs nearest neighbor interpolation, optimized for float types.
-         bbox = self.boundingbox
+     def _merge_fragments(
+         self,
+         resolution_mm: float = None,
+         voi: _boundingbox.BoundingBox = None,
+         **kwargs
+     ) -> nib.Nifti1Image:
+         with QUIET:
+             bbox = self.get_boundingbox(
+                 clip=False,
+                 background=0,
+                 resolution_mm=resolution_mm,
+                 voi=voi
+             )
+
          num_conflicts = 0
          result = None
-
-         for loader in self._img_loaders.values():
-             img = loader()
+         for frag_vol in self._fragments.values():
+             frag_scale = frag_vol._select_scale(
+                 resolution_mm=resolution_mm,
+                 bbox=voi,
+                 max_bytes=kwargs.pop("maxbytes", NeuroglancerVolume.MAX_BYTES)
+             )
+             img = frag_scale.fetch(voi=voi)
              if result is None:
                  # build the empty result image with its own affine and voxel space
                  s0 = np.identity(4)
                  s0[:3, -1] = list(bbox.minpoint.transform(np.linalg.inv(img.affine)))
                  result_affine = np.dot(img.affine, s0) # adjust global bounding box offset to get global affine
-                 voxdims = np.asanyarray(bbox.transform(result_affine).shape, dtype="int")
+                 voxdims = np.asanyarray(np.ceil(
+                     bbox.transform(np.linalg.inv(result_affine)).shape # transform to the voxel space
+                 ), dtype="int")
                  result_arr = np.zeros(voxdims, dtype=img.dataobj.dtype)
                  result = nib.Nifti1Image(dataobj=result_arr, affine=result_affine)

-             arr = np.asanyarray(img.dataobj)
-             Xs, Ys, Zs = np.where(arr != 0)
-             Xt, Yt, Zt, _ = np.split(
-                 (np.dot(
-                     np.linalg.inv(result_affine),
-                     np.dot(img.affine, np.c_[Xs, Ys, Zs, Zs * 0 + 1].T)
-                 ) + .5).astype('int'),
-                 4, axis=0
-             )
-             num_conflicts += np.count_nonzero(result_arr[Xt, Yt, Zt])
-             result_arr[Xt, Yt, Zt] = arr[Xs, Ys, Zs]
+             # resample to merge template and update it
+             resampled_img = resample_img_to_img(source_img=img, target_img=result)
+             arr = np.asanyarray(resampled_img.dataobj)
+             nonzero_voxels = arr != 0
+             num_conflicts += np.count_nonzero(result_arr[nonzero_voxels])
+             result_arr[nonzero_voxels] = arr[nonzero_voxels]

          if num_conflicts > 0:
              num_voxels = np.count_nonzero(result_arr)
-             logger.warning(f"Merging fragments required to overwrite {num_conflicts} conflicting voxels ({num_conflicts/num_voxels*100.:2.1f}%).")
+             logger.warning(
+                 f"Merging fragments required to overwrite {num_conflicts} "
+                 f"conflicting voxels ({num_conflicts / num_voxels * 100.:2.3f}%)."
+             )

          return result


  class NeuroglancerVolume:
-     # Wether to keep fetched data in local cache
-     USE_CACHE = False

-     @property
-     def MAX_BYTES(self):
-         "Number of bytes at which an image array is considered to large to fetch"
-         return NEUROGLANCER_MAX_GIB * 1024 ** 3
+     USE_CACHE = False # Whether to keep fetched data in local cache
+     MAX_BYTES = SIIBRA_MAX_FETCH_SIZE_GIB * 1024 ** 3 # Number of bytes at which an image array is considered to large to fetch

      def __init__(self, url: str):
          # TODO do we still need VolumeProvider.__init__ ? given it's not a subclass of VolumeProvider?
-         volume.VolumeProvider.__init__(self)
+         _provider.VolumeProvider.__init__(self)
          assert isinstance(url, str)
          self.url = url
          self._scales_cached = None
-         self._io = None
+         self._info = None
          self._transform_nm = None
+         self._io: PrecomputedIO = None

      @property
-     def transform_nm(self):
+     def transform_nm(self) -> np.ndarray:
+         """
+         This is the transformation matrix created to cater neuroglancer viewer
+         for a neuroglancer precomputed images.
+         """
          if self._transform_nm is not None:
              return self._transform_nm
          try:
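
The rewritten get_boundingbox maps each fragment's voxel extent (0, 0, 0)..shape to physical coordinates through its affine before taking the union, and _merge_fragments now pastes resampled fragments into that common grid. A minimal numpy sketch of the corner transformation, using a made-up shape and affine:

import numpy as np

shape = np.array([64, 96, 80])            # voxel dimensions of one fragment (made up)
affine = np.diag([0.5, 0.5, 0.5, 1.0])    # 0.5 mm isotropic voxels, no offset (made up)

corners_vox = np.array([[0, 0, 0, 1], list(shape) + [1]]).T
corners_phys = (affine @ corners_vox)[:3].T   # physical min/max corners in mm
print(corners_phys)                           # [[0, 0, 0], [32, 48, 40]]
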
@@ -207,12 +289,19 @@ class NeuroglancerVolume:
          self._transform_nm = val

      @property
-     def map_type(self):
+     def io(self) -> PrecomputedIO:
          if self._io is None:
+             accessor = HttpAccessor(self.url)
+             self._io = get_IO_for_existing_dataset(accessor)
+         return self._io
+
+     @property
+     def map_type(self):
+         if self._info is None:
              self._bootstrap()
          return (
              MapType.LABELLED
-             if self._io.info.get("type") == "segmentation"
+             if self._info.get("type") == "segmentation"
              else MapType.STATISTICAL
          )

@@ -220,21 +309,20 @@ class NeuroglancerVolume:
      def map_type(self, val):
          if val is not None:
              logger.debug(
-                 "NeuroglancerVolume can determine its own maptype from self._io.info.get('type')"
+                 "NeuroglancerVolume can determine its own maptype from self._info.get('type')"
              )

      def _bootstrap(self):
-         accessor = get_accessor_for_url(self.url)
-         self._io = get_IO_for_existing_dataset(accessor)
+         self._info = requests.HttpRequest(f"{self.url}/info", func=lambda b: json.loads(b.decode())).get()
          self._scales_cached = sorted(
-             [NeuroglancerScale(self, i) for i in self._io.info["scales"]]
+             [NeuroglancerScale(self, i) for i in self._info["scales"]]
          )

      @property
      def dtype(self):
-         if self._io is None:
+         if self._info is None:
              self._bootstrap()
-         return np.dtype(self._io.info["data_type"])
+         return np.dtype(self._info["data_type"])

      @property
      def scales(self):
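
_bootstrap now reads the precomputed dataset's `info` JSON directly instead of going through neuroglancer_scripts' accessor. A self-contained sketch of the fields the provider relies on, using a made-up `info` document with illustrative values:

import json

info = json.loads("""
{
  "type": "segmentation",
  "data_type": "uint8",
  "scales": [
    {"key": "20um", "resolution": [20000, 20000, 20000], "size": [6572, 7404, 5711]},
    {"key": "40um", "resolution": [40000, 40000, 40000], "size": [3286, 3702, 2856]}
  ]
}
""")

map_type = "LABELLED" if info.get("type") == "segmentation" else "STATISTICAL"
print(map_type, info["data_type"])
for scale in info["scales"]:
    print(scale["key"], [r / 1e6 for r in scale["resolution"]], "mm")
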
@@ -252,19 +340,30 @@ class NeuroglancerVolume:
          # return the affine matrix of the scale 0 data
          return self.scales[0].affine

-     def fetch(self, resolution_mm: float = None, voi: _boundingbox.BoundingBox = None, **kwargs):
+     def fetch(
+         self,
+         resolution_mm: float = None,
+         voi: _boundingbox.BoundingBox = None,
+         max_bytes: float = MAX_BYTES,
+         **kwargs
+     ):
          # the caller has to make sure voi is defined in the correct reference space
-         scale = self._select_scale(resolution_mm=resolution_mm, bbox=voi)
-         return scale.fetch(voi, **kwargs)
+         scale = self._select_scale(resolution_mm=resolution_mm, bbox=voi, max_bytes=max_bytes)
+         return scale.fetch(voi=voi, **kwargs)

-     def get_shape(self, resolution_mm=None):
-         scale = self._select_scale(resolution_mm)
+     def get_shape(self, resolution_mm=None, max_bytes: float = MAX_BYTES):
+         scale = self._select_scale(resolution_mm=resolution_mm, max_bytes=max_bytes)
          return scale.size

      def is_float(self):
          return self.dtype.kind == "f"

-     def _select_scale(self, resolution_mm: float, bbox: _boundingbox.BoundingBox = None):
+     def _select_scale(
+         self,
+         resolution_mm: float,
+         max_bytes: float = MAX_BYTES,
+         bbox: _boundingbox.BoundingBox = None
+     ) -> 'NeuroglancerScale':
          if resolution_mm is None:
              suitable = self.scales
          elif resolution_mm < 0:
@@ -276,23 +375,31 @@ class NeuroglancerVolume:
              scale = suitable[-1]
          else:
              scale = self.scales[0]
-             logger.warning(
-                 f"Requested resolution {resolution_mm}mm is not available for "
-                 f"all axes. Falling back to the highest possible resolution of "
-                 f"({', '.join(map('{:.4f}mm'.format, scale.res_mm))})."
-             )
+             xyz_res = ['{:.6f}'.format(r).rstrip('0') for r in scale.res_mm]
+             if all(r.startswith(str(resolution_mm)) for r in xyz_res):
+                 logger.info(f"Closest resolution to requested is {', '.join(xyz_res)} mm.")
+             else:
+                 logger.warning(
+                     f"Requested resolution {resolution_mm} is not available. "
+                     f"Falling back to the highest possible resolution of "
+                     f"{', '.join(xyz_res)} mm."
+                 )

          scale_changed = False
-         while scale._estimate_nbytes(bbox) > self.MAX_BYTES:
+         while scale._estimate_nbytes(bbox) > max_bytes:
              scale = scale.next()
              scale_changed = True
              if scale is None:
                  raise RuntimeError(
                      f"Fetching bounding box {bbox} is infeasible "
-                     f"relative to the limit of {self.MAX_BYTES/1024**3}GiB."
+                     f"relative to the limit of {max_bytes / 1024**3}GiB."
                  )
          if scale_changed:
-             logger.warning(f"Resolution was reduced to {scale.res_mm} to provide a feasible volume size")
+             logger.warning(
+                 f"Resolution was reduced to {scale.res_mm} to provide a "
+                 f"feasible volume size of {max_bytes / 1024**3} GiB. Set `max_bytes` to"
+                 f" fetch in the resolution requested."
+             )
          return scale


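
Scale selection now compares an estimated array size (dtype itemsize times voxel count) against the caller-supplied max_bytes, stepping to coarser scales until the estimate fits. A standalone sketch of that loop, with made-up per-scale sizes:

import numpy as np

dtype = np.dtype("uint8")
scale_sizes_vox = [            # full-volume voxel counts per scale, finest first (made up)
    (6572, 7404, 5711),
    (3286, 3702, 2856),
    (1643, 1851, 1428),
    (822, 926, 714),
]
max_bytes = 2 * 1024 ** 3      # the role played by MAX_BYTES / the max_bytes argument

for size in scale_sizes_vox:
    nbytes = dtype.itemsize * np.prod(size)
    if nbytes <= max_bytes:
        print(f"selected scale with size {size}, ~{nbytes / 1024**3:.2f} GiB")
        break
else:
    raise RuntimeError("no scale fits the byte budget")
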
@@ -301,7 +408,7 @@ class NeuroglancerScale:

      color_warning_issued = False

-     def __init__(self, volume: NeuroglancerProvider, scaleinfo: dict):
+     def __init__(self, volume: NeuroglancerVolume, scaleinfo: dict):
          self.volume = volume
          self.chunk_sizes = np.array(scaleinfo["chunk_sizes"]).squeeze()
          self.encoding = scaleinfo["encoding"]
@@ -337,8 +444,8 @@ class NeuroglancerScale:
          result = self.volume.dtype.itemsize * bbox_.volume
          logger.debug(
              f"Approximate size for fetching resolution "
-             f"({', '.join(map('{:.2f}'.format, self.res_mm))}) mm "
-             f"is {result/1024**3:.2f} GiB."
+             f"({', '.join(map('{:.6f}'.format, self.res_mm))}) mm "
+             f"is {result / 1024**3:.5f} GiB."
          )
          return result

@@ -361,10 +468,13 @@ class NeuroglancerScale:

      @property
      def affine(self):
-         scaling = np.diag(np.r_[self.res_nm, 1.0])
-         affine = np.dot(self.volume.transform_nm, scaling)
-         affine[:3, :] /= 1e6
-         return affine
+         affine_ = shift_ng_transfrom(
+             transform_nm=self.volume.transform_nm,
+             scale_resolution_nm=self.res_nm,
+             max_resolution_nm=self.volume.scales[0].res_nm[0],
+         )
+         affine_[:3, :] /= 1e6
+         return affine_

      def _point_to_lower_chunk_idx(self, xyz):
          return (
@@ -395,7 +505,7 @@ class NeuroglancerScale:
          y0 = gy * self.chunk_sizes[1]
          z0 = gz * self.chunk_sizes[2]
          x1, y1, z1 = np.minimum(self.chunk_sizes + [x0, y0, z0], self.size)
-         chunk_czyx = self.volume._io.read_chunk(self.key, (x0, x1, y0, y1, z0, z1))
+         chunk_czyx = self.volume.io.read_chunk(self.key, (x0, x1, y0, y1, z0, z1))
          if channel is None:
              channel = 0
          if chunk_czyx.shape[0] > 1 and not self.color_warning_issued:
@@ -424,9 +534,9 @@ class NeuroglancerScale:
          for dim in range(3):
              if bbox_.shape[dim] < 1:
                  logger.warning(
-                     f"Bounding box in voxel space will be enlarged to voxel size 1 along axis {dim}."
+                     f"Bounding box in voxel space will be enlarged to by {self.res_mm[dim]} along axis {dim}."
                  )
-                 bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + 1
+                 bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + self.res_mm[dim]

          # extract minimum and maximum the chunk indices to be loaded
          gx0, gy0, gz0 = self._point_to_lower_chunk_idx(tuple(bbox_.minpoint))
@@ -448,8 +558,8 @@ class NeuroglancerScale:
          # determine the remaining offset from the "chunk mosaic" to the
          # exact bounding box requested, to cut off undesired borders
          data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
-         x0, y0, z0 = (np.array(tuple(bbox_.minpoint)) - data_min).astype("int")
-         xd, yd, zd = np.array(bbox_.shape).astype("int")
+         x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
+         xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
          offset = tuple(bbox_.minpoint)

          # build the nifti image
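
The changed extent computation rounds the bounding box outward, so a fractional box never drops a partially covered voxel. A small worked example with made-up coordinates:

import numpy as np

minpoint = np.array([10.3, 4.9, 0.0])
maxpoint = np.array([12.1, 9.2, 3.0])
xd, yd, zd = np.ceil(maxpoint).astype(int) - np.floor(minpoint).astype(int)
print(xd, yd, zd)   # 3 6 3
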
@@ -461,14 +571,14 @@ class NeuroglancerScale:
          )


- class NeuroglancerMesh(volume.VolumeProvider, srctype="neuroglancer/precompmesh"):
+ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompmesh"):
      """
      A surface mesh provided as neuroglancer precomputed mesh.
      """

      @staticmethod
      def _fragmentinfo(url: str) -> Dict[str, Union[str, np.ndarray, Dict]]:
-         """ Prepare basic mesh fragment information from url. """
+         """Prepare basic mesh fragment information from url."""
          return {
              "url": url,
              "transform_nm": np.array(requests.HttpRequest(f"{url}/transform.json").data),
@@ -490,10 +600,12 @@ class NeuroglancerMesh(volume.VolumeProvider, srctype="neuroglancer/precompmesh"
      def _url(self) -> Union[str, Dict[str, str]]:
          return self._init_url

-     @property
-     def boundingbox(self) -> _boundingbox.BoundingBox:
+     def get_boundingbox(self, clip=False, background=0.0, **fetch_kwargs) -> '_boundingbox.BoundingBox':
+         """
+         Bounding box calculation is not yet implemented for meshes.
+         """
          raise NotImplementedError(
-             f"Fast bounding box access to {self.__class__.__name__} objects not yet implemented."
+             f"Bounding box access to {self.__class__.__name__} objects not yet implemented."
          )

      def _get_fragment_info(self, meshindex: int) -> Dict[str, Tuple[str, ]]: