siibra 1.0a14__py3-none-any.whl → 1.0.1a0__py3-none-any.whl

This diff compares the contents of two publicly released versions of this package as published to one of the supported registries. It is provided for informational purposes only and reflects the package versions as they appear in their public registries.

Potentially problematic release.
Files changed (80)
  1. siibra/VERSION +1 -1
  2. siibra/__init__.py +15 -5
  3. siibra/commons.py +3 -48
  4. siibra/configuration/__init__.py +1 -1
  5. siibra/configuration/configuration.py +1 -1
  6. siibra/configuration/factory.py +164 -127
  7. siibra/core/__init__.py +1 -1
  8. siibra/core/assignment.py +1 -1
  9. siibra/core/atlas.py +24 -17
  10. siibra/core/concept.py +18 -9
  11. siibra/core/parcellation.py +76 -55
  12. siibra/core/region.py +163 -183
  13. siibra/core/space.py +3 -1
  14. siibra/core/structure.py +1 -2
  15. siibra/exceptions.py +17 -1
  16. siibra/experimental/contour.py +6 -6
  17. siibra/experimental/patch.py +2 -2
  18. siibra/experimental/plane3d.py +8 -8
  19. siibra/explorer/__init__.py +1 -1
  20. siibra/explorer/url.py +15 -0
  21. siibra/explorer/util.py +1 -1
  22. siibra/features/__init__.py +1 -1
  23. siibra/features/anchor.py +13 -14
  24. siibra/features/connectivity/__init__.py +1 -1
  25. siibra/features/connectivity/functional_connectivity.py +1 -1
  26. siibra/features/connectivity/regional_connectivity.py +7 -5
  27. siibra/features/connectivity/streamline_counts.py +1 -1
  28. siibra/features/connectivity/streamline_lengths.py +1 -1
  29. siibra/features/connectivity/tracing_connectivity.py +1 -1
  30. siibra/features/dataset/__init__.py +1 -1
  31. siibra/features/dataset/ebrains.py +1 -1
  32. siibra/features/feature.py +50 -28
  33. siibra/features/image/__init__.py +1 -1
  34. siibra/features/image/image.py +18 -13
  35. siibra/features/image/sections.py +1 -1
  36. siibra/features/image/volume_of_interest.py +1 -1
  37. siibra/features/tabular/__init__.py +1 -1
  38. siibra/features/tabular/bigbrain_intensity_profile.py +2 -2
  39. siibra/features/tabular/cell_density_profile.py +102 -66
  40. siibra/features/tabular/cortical_profile.py +5 -3
  41. siibra/features/tabular/gene_expression.py +1 -1
  42. siibra/features/tabular/layerwise_bigbrain_intensities.py +1 -1
  43. siibra/features/tabular/layerwise_cell_density.py +8 -25
  44. siibra/features/tabular/receptor_density_fingerprint.py +5 -3
  45. siibra/features/tabular/receptor_density_profile.py +5 -3
  46. siibra/features/tabular/regional_timeseries_activity.py +7 -5
  47. siibra/features/tabular/tabular.py +5 -3
  48. siibra/livequeries/__init__.py +1 -1
  49. siibra/livequeries/allen.py +46 -20
  50. siibra/livequeries/bigbrain.py +9 -9
  51. siibra/livequeries/ebrains.py +1 -1
  52. siibra/livequeries/query.py +1 -2
  53. siibra/locations/__init__.py +10 -10
  54. siibra/locations/boundingbox.py +77 -38
  55. siibra/locations/location.py +12 -4
  56. siibra/locations/point.py +14 -9
  57. siibra/locations/{pointset.py → pointcloud.py} +69 -27
  58. siibra/retrieval/__init__.py +1 -1
  59. siibra/retrieval/cache.py +1 -1
  60. siibra/retrieval/datasets.py +1 -1
  61. siibra/retrieval/exceptions/__init__.py +1 -1
  62. siibra/retrieval/repositories.py +10 -27
  63. siibra/retrieval/requests.py +20 -3
  64. siibra/vocabularies/__init__.py +1 -1
  65. siibra/volumes/__init__.py +2 -2
  66. siibra/volumes/parcellationmap.py +121 -94
  67. siibra/volumes/providers/__init__.py +1 -1
  68. siibra/volumes/providers/freesurfer.py +1 -1
  69. siibra/volumes/providers/gifti.py +1 -1
  70. siibra/volumes/providers/neuroglancer.py +68 -42
  71. siibra/volumes/providers/nifti.py +18 -28
  72. siibra/volumes/providers/provider.py +2 -2
  73. siibra/volumes/sparsemap.py +128 -247
  74. siibra/volumes/volume.py +252 -65
  75. {siibra-1.0a14.dist-info → siibra-1.0.1a0.dist-info}/METADATA +17 -4
  76. siibra-1.0.1a0.dist-info/RECORD +84 -0
  77. {siibra-1.0a14.dist-info → siibra-1.0.1a0.dist-info}/WHEEL +1 -1
  78. siibra-1.0a14.dist-info/RECORD +0 -84
  79. {siibra-1.0a14.dist-info → siibra-1.0.1a0.dist-info}/LICENSE +0 -0
  80. {siibra-1.0a14.dist-info → siibra-1.0.1a0.dist-info}/top_level.txt +0 -0
@@ -1,4 +1,4 @@
-# Copyright 2018-2023
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -32,8 +32,7 @@ from ..commons import (
     generate_uuid
 )
 from ..core import concept, space, parcellation, region as _region
-from ..locations import location, point, pointset
-from ..retrieval import requests
+from ..locations import location, point, pointcloud
 
 import numpy as np
 from typing import Union, Dict, List, TYPE_CHECKING, Iterable, Tuple
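The module rename (siibra/locations/pointset.py to pointcloud.py, file 57 in the list above) propagates into import paths. A minimal before/after migration sketch for downstream code, assuming the public module path follows the file rename:

    # before (1.0a14)
    from siibra.locations import pointset     # provides pointset.PointSet

    # after (1.0.1a0)
    from siibra.locations import pointcloud   # provides pointcloud.PointCloud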
@@ -76,6 +75,7 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
         modality: str = None,
         publications: list = [],
         datasets: list = [],
+        prerelease: bool = False,
     ):
         """
         Constructs a new parcellation object.
@@ -120,12 +120,11 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             description=description,
             publications=publications,
             datasets=datasets,
-            modality=modality
+            modality=modality,
+            prerelease=prerelease,
         )
         self._space_spec = space_spec
         self._parcellation_spec = parcellation_spec
-        if 'prerelease' in self.parcellation.name.lower():
-            self.name = f"[PRERELEASE] {self.name}"
 
         # Since the volumes might include 4D arrays, where the actual
         # volume index points to a z coordinate, we create subvolume
@@ -160,6 +159,11 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
                 logger.warning(f"Non unique indices encountered in {self}: {duplicates}")
         self._affine_cached = None
 
+    @property
+    def key(self):
+        _id = self.id
+        return create_key(_id[len("siibra-map-v0.0.1"):])
+
     @property
     def species(self) -> Species:
         # lazy implementation
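The new key property simply strips the fixed id prefix before building a key (create_key comes from siibra.commons). A toy illustration of the slicing, with a made-up id string:

    _id = "siibra-map-v0.0.1-example-labelled"   # made-up map id
    suffix = _id[len("siibra-map-v0.0.1"):]      # "-example-labelled"
    # the property then returns create_key(suffix)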
@@ -319,69 +323,27 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
     def regions(self):
         return list(self._indices)
 
-    def fetch(
+    def get_volume(
         self,
-        region_or_index: Union[str, "Region", MapIndex] = None,
+        region: Union[str, "Region"] = None,
         *,
         index: MapIndex = None,
-        region: Union[str, "Region"] = None,
-        **kwargs
-    ):
-        """
-        Fetches one particular volume of this parcellation map.
-
-        If there's only one volume, this is the default, otherwise further
-        specification is requested:
-        - the volume index,
-        - the MapIndex (which results in a regional map being returned)
-
-        You might also consider fetch_iter() to iterate the volumes, or
-        compress() to produce a single-volume parcellation map.
-
-        Parameters
-        ----------
-        region_or_index: str, Region, MapIndex
-            Lazy match the specification.
-        index: MapIndex
-            Explicit specification of the map index, typically resulting
-            in a regional map (mask or statistical map) to be returned.
-            Note that supplying 'region' will result in retrieving the map index of that region
-            automatically.
-        region: str, Region
-            Specification of a region name, resulting in a regional map
-            (mask or statistical map) to be returned.
-        **kwargs
-            - resolution_mm: resolution in millimeters
-            - format: the format of the volume, like "mesh" or "nii"
-            - voi: a BoundingBox of interest
-
-
-        Note
-        ----
-        Not all keyword arguments are supported for volume formats. Format
-        is restricted by available formats (check formats property).
-
-        Returns
-        -------
-        An image or mesh
-        """
+        **kwargs,
+    ) -> Union[_volume.Volume, _volume.FilteredVolume, _volume.Subvolume]:
         try:
-            length = len([arg for arg in [region_or_index, region, index] if arg is not None])
+            length = len([arg for arg in [region, index] if arg is not None])
            assert length == 1
        except AssertionError:
            if length > 1:
-                raise exceptions.ExcessiveArgumentException("One and only one of region_or_index, region, index can be defined for fetch")
-            # user can provide no arguments, which assumes one and only one volume present
-
-        if isinstance(region_or_index, MapIndex):
-            index = region_or_index
-
-        if isinstance(region_or_index, (str, _region.Region)):
-            region = region_or_index
-
+                raise exceptions.ExcessiveArgumentException(
+                    "One and only one of region or index can be defined for `get_volume`."
+                )
         mapindex = None
         if region is not None:
-            assert isinstance(region, (str, _region.Region))
+            try:
+                assert isinstance(region, (str, _region.Region))
+            except AssertionError:
+                raise TypeError(f"Please provide a region name or region instance, not a {type(region)}")
             mapindex = self.get_index(region)
         if index is not None:
             assert isinstance(index, MapIndex)
@@ -390,19 +352,17 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             if len(self) == 1:
                 mapindex = MapIndex(volume=0, label=None)
             elif len(self) > 1:
+                assert self.maptype == MapType.LABELLED, f"Cannot merge multiple volumes of map type {self.maptype}. Please specify a region or index."
                 logger.info(
                     "Map provides multiple volumes and no specification is"
                     " provided. Resampling all volumes to the space."
                 )
                 labels = list(range(len(self.volumes)))
                 merged_volume = _volume.merge(self.volumes, labels, **kwargs)
-                return merged_volume.fetch()
+                return merged_volume
             else:
                 raise exceptions.NoVolumeFound("Map provides no volumes.")
 
-        if "resolution_mm" in kwargs and kwargs.get("format") is None:
-            kwargs["format"] = 'neuroglancer/precomputed'
-
         kwargs_fragment = kwargs.pop("fragment", None)
         if kwargs_fragment is not None:
             if (mapindex.fragment is not None) and (kwargs_fragment != mapindex.fragment):
@@ -418,17 +378,60 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             raise IndexError(
                 f"{self} provides {len(self)} mapped volumes, but #{mapindex.volume} was requested."
             )
-        try:
-            result = self.volumes[mapindex.volume or 0].fetch(
-                fragment=mapindex.fragment, label=mapindex.label, **kwargs
-            )
-        except requests.SiibraHttpRequestError as e:
-            print(str(e))
+        if mapindex.label is None and mapindex.fragment is None:
+            return self.volumes[mapindex.volume]
 
-        if result is None:
-            raise RuntimeError(f"Error fetching {mapindex} from {self} as {kwargs.get('format', f'{self.formats}')}.")
+        return _volume.FilteredVolume(
+            parent_volume=self.volumes[mapindex.volume],
+            label=mapindex.label,
+            fragment=mapindex.fragment,
+        )
+
+    def fetch(
+        self,
+        region: Union[str, "Region"] = None,
+        *,
+        index: MapIndex = None,
+        **fetch_kwargs
+    ):
+        """
+        Fetches one particular volume of this parcellation map.
+
+        If there's only one volume, this is the default, otherwise further
+        specification is requested:
+        - the volume index,
+        - the MapIndex (which results in a regional map being returned)
+
+        You might also consider fetch_iter() to iterate the volumes, or
+        compress() to produce a single-volume parcellation map.
+
+        Parameters
+        ----------
+        region: str, Region
+            Specification of a region name, resulting in a regional map
+            (mask or statistical map) to be returned.
+        index: MapIndex
+            Explicit specification of the map index, typically resulting
+            in a regional map (mask or statistical map) to be returned.
+            Note that supplying 'region' will result in retrieving the map index of that region
+            automatically.
+        **fetch_kwargs
+            - resolution_mm: resolution in millimeters
+            - format: the format of the volume, like "mesh" or "nii"
+            - voi: a BoundingBox of interest
 
-        return result
+
+        Note
+        ----
+        Not all keyword arguments are supported for volume formats. Format
+        is restricted by available formats (check formats property).
+
+        Returns
+        -------
+        An image or mesh
+        """
+        vol = self.get_volume(region=region, index=index, **fetch_kwargs)
+        return vol.fetch(**fetch_kwargs)
 
     def fetch_iter(self, **kwargs):
         """
@@ -531,7 +534,7 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             disable=(len(self.fragments) == 1 or self.fragments is None)
         ):
             mapindex = MapIndex(volume=volidx, fragment=frag)
-            img = self.fetch(mapindex)
+            img = self.fetch(index=mapindex)
             if np.allclose(img.affine, result_affine):
                 img_data = np.asanyarray(img.dataobj)
             else:
@@ -572,32 +575,56 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             )]
         )
 
-    def compute_centroids(self) -> Dict[str, point.Point]:
+    def compute_centroids(self, split_components: bool = True, **fetch_kwargs) -> Dict[str, pointcloud.PointCloud]:
         """
-        Compute a dictionary of the centroids of all regions in this map.
+        Compute a dictionary of all regions in this map to their centroids.
+        By default, the regional masks will be split to connected components
+        and each point in the PointCloud corresponds to a region component.
+
+        Parameters
+        ----------
+        split_components: bool, default: True
+            If True, finds the spatial properties for each connected component
+            found by skimage.measure.label.
 
         Returns
         -------
         Dict[str, point.Point]
             Region names as keys and computed centroids as items.
         """
-        centroids = {}
-        maparr = None
+        assert self.provides_image, "Centroid computation for meshes is not supported yet."
+        centroids = dict()
         for regionname, indexlist in siibra_tqdm(
             self._indices.items(), unit="regions", desc="Computing centroids"
         ):
-            assert len(indexlist) == 1
-            index = indexlist[0]
-            if index.label == 0:
-                continue
-            with QUIET:
-                mapimg = self.fetch(index=index) # returns a mask of the region
-            maparr = np.asanyarray(mapimg.dataobj)
-            centroid_vox = np.mean(np.where(maparr == 1), axis=1)
             assert regionname not in centroids
-            centroids[regionname] = point.Point(
-                np.dot(mapimg.affine, np.r_[centroid_vox, 1])[:3], space=self.space
+            # get the mask of the region in this map
+            with QUIET:
+                if len(indexlist) >= 1:
+                    merged_volume = _volume.merge(
+                        [
+                            _volume.from_nifti(
+                                self.fetch(index=index, **fetch_kwargs),
+                                self.space,
+                                f"{self.name} - {index}"
+                            )
+                            for index in indexlist
+                        ],
+                        labels=[1] * len(indexlist)
+                    )
+                    mapimg = merged_volume.fetch()
+                elif len(indexlist) == 1:
+                    index = indexlist[0]
+                    mapimg = self.fetch(index=index, **fetch_kwargs) # returns a mask of the region
+            props = _volume.ComponentSpatialProperties.compute_from_image(
+                img=mapimg,
+                space=self.space,
+                split_components=split_components,
             )
+            try:
+                centroids[regionname] = pointcloud.from_points([c.centroid for c in props])
+            except exceptions.EmptyPointCloudError:
+                centroids[regionname] = None
         return centroids
 
     def get_resampled_template(self, **fetch_kwargs) -> _volume.Volume:
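Continuing the illustrative map object from the earlier sketch: the reworked compute_centroids now maps each region to a PointCloud (or None for an empty mask) instead of a single Point; the exact number of component centroids depends on the map.

    centroids = julich_map.compute_centroids(split_components=True)
    for region_name, cloud in centroids.items():
        if cloud is not None:
            print(region_name, cloud)   # a PointCloud with one centroid per connected component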
@@ -722,7 +749,7 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
 
         Returns
         -------
-        PointSet
+        PointCloud
             Sample points in physcial coordinates corresponding to this
             parcellationmap
         """
@@ -740,7 +767,7 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             np.unravel_index(np.random.choice(len(p), numpoints, p=p), W.shape)
         ).T
         XYZ = np.dot(mask.affine, np.c_[XYZ_, np.ones(numpoints)].T)[:3, :].T
-        return pointset.PointSet(XYZ, space=self.space)
+        return pointcloud.PointCloud(XYZ, space=self.space)
 
     def to_sparse(self):
         """
@@ -811,10 +838,10 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
 
         if isinstance(item, point.Point):
             return self._assign_points(
-                pointset.PointSet([item], item.space, sigma_mm=item.sigma),
+                pointcloud.PointCloud([item], item.space, sigma_mm=item.sigma),
                 lower_threshold
             )
-        if isinstance(item, pointset.PointSet):
+        if isinstance(item, pointcloud.PointCloud):
             return self._assign_points(item, lower_threshold)
         if isinstance(item, _volume.Volume):
             return self._assign_volume(
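A hedged sketch of the renamed location type on the user side, reusing the illustrative julich_map from the earlier sketch. The constructor arguments mirror those used in the diff above; the coordinates and sigma are made up, and passing a plain space spec string is an assumption:

    from siibra.locations import pointcloud

    pts = pointcloud.PointCloud(
        [(-25.0, -2.0, -33.0), (27.0, -1.0, -30.0)],   # made-up coordinates in mm
        "mni152",                                      # space specification
        sigma_mm=3.0,
    )
    assignments = julich_map.assign(pts)   # PointCloud is now the accepted point-set type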
@@ -969,9 +996,9 @@ class Map(concept.AtlasConcept, configuration_folder="maps"):
             .dropna(axis='columns', how='all')
         )
 
-    def _assign_points(self, points: pointset.PointSet, lower_threshold: float) -> List[MapAssignment]:
+    def _assign_points(self, points: pointcloud.PointCloud, lower_threshold: float) -> List[MapAssignment]:
         """
-        assign a PointSet to this parcellation map.
+        assign a PointCloud to this parcellation map.
 
         Parameters
         -----------
@@ -1,4 +1,4 @@
-# Copyright 2018-2021
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-# Copyright 2018-2021
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-# Copyright 2018-2021
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -1,4 +1,4 @@
-# Copyright 2018-2021
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -26,7 +26,7 @@ from ...commons import (
 from ...retrieval import requests, cache
 from ...locations import boundingbox as _boundingbox
 
-from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset
+from neuroglancer_scripts.precomputed_io import get_IO_for_existing_dataset, PrecomputedIO
 from neuroglancer_scripts.http_accessor import HttpAccessor
 from neuroglancer_scripts.mesh import read_precomputed_mesh, affine_transform_mesh
 from io import BytesIO
@@ -37,6 +37,37 @@ from typing import Union, Dict, Tuple
 import json
 
 
+def shift_ng_transfrom(
+    transform_nm: np.ndarray, scale_resolution_nm: np.ndarray, max_resolution_nm: np.ndarray
+) -> np.ndarray:
+    """
+    Helper method to get nifti standard affine.
+
+    transfrorm.json stored with neuroglancer precomputed images and meshes
+    are meant to be used for neuroglancer viewers and hence they are not
+    representative of the affine in other tools. This method shifts back
+    half a voxel in each axis.
+    (see https://neuroglancer-scripts.readthedocs.io/en/latest/neuroglancer-info.html#different-conventions-for-coordinate-transformations)
+
+    Parameters
+    ----------
+    transform_nm: np.ndarray
+        Transform array created for dispalying an image correctly from
+        neuroglancer precomputed format in neuroglancer viewer.
+    max_resolution_nm: np.ndarray
+        The voxel resolution of the highest level of resolution.
+
+    Returns
+    -------
+    np.ndarray
+        Standard affine in nm
+    """
+    scaling = np.diag(np.r_[scale_resolution_nm, 1.0])
+    affine = np.dot(transform_nm, scaling)
+    affine[:3, 3] += (max_resolution_nm * 0.5)
+    return affine
+
+
 class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/precomputed"):
 
     def __init__(self, url: Union[str, Dict[str, str]]):
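A small worked example of the half-voxel shift the new helper applies; the values are made up, and the import path assumes the function remains module-level in siibra/volumes/providers/neuroglancer.py. NeuroglancerScale.affine (further below) additionally divides the first three rows by 1e6 to convert nanometres to millimetres.

    import numpy as np
    from siibra.volumes.providers.neuroglancer import shift_ng_transfrom

    transform_nm = np.eye(4)                               # identity viewer transform, in nm
    scale_res_nm = np.array([20000.0, 20000.0, 20000.0])   # 20 µm voxels at this scale
    max_res_nm = np.array([20000.0, 20000.0, 20000.0])     # highest-resolution scale

    affine_nm = shift_ng_transfrom(transform_nm, scale_res_nm, max_res_nm)
    # the diagonal carries the voxel size (20000 nm), and the translation is
    # shifted by half a voxel: affine_nm[:3, 3] == [10000., 10000., 10000.]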
@@ -128,26 +159,20 @@ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/preco
                 label = None
             if label is not None:
                 result = nib.Nifti1Image(
-                    (result.get_fdata() == label).astype('uint8'),
-                    result.affine
+                    (np.asanyarray(result.dataobj) == label).astype('uint8'),
+                    result.affine,
+                    dtype='uint8',
                 )
 
         return result
 
-    def get_boundingbox(self, clip=False, background=0, **fetch_kwargs) -> "_boundingbox.BoundingBox":
+    def get_boundingbox(self, **fetch_kwargs) -> "_boundingbox.BoundingBox":
         """
         Return the bounding box in physical coordinates of the union of
         fragments in this neuroglancer volume.
 
         Parameters
         ----------
-        clip: bool, default: True
-            Whether to clip the background of the volume.
-        background: float, default: 0.0
-            The background value to clip.
-        Note
-        ----
-        To use it, clip must be True.
         fetch_kwargs:
             key word arguments that are used for fetchin volumes,
             such as voi or resolution_mm.
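The label masking above now reads the array via np.asanyarray(result.dataobj) instead of get_fdata(), avoiding an unnecessary float cast, and passes dtype='uint8' explicitly. A self-contained sketch of the same pattern with a fake labelled volume:

    import numpy as np
    import nibabel as nib

    data = np.random.randint(0, 5, (4, 4, 4), dtype="uint8")   # fake labelled volume
    labelled = nib.Nifti1Image(data, np.eye(4))

    mask = nib.Nifti1Image(
        (np.asanyarray(labelled.dataobj) == 3).astype("uint8"),   # binary mask for label 3
        labelled.affine,
        dtype="uint8",
    )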
@@ -159,23 +184,17 @@ class NeuroglancerProvider(_provider.VolumeProvider, srctype="neuroglancer/preco
                     f"N-D Neuroglancer volume has shape {frag.shape}, but "
                     f"bounding box considers only {frag.shape[:3]}"
                 )
-            if clip:
-                img = frag.fetch(**fetch_kwargs)
-                next_bbox = _boundingbox.from_array(
-                    np.asanyarray(img.dataobj), threshold=background, space=None
-                ).transform(img.affine) # use the affine of the image matching fetch_kwargs
+            resolution_mm = fetch_kwargs.get("resolution_mm")
+            if resolution_mm is None:
+                affine = frag.affine
+                shape = frag.shape[:3]
             else:
-                resolution_mm = fetch_kwargs.get("resolution_mm")
-                if resolution_mm is None:
-                    affine = frag.affine
-                    shape = frag.shape[:3]
-                else:
-                    scale = frag._select_scale(resolution_mm=resolution_mm)
-                    affine = scale.affine
-                    shape = scale.size[:3]
-                next_bbox = _boundingbox.BoundingBox(
-                    (0, 0, 0), shape, space=None
-                ).transform(affine)
+                scale = frag._select_scale(resolution_mm=resolution_mm)
+                affine = scale.affine
+                shape = scale.size[:3]
+            next_bbox = _boundingbox.BoundingBox(
+                (0, 0, 0), shape, space=None
+            ).transform(affine)
             bbox = next_bbox if bbox is None else bbox.union(next_bbox)
         return bbox
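The bounding box is now derived purely from the voxel grid of the selected scale: a voxel-space box from (0, 0, 0) to the array shape, transformed with the corresponding affine, instead of clipping fetched data. A plain-numpy sketch of that transform (values made up; nibabel's apply_affine used for brevity):

    import numpy as np
    from nibabel.affines import apply_affine

    shape = np.array([512, 512, 256])            # made-up voxel dimensions of the selected scale
    affine = np.diag([0.02, 0.02, 0.02, 1.0])    # 20 µm isotropic voxels, in mm

    corner_min = apply_affine(affine, np.zeros(3))   # physical position of voxel (0, 0, 0)
    corner_max = apply_affine(affine, shape)         # physical position of the far corner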
181
200
 
@@ -243,10 +262,14 @@ class NeuroglancerVolume:
         self._scales_cached = None
         self._info = None
         self._transform_nm = None
-        self._io = None
+        self._io: PrecomputedIO = None
 
     @property
-    def transform_nm(self):
+    def transform_nm(self) -> np.ndarray:
+        """
+        This is the transformation matrix created to cater neuroglancer viewer
+        for a neuroglancer precomputed images.
+        """
         if self._transform_nm is not None:
             return self._transform_nm
         try:
@@ -266,7 +289,7 @@ class NeuroglancerVolume:
         self._transform_nm = val
 
     @property
-    def io(self):
+    def io(self) -> PrecomputedIO:
         if self._io is None:
             accessor = HttpAccessor(self.url)
             self._io = get_IO_for_existing_dataset(accessor)
@@ -326,7 +349,7 @@ class NeuroglancerVolume:
     ):
         # the caller has to make sure voi is defined in the correct reference space
         scale = self._select_scale(resolution_mm=resolution_mm, bbox=voi, max_bytes=max_bytes)
-        return scale.fetch(voi=voi)
+        return scale.fetch(voi=voi, **kwargs)
 
     def get_shape(self, resolution_mm=None, max_bytes: float = MAX_BYTES):
         scale = self._select_scale(resolution_mm=resolution_mm, max_bytes=max_bytes)
@@ -385,7 +408,7 @@ class NeuroglancerScale:
 
     color_warning_issued = False
 
-    def __init__(self, volume: NeuroglancerProvider, scaleinfo: dict):
+    def __init__(self, volume: NeuroglancerVolume, scaleinfo: dict):
         self.volume = volume
         self.chunk_sizes = np.array(scaleinfo["chunk_sizes"]).squeeze()
         self.encoding = scaleinfo["encoding"]
@@ -445,10 +468,13 @@ class NeuroglancerScale:
 
     @property
     def affine(self):
-        scaling = np.diag(np.r_[self.res_nm, 1.0])
-        affine = np.dot(self.volume.transform_nm, scaling)
-        affine[:3, :] /= 1e6
-        return affine
+        affine_ = shift_ng_transfrom(
+            transform_nm=self.volume.transform_nm,
+            scale_resolution_nm=self.res_nm,
+            max_resolution_nm=self.volume.scales[0].res_nm[0],
+        )
+        affine_[:3, :] /= 1e6
+        return affine_
 
     def _point_to_lower_chunk_idx(self, xyz):
         return (
@@ -508,9 +534,9 @@ class NeuroglancerScale:
         for dim in range(3):
             if bbox_.shape[dim] < 1:
                 logger.warning(
-                    f"Bounding box in voxel space will be enlarged to voxel size 1 along axis {dim}."
+                    f"Bounding box in voxel space will be enlarged to by {self.res_mm[dim]} along axis {dim}."
                 )
-                bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + 1
+                bbox_.maxpoint[dim] = bbox_.maxpoint[dim] + self.res_mm[dim]
 
         # extract minimum and maximum the chunk indices to be loaded
         gx0, gy0, gz0 = self._point_to_lower_chunk_idx(tuple(bbox_.minpoint))
@@ -532,8 +558,8 @@ class NeuroglancerScale:
         # determine the remaining offset from the "chunk mosaic" to the
         # exact bounding box requested, to cut off undesired borders
         data_min = np.array([gx0, gy0, gz0]) * self.chunk_sizes
-        x0, y0, z0 = (np.array(tuple(bbox_.minpoint)) - data_min).astype("int")
-        xd, yd, zd = np.array(bbox_.shape).astype("int")
+        x0, y0, z0 = (np.array(bbox_.minpoint) - data_min).astype("int")
+        xd, yd, zd = np.ceil((np.array(bbox_.maxpoint))).astype(int) - np.floor((np.array(bbox_.minpoint))).astype(int)
         offset = tuple(bbox_.minpoint)
 
         # build the nifti image
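A short numeric illustration of the changed extent computation (plain numpy, made-up values, assuming the bounding-box shape is the max-minus-min extent): truncating the fractional shape could drop part of the requested range, whereas ceil(max) - floor(min) always covers it.

    import numpy as np

    minpoint = np.array([10.2, 4.7, 0.0])
    maxpoint = np.array([12.9, 6.1, 3.0])

    old_extent = (maxpoint - minpoint).astype("int")                              # [2, 1, 3]
    new_extent = np.ceil(maxpoint).astype(int) - np.floor(minpoint).astype(int)   # [3, 3, 3]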
@@ -552,7 +578,7 @@ class NeuroglancerMesh(_provider.VolumeProvider, srctype="neuroglancer/precompme
 
     @staticmethod
     def _fragmentinfo(url: str) -> Dict[str, Union[str, np.ndarray, Dict]]:
-        """ Prepare basic mesh fragment information from url. """
+        """Prepare basic mesh fragment information from url."""
         return {
             "url": url,
             "transform_nm": np.array(requests.HttpRequest(f"{url}/transform.json").data),