siibra 1.0a1__1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of siibra might be problematic. Click here for more details.

Files changed (84) hide show
  1. siibra/VERSION +1 -0
  2. siibra/__init__.py +164 -0
  3. siibra/commons.py +823 -0
  4. siibra/configuration/__init__.py +17 -0
  5. siibra/configuration/configuration.py +189 -0
  6. siibra/configuration/factory.py +589 -0
  7. siibra/core/__init__.py +16 -0
  8. siibra/core/assignment.py +110 -0
  9. siibra/core/atlas.py +239 -0
  10. siibra/core/concept.py +308 -0
  11. siibra/core/parcellation.py +387 -0
  12. siibra/core/region.py +1223 -0
  13. siibra/core/space.py +131 -0
  14. siibra/core/structure.py +111 -0
  15. siibra/exceptions.py +63 -0
  16. siibra/experimental/__init__.py +19 -0
  17. siibra/experimental/contour.py +61 -0
  18. siibra/experimental/cortical_profile_sampler.py +57 -0
  19. siibra/experimental/patch.py +98 -0
  20. siibra/experimental/plane3d.py +256 -0
  21. siibra/explorer/__init__.py +17 -0
  22. siibra/explorer/url.py +222 -0
  23. siibra/explorer/util.py +87 -0
  24. siibra/features/__init__.py +117 -0
  25. siibra/features/anchor.py +224 -0
  26. siibra/features/connectivity/__init__.py +33 -0
  27. siibra/features/connectivity/functional_connectivity.py +57 -0
  28. siibra/features/connectivity/regional_connectivity.py +494 -0
  29. siibra/features/connectivity/streamline_counts.py +27 -0
  30. siibra/features/connectivity/streamline_lengths.py +27 -0
  31. siibra/features/connectivity/tracing_connectivity.py +30 -0
  32. siibra/features/dataset/__init__.py +17 -0
  33. siibra/features/dataset/ebrains.py +90 -0
  34. siibra/features/feature.py +970 -0
  35. siibra/features/image/__init__.py +27 -0
  36. siibra/features/image/image.py +115 -0
  37. siibra/features/image/sections.py +26 -0
  38. siibra/features/image/volume_of_interest.py +88 -0
  39. siibra/features/tabular/__init__.py +24 -0
  40. siibra/features/tabular/bigbrain_intensity_profile.py +77 -0
  41. siibra/features/tabular/cell_density_profile.py +298 -0
  42. siibra/features/tabular/cortical_profile.py +322 -0
  43. siibra/features/tabular/gene_expression.py +257 -0
  44. siibra/features/tabular/layerwise_bigbrain_intensities.py +62 -0
  45. siibra/features/tabular/layerwise_cell_density.py +95 -0
  46. siibra/features/tabular/receptor_density_fingerprint.py +192 -0
  47. siibra/features/tabular/receptor_density_profile.py +110 -0
  48. siibra/features/tabular/regional_timeseries_activity.py +294 -0
  49. siibra/features/tabular/tabular.py +139 -0
  50. siibra/livequeries/__init__.py +19 -0
  51. siibra/livequeries/allen.py +352 -0
  52. siibra/livequeries/bigbrain.py +197 -0
  53. siibra/livequeries/ebrains.py +145 -0
  54. siibra/livequeries/query.py +49 -0
  55. siibra/locations/__init__.py +91 -0
  56. siibra/locations/boundingbox.py +454 -0
  57. siibra/locations/location.py +115 -0
  58. siibra/locations/point.py +344 -0
  59. siibra/locations/pointcloud.py +349 -0
  60. siibra/retrieval/__init__.py +27 -0
  61. siibra/retrieval/cache.py +233 -0
  62. siibra/retrieval/datasets.py +389 -0
  63. siibra/retrieval/exceptions/__init__.py +27 -0
  64. siibra/retrieval/repositories.py +769 -0
  65. siibra/retrieval/requests.py +659 -0
  66. siibra/vocabularies/__init__.py +45 -0
  67. siibra/vocabularies/gene_names.json +29176 -0
  68. siibra/vocabularies/receptor_symbols.json +210 -0
  69. siibra/vocabularies/region_aliases.json +460 -0
  70. siibra/volumes/__init__.py +23 -0
  71. siibra/volumes/parcellationmap.py +1279 -0
  72. siibra/volumes/providers/__init__.py +20 -0
  73. siibra/volumes/providers/freesurfer.py +113 -0
  74. siibra/volumes/providers/gifti.py +165 -0
  75. siibra/volumes/providers/neuroglancer.py +736 -0
  76. siibra/volumes/providers/nifti.py +266 -0
  77. siibra/volumes/providers/provider.py +107 -0
  78. siibra/volumes/sparsemap.py +468 -0
  79. siibra/volumes/volume.py +892 -0
  80. siibra-1.0.0a1.dist-info/LICENSE +201 -0
  81. siibra-1.0.0a1.dist-info/METADATA +160 -0
  82. siibra-1.0.0a1.dist-info/RECORD +84 -0
  83. siibra-1.0.0a1.dist-info/WHEEL +5 -0
  84. siibra-1.0.0a1.dist-info/top_level.txt +1 -0
@@ -0,0 +1,27 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Multimodal data features in volume or image formats."""
16
+
17
+ from .volume_of_interest import (
18
+ CellBodyStainedVolumeOfInterest,
19
+ BlockfaceVolumeOfInterest,
20
+ PLIVolumeOfInterest,
21
+ MRIVolumeOfInterest,
22
+ XPCTVolumeOfInterest,
23
+ LSFMVolumeOfInterest,
24
+ DTIVolumeOfInterest
25
+ # SegmentedVolumeOfInterest
26
+ )
27
+ from .sections import CellbodyStainedSection
@@ -0,0 +1,115 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Base type of features in volume format and related anatomical anchor."""
16
+
17
+ from zipfile import ZipFile
18
+ from .. import feature
19
+
20
+ from .. import anchor as _anchor
21
+
22
+ from ...volumes import volume as _volume
23
+
24
+ from typing import List, TYPE_CHECKING
25
+
26
+ if TYPE_CHECKING:
27
+ from ...locations.boundingbox import BoundingBox
28
+ from ...volumes.providers import provider
29
+
30
+
31
class ImageAnchor(_anchor.AnatomicalAnchor):
    """Anatomical anchor defined by the spatial extent of an image volume.

    The anchor's location (the volume's bounding box) is only computed on
    demand, since determining it requires access to the image data.
    """

    def __init__(self, volume: _volume.Volume, region: str = None):
        # Species is taken from the volume's reference space; the location
        # stays unset here and is filled lazily by the `location` property.
        _anchor.AnatomicalAnchor.__init__(
            self, species=volume.space.species, location=None, region=region
        )
        self.volume = volume

    @property
    def space(self):
        """Reference space of the anchored image volume."""
        return self.volume.space

    @property
    def location(self):
        """
        Loads the bounding box only if required, since it demands image data access.
        """
        if self._location_cached is None:
            # use the unclipped bounding box to preserve existing behaviour
            self._location_cached = self.volume.get_boundingbox(clip=False)
        return self._location_cached

    def __str__(self):
        return f"Bounding box of image in {self.space.name}"
56
+
57
+
58
class Image(feature.Feature, _volume.Volume):
    """A data feature which is itself an image volume anchored in a reference space."""

    def __init__(
        self,
        name: str,
        modality: str,
        space_spec: dict,
        providers: List["provider.VolumeProvider"],
        region: str = None,
        datasets: List = None,
        bbox: "BoundingBox" = None,
        id: str = None,
        prerelease: bool = False,
    ):
        """
        Parameters
        ----------
        name : str
            Human-readable name of the image feature.
        modality : str
            Modality label of the feature (e.g. "blockface").
        space_spec : dict
            Specification of the reference space of the image volume.
        providers : list of VolumeProvider
            Providers supplying the actual image data.
        region : str, optional
            Name of the region the image is anchored to.
        datasets : list, optional
            Datasets associated with this feature. Defaults to an empty list.
        bbox : BoundingBox, optional
            Precomputed bounding box of the volume.
        id : str, optional
            Unique identifier of the feature.
        prerelease : bool, default: False
            Whether the feature is a pre-release.
        """
        # Fix: avoid the shared mutable default argument ([]); create a
        # fresh list per call instead.
        if datasets is None:
            datasets = []
        feature.Feature.__init__(
            self,
            modality=modality,
            description=None,  # computed lazily by the `description` property
            anchor=None,  # computed lazily via `self._anchor_cached`
            datasets=datasets,
            id=id,
            prerelease=prerelease,
        )

        _volume.Volume.__init__(
            self,
            space_spec=space_spec,
            providers=providers,
            name=name,
            datasets=datasets,
            bbox=bbox,
        )

        self._anchor_cached = ImageAnchor(self, region=region)
        self._description_cached = None
        self._name_cached = name

    def _to_zip(self, fh: ZipFile):
        """Write this feature into an open zip archive.

        The image data itself cannot be exported yet; a placeholder text
        file is written instead.
        """
        super()._to_zip(fh)
        # How, what do we download?
        # e.g. for marcel's volume, do we download at full resolution?
        # cannot implement until Volume has an export friendly method
        fh.writestr("volume.txt", "Volume cannot be downloaded yet.")

    @property
    def name(self):
        """Name of the feature, suffixed with its modality when explicitly set."""
        if self._name_cached is None:
            # NOTE(review): Feature.name looks like it may be a property on
            # the base class, in which case this call form would fail at
            # runtime — confirm against siibra/features/feature.py.
            return feature.Feature.name(self)
        else:
            return f"{self._name_cached} ({self.modality})"

    @property
    def description(self):
        """Lazily constructed textual description of the image feature."""
        if self._description_cached is None:
            self._description_cached = (
                f"Image feature with modality {self.modality} " f"at {self.anchor}"
            )
        return self._description_cached
@@ -0,0 +1,26 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Multimodal data features in 2D section."""
16
+
17
+ from . import image
18
+
19
+
20
class CellbodyStainedSection(
    image.Image,
    configuration_folder='features/images/sections/cellbody',
    category="cellular"
):
    """A 2D histological brain section prepared with cell body staining."""

    def __init__(self, **kwargs):
        """Forward all construction arguments to Image with a fixed modality."""
        super().__init__(**kwargs, modality="cell body staining")
@@ -0,0 +1,88 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Multimodal data features as volumes."""
16
+
17
+ from . import image
18
+
19
+
20
class CellBodyStainedVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/cellbody",
    category="cellular"
):
    """Volume of interest showing a cell body staining."""

    def __init__(self, **kwargs):
        """Forward all construction arguments to Image with a fixed modality."""
        super().__init__(**kwargs, modality="cell body staining")
27
+
28
+
29
class BlockfaceVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/blockface",
    category="macrostructural"
):
    """Volume of interest from blockface imaging."""

    def __init__(self, **kwargs):
        """Forward all construction arguments to Image with a fixed modality."""
        super().__init__(**kwargs, modality="blockface")
36
+
37
+
38
class DTIVolumeOfInterest(
    image.Image,
    # Fix: the folder previously pointed at "features/images/vois/blockface",
    # an apparent copy/paste slip from BlockfaceVolumeOfInterest; DTI
    # configurations live in their own subfolder.
    configuration_folder="features/images/vois/dti",
    category="fibres"
):
    """Volume of interest from diffusion tensor imaging (DTI)."""

    def __init__(self, modality, **kwargs):
        """Forward construction arguments to Image, passing the modality through."""
        image.Image.__init__(self, **kwargs, modality=modality)
45
+
46
+
47
class PLIVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/pli",
    category="fibres"
):
    """Volume of interest from polarized light imaging (PLI)."""

    def __init__(self, modality, **kwargs):
        """Forward construction arguments to Image, passing the modality through."""
        super().__init__(modality=modality, **kwargs)
54
+
55
+
56
class MRIVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/mri",
    category="macrostructural"
):
    """Volume of interest from magnetic resonance imaging (MRI)."""

    def __init__(self, modality, **kwargs):
        """Forward construction arguments to Image, passing the modality through."""
        super().__init__(modality=modality, **kwargs)
63
+
64
+
65
class XPCTVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/xpct",
    category="cellular"
):
    """Volume of interest from X-ray phase-contrast tomography (XPCT)."""

    def __init__(self, modality, **kwargs):
        """Forward construction arguments to Image, passing the modality through."""
        super().__init__(modality=modality, **kwargs)
72
+
73
+
74
class LSFMVolumeOfInterest(
    image.Image,
    configuration_folder="features/images/vois/lsfm",
    category="cellular"
):
    """Volume of interest from light sheet fluorescence microscopy (LSFM)."""

    def __init__(self, modality, **kwargs):
        """Forward construction arguments to Image, passing the modality through."""
        super().__init__(modality=modality, **kwargs)
81
+
82
+ # class SegmentedVolumeOfInterest(
83
+ # image.Image,
84
+ # configuration_folder="features/images/vois/segmentation",
85
+ # category="segmentation"
86
+ # ):
87
+ # def __init__(self, **kwargs):
88
+ # image.Image.__init__(self, **kwargs, modality="segmentation")
@@ -0,0 +1,24 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+ """Multimodal data features in tabular formats."""
16
+
17
+ from .bigbrain_intensity_profile import BigBrainIntensityProfile
18
+ from .cell_density_profile import CellDensityProfile
19
+ from .gene_expression import GeneExpressions
20
+ from .layerwise_bigbrain_intensities import LayerwiseBigBrainIntensities
21
+ from .layerwise_cell_density import LayerwiseCellDensity
22
+ from .receptor_density_fingerprint import ReceptorDensityFingerprint
23
+ from .receptor_density_profile import ReceptorDensityProfile
24
+ from .regional_timeseries_activity import RegionalBOLD
@@ -0,0 +1,77 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from . import cortical_profile
17
+
18
+ from typing import List, TYPE_CHECKING
19
+ if TYPE_CHECKING:
20
+ from ...features.anchor import AnatomicalAnchor
21
+
22
+
23
class BigBrainIntensityProfile(
    cortical_profile.CorticalProfile,
    category='cellular'
):
    """Cortical profile of BigBrain staining intensities at a surface vertex."""

    DESCRIPTION = (
        "Cortical profiles of BigBrain staining intensities computed by Konrad Wagstyl, "
        "as described in the publication 'Wagstyl, K., et al (2020). BigBrain 3D atlas of "
        "cortical layers: Cortical and laminar thickness gradients diverge in sensory and "
        "motor cortices. PLoS Biology, 18(4), e3000678. "
        # Fix: a trailing space was missing after the DOI, so the rendered
        # description read "...3000678.The data...".
        "http://dx.doi.org/10.1371/journal.pbio.3000678. "
        "The data is taken from the tutorial at "
        "https://github.com/kwagstyl/cortical_layers_tutorial. Each vertex is "
        "assigned to the regional map when queried."
    )

    _filter_attrs = cortical_profile.CorticalProfile._filter_attrs + ["location"]

    def __init__(
        self,
        anchor: "AnatomicalAnchor",
        depths: list,
        values: list,
        boundaries: list
    ):
        """
        Parameters
        ----------
        anchor : AnatomicalAnchor
            Anatomical anchor of the profile (vertex location and region).
        depths : list
            Relative cortical depths of the samples.
        values : list
            Staining intensity values, one per depth.
        boundaries : list
            Layer boundary depths, indexed by the lower layer of each boundary.
        """
        cortical_profile.CorticalProfile.__init__(
            self,
            description=self.DESCRIPTION,
            modality="Modified silver staining",
            anchor=anchor,
            depths=depths,
            values=values,
            unit="staining intensity",
            # each boundary tuple (i, i+1) gets the depth stored at index i
            boundary_positions={
                b: boundaries[b[0]]
                for b in cortical_profile.CorticalProfile.BOUNDARIES
            }
        )

    @property
    def location(self):
        """Location of the profile, as provided by its anatomical anchor."""
        return self.anchor.location

    @classmethod
    def _merge_anchors(cls, anchors: List['AnatomicalAnchor']):
        """Merge several anchors into one: locations are pooled into a point
        cloud, and region specifications are joined into one string.

        NOTE: `regions` is a set, so the order of the joined region
        specification is not deterministic.
        """
        from ...locations.pointcloud import from_points
        from ...features.anchor import AnatomicalAnchor

        location = from_points([anchor.location for anchor in anchors])
        regions = {anchor._regionspec for anchor in anchors}
        return AnatomicalAnchor(
            location=location,
            region=", ".join(regions),
            species='Homo sapiens'
        )
@@ -0,0 +1,298 @@
1
+ # Copyright 2018-2024
2
+ # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
3
+
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ from . import cortical_profile
17
+
18
+ from .. import anchor as _anchor
19
+ from ...commons import logger
20
+ from ...retrieval import requests
21
+
22
+ from skimage.draw import polygon
23
+ from skimage.transform import resize
24
+ import numpy as np
25
+ import pandas as pd
26
+
27
+ from io import BytesIO
28
+ from typing import Union, Tuple, Iterable
29
+
30
+
31
def cell_reader(bytes_buffer: bytes):
    """Parse a space-separated cell segmentation table from raw bytes.

    The first two bytes (the comment marker prefixing the header row) are
    skipped; the 'layer' and 'label' columns are cast to integers.
    """
    stream = BytesIO(bytes_buffer[2:])
    table = pd.read_csv(stream, delimiter=" ", header=0)
    return table.astype({"layer": int, "label": int})
35
+
36
+
37
def layer_reader(bytes_buffer: bytes):
    """Parse a space-separated layer information table from raw bytes.

    The first two bytes (the comment marker prefixing the header row) are
    skipped; the first column becomes the index.
    """
    stream = BytesIO(bytes_buffer[2:])
    return pd.read_csv(stream, delimiter=" ", header=0, index_col=0)
39
+
40
+
41
def poly_srt(poly):
    """Return polygon vertices ordered by ascending x (first column)."""
    order = poly[:, 0].argsort()
    return poly[order, :]
43
+
44
+
45
def poly_rev(poly):
    """Return polygon vertices ordered by descending x (first column)."""
    order = poly[:, 0].argsort()[::-1]
    return poly[order, :]
47
+
48
+
49
class PolyLine:
    """Simple polyline representation which allows equidistant sampling."""

    def __init__(self, pts):
        # pts: (N, 2) vertex array. Precompute the Euclidean length of each
        # of the N-1 segments between consecutive vertices.
        self.pts = pts
        self.lengths = [
            np.sqrt(np.sum((pts[i + 1, :] - pts[i, :]) ** 2))
            for i in range(pts.shape[0] - 1)
        ]

    def length(self):
        """Total length of the polyline."""
        return sum(self.lengths)

    def sample(self, d: Union[Iterable[float], np.ndarray, float]):
        """Sample point(s) at relative position(s) ``d`` along the polyline.

        Positions are clipped to [0, 1]. A scalar input yields a single
        point; an iterable yields an array of points.
        """
        # if d is iterable, we assume a list of sample positions
        try:
            iter(d)
        except TypeError:
            positions = [d]
        else:
            positions = d

        total = self.length()
        samples = []
        for raw in positions:
            rel = min(max(raw, 0), 1)
            target = rel * total
            covered = 0
            for seg, seg_len in enumerate(self.lengths):
                covered += seg_len
                if covered < target:
                    continue
                start, end = self.pts[seg, :], self.pts[seg + 1, :]
                # fractional position within the current segment
                frac = (target - covered + seg_len) / seg_len
                samples.append(start + (end - start) * frac)
                break

        return samples[0] if len(samples) == 1 else np.array(samples)
89
+
90
+
91
class CellDensityProfile(
    cortical_profile.CorticalProfile,
    configuration_folder="features/tabular/corticalprofiles/celldensity",
    category='cellular'
):
    """Layer-resolved cortical profile of segmented cell body densities,
    computed on the fly from a segmented cortical image patch (see
    DESCRIPTION for data provenance)."""

    DESCRIPTION = (
        "Cortical profile of estimated densities of detected cell bodies (in detected cells per 0.1 cube millimeter) "
        "obtained by applying a Deep Learning based instance segmentation algorithm (Contour Proposal Network; Upschulte "
        "et al., Neuroimage 2022) to a 1 micron resolution cortical image patch prepared with modified Silver staining. "
        "Densities have been computed per cortical layer after manual layer segmentation, by dividing the number of "
        "detected cells in that layer with the area covered by the layer. Therefore, each profile contains 6 measurement points. "
        "The cortical depth is estimated from the measured layer thicknesses."
    )

    # Volumetric shrinkage of BigBrain tissue caused by histological processing.
    BIGBRAIN_VOLUMETRIC_SHRINKAGE_FACTOR = 1.931

    _filter_attrs = cortical_profile.CorticalProfile._filter_attrs + ["location"]

    def __init__(
        self,
        section: int,
        patch: int,
        url: str,
        anchor: _anchor.AnatomicalAnchor,
        datasets: list = None,
        id: str = None,
        prerelease: bool = False,
    ):
        """
        Generate a cell density profile from a URL to a cloud folder
        formatted according to the structure used by Bludau/Dickscheid et al.

        Parameters
        ----------
        section : int
            BigBrain section number the patch was sampled from.
        patch : int
            Patch number within the section.
        url : str
            URL of the patch's "segments" file; the layer info file URL is
            derived from it by name substitution.
        anchor : AnatomicalAnchor
            Anatomical anchor of the profile.
        datasets : list, optional
            Associated datasets. Defaults to an empty list.
        id : str, optional
            Unique identifier of the feature.
        prerelease : bool, default: False
            Whether this is a pre-release feature.
        """
        # Fix: avoid the shared mutable default argument ([]).
        if datasets is None:
            datasets = []
        cortical_profile.CorticalProfile.__init__(
            self,
            description=self.DESCRIPTION,
            modality="Segmented cell body density",
            unit="cells / 0.1mm3",
            anchor=anchor,
            datasets=datasets,
            id=id,
            prerelease=prerelease,
        )
        self._step = 0.01  # depth sampling step, as a fraction of cortical depth
        self._url = url
        self._cell_loader = requests.HttpRequest(url, cell_reader)
        self._layer_loader = requests.HttpRequest(
            url.replace("segments", "layerinfo"), layer_reader
        )
        # lazily computed images; see the corresponding properties below
        self._density_image = None
        self._layer_mask = None
        self._depth_image = None
        self.section = section
        self.patch = patch

    @property
    def location(self):
        """Location of the profile, as provided by its anatomical anchor."""
        return self.anchor.location

    @property
    def shape(self):
        """(y,x) extent of the patch in pixels, inferred from cell coordinates."""
        return tuple(np.ceil(self.cells[["y", "x"]].max()).astype("int"))

    def boundary_annotation(self, boundary: Tuple[int, int]) -> np.ndarray:
        """Returns the annotation of a specific layer boundary."""
        shape_y, shape_x = self.shape

        # start of image patch
        if boundary == (-1, 0):
            return np.array([[0, 0], [shape_x, 0]])

        # end of image patch
        if boundary == (7, 8):
            return np.array([[0, shape_y], [shape_x, shape_y]])

        # retrieve polygon
        basename = "{}_{}.json".format(
            *(self.LAYERS[layer] for layer in boundary)
        ).replace("0_I", "0")
        poly_url = self._url.replace("segments.txt", basename)
        poly = poly_srt(np.array(requests.HttpRequest(poly_url).get()["segments"]))

        # ensure full width and trim to the image shape
        poly[0, 0] = 0
        poly[poly[:, 0] > shape_x, 0] = shape_x
        poly[poly[:, 1] > shape_y, 1] = shape_y

        return poly

    def layer_annotation(self, layer: int) -> np.ndarray:
        """Closed polygon of one cortical layer, built from its upper boundary,
        its reversed lower boundary, and the first point repeated to close it."""
        return np.vstack(
            (
                self.boundary_annotation((layer - 1, layer)),
                poly_rev(self.boundary_annotation((layer, layer + 1))),
                self.boundary_annotation((layer - 1, layer))[0, :],
            )
        )

    @property
    def layer_mask(self) -> np.ndarray:
        """Generates a layer mask from boundary annotations."""
        if self._layer_mask is None:
            self._layer_mask = np.zeros(np.array(self.shape, dtype=int) + 1, dtype="int")
            for layer in range(1, 8):
                pl = self.layer_annotation(layer)
                X, Y = polygon(pl[:, 0], pl[:, 1])
                self._layer_mask[Y, X] = layer
        return self._layer_mask

    @property
    def depth_image(self) -> np.ndarray:
        """Cortical depth image from layer boundary polygons by equidistant sampling."""

        if self._depth_image is None:
            logger.info("Calculating cell densities from cell and layer data...")
            # compute equidistant cortical depth image from inner and outer contour
            scale = 0.1
            depth_arr = np.zeros(np.ceil(np.array(self.shape) * scale).astype("int") + 1)

            # determine sufficient stepwidth for profile sampling
            # to match downscaled image resolution
            vstep, hstep = 1.0 / np.array(depth_arr.shape) / 2.0
            vsteps = np.arange(0, 1 + vstep, vstep)
            hsteps = np.arange(0, 1 + hstep, hstep)

            # build straight profiles between outer and inner cortical boundary
            s0 = PolyLine(self.boundary_annotation((0, 1)) * scale).sample(hsteps)
            s1 = PolyLine(self.boundary_annotation((6, 7)) * scale).sample(hsteps)
            profiles = [PolyLine(_.reshape(2, 2)) for _ in np.hstack((s0, s1))]

            # write sample depths to their location in the depth image
            for prof in profiles:
                prof_samples_as_index = prof.sample(vsteps).astype("int")
                depth_arr[prof_samples_as_index[:, 1], prof_samples_as_index[:, 0]] = vsteps

            # fix wm region, account for rounding error
            XY = self.layer_annotation(7) * scale
            depth_arr[polygon(XY[:, 1] - 1, XY[:, 0])] = 1
            depth_arr[-1, :] = 1

            # rescale depth image to original patch size
            self._depth_image = resize(depth_arr, self.density_image.shape)

        return self._depth_image

    @property
    def boundary_positions(self):
        """Mean cortical depth of each layer boundary, keyed by boundary tuple."""
        if self._boundary_positions is None:
            self._boundary_positions = {}
            for b in self.BOUNDARIES:
                XY = self.boundary_annotation(b).astype("int")
                self._boundary_positions[b] = self.depth_image[
                    XY[:, 1], XY[:, 0]
                ].mean()
        return self._boundary_positions

    @property
    def density_image(self) -> np.ndarray:
        """Cell density image of the patch, in cells per 0.1 mm^3."""
        if self._density_image is None:
            # Fix: the url was passed as a lazy logging argument without a
            # matching %s placeholder, which breaks message formatting when
            # the record is emitted.
            logger.debug("Computing density image for %s", self._url)
            # we integrate cell counts into 2D bins
            # of square shape with a fixed sidelength
            pixel_size_micron = 100
            counts, xedges, yedges = np.histogram2d(
                self.cells.y,
                self.cells.x,
                bins=np.round(np.array(self.shape) / pixel_size_micron).astype("int"),
            )

            # rescale the counts from count / pixel_size**2 to count / 0.1mm^3,
            # assuming 20 micron section thickness.
            counts = counts / pixel_size_micron ** 2 / 20 * 100 ** 3

            # apply the Bigbrain shrinkage factor
            # TODO The planar correction factor was used for the datasets, but should
            # clarify if the full volumetric correction factor is not more correct.
            counts /= np.cbrt(self.BIGBRAIN_VOLUMETRIC_SHRINKAGE_FACTOR) ** 2

            # to go to 0.1 millimeter cube, we multiply by 0.1 / 0.0002 = 500
            self._density_image = resize(counts, self.layer_mask.shape, order=2)

        return self._density_image

    @property
    def cells(self) -> pd.DataFrame:
        """Table of detected cells (lazily fetched via HTTP)."""
        return self._cell_loader.get()

    @property
    def layers(self) -> pd.DataFrame:
        """Table of layer information (lazily fetched via HTTP)."""
        return self._layer_loader.get()

    @property
    def _depths(self):
        # bin centers of the depth sampling grid
        return np.arange(0, 1, self._step) + self._step / 2

    @property
    def _values(self):
        # TODO: release a dataset update instead of on the fly computation
        densities = []
        delta = self._step / 2.0
        for d in self._depths:
            mask = (self.depth_image >= d - delta) & (self.depth_image < d + delta)
            if np.sum(mask) > 0:
                densities.append(self.density_image[mask].mean())
            else:
                densities.append(np.nan)
        return np.asanyarray(densities)