siibra 0.5a2__py3-none-any.whl → 1.0.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of siibra might be problematic.
- siibra/VERSION +1 -1
- siibra/__init__.py +20 -12
- siibra/commons.py +145 -90
- siibra/configuration/__init__.py +1 -1
- siibra/configuration/configuration.py +22 -17
- siibra/configuration/factory.py +177 -128
- siibra/core/__init__.py +1 -8
- siibra/core/{relation_qualification.py → assignment.py} +17 -14
- siibra/core/atlas.py +66 -35
- siibra/core/concept.py +81 -39
- siibra/core/parcellation.py +83 -67
- siibra/core/region.py +569 -263
- siibra/core/space.py +7 -39
- siibra/core/structure.py +111 -0
- siibra/exceptions.py +63 -0
- siibra/experimental/__init__.py +19 -0
- siibra/experimental/contour.py +61 -0
- siibra/experimental/cortical_profile_sampler.py +57 -0
- siibra/experimental/patch.py +98 -0
- siibra/experimental/plane3d.py +256 -0
- siibra/explorer/__init__.py +16 -0
- siibra/explorer/url.py +112 -52
- siibra/explorer/util.py +31 -9
- siibra/features/__init__.py +73 -8
- siibra/features/anchor.py +75 -196
- siibra/features/connectivity/__init__.py +1 -1
- siibra/features/connectivity/functional_connectivity.py +2 -2
- siibra/features/connectivity/regional_connectivity.py +99 -10
- siibra/features/connectivity/streamline_counts.py +1 -1
- siibra/features/connectivity/streamline_lengths.py +1 -1
- siibra/features/connectivity/tracing_connectivity.py +1 -1
- siibra/features/dataset/__init__.py +1 -1
- siibra/features/dataset/ebrains.py +3 -3
- siibra/features/feature.py +219 -110
- siibra/features/image/__init__.py +1 -1
- siibra/features/image/image.py +21 -13
- siibra/features/image/sections.py +1 -1
- siibra/features/image/volume_of_interest.py +1 -1
- siibra/features/tabular/__init__.py +1 -1
- siibra/features/tabular/bigbrain_intensity_profile.py +24 -13
- siibra/features/tabular/cell_density_profile.py +111 -69
- siibra/features/tabular/cortical_profile.py +82 -16
- siibra/features/tabular/gene_expression.py +117 -6
- siibra/features/tabular/layerwise_bigbrain_intensities.py +7 -9
- siibra/features/tabular/layerwise_cell_density.py +9 -24
- siibra/features/tabular/receptor_density_fingerprint.py +11 -6
- siibra/features/tabular/receptor_density_profile.py +12 -15
- siibra/features/tabular/regional_timeseries_activity.py +74 -18
- siibra/features/tabular/tabular.py +17 -8
- siibra/livequeries/__init__.py +1 -7
- siibra/livequeries/allen.py +139 -77
- siibra/livequeries/bigbrain.py +104 -128
- siibra/livequeries/ebrains.py +7 -4
- siibra/livequeries/query.py +1 -2
- siibra/locations/__init__.py +32 -25
- siibra/locations/boundingbox.py +153 -127
- siibra/locations/location.py +45 -80
- siibra/locations/point.py +97 -83
- siibra/locations/pointcloud.py +349 -0
- siibra/retrieval/__init__.py +1 -1
- siibra/retrieval/cache.py +107 -13
- siibra/retrieval/datasets.py +9 -14
- siibra/retrieval/exceptions/__init__.py +2 -1
- siibra/retrieval/repositories.py +147 -53
- siibra/retrieval/requests.py +64 -29
- siibra/vocabularies/__init__.py +2 -2
- siibra/volumes/__init__.py +7 -9
- siibra/volumes/parcellationmap.py +396 -253
- siibra/volumes/providers/__init__.py +20 -0
- siibra/volumes/providers/freesurfer.py +113 -0
- siibra/volumes/{gifti.py → providers/gifti.py} +29 -18
- siibra/volumes/{neuroglancer.py → providers/neuroglancer.py} +204 -92
- siibra/volumes/{nifti.py → providers/nifti.py} +64 -44
- siibra/volumes/providers/provider.py +107 -0
- siibra/volumes/sparsemap.py +159 -260
- siibra/volumes/volume.py +720 -152
- {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/METADATA +25 -28
- siibra-1.0.0a1.dist-info/RECORD +84 -0
- {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/WHEEL +1 -1
- siibra/locations/pointset.py +0 -198
- siibra-0.5a2.dist-info/RECORD +0 -74
- {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/LICENSE +0 -0
- {siibra-0.5a2.dist-info → siibra-1.0.0a1.dist-info}/top_level.txt +0 -0
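The file list above also shows that the volume provider modules (gifti.py, neuroglancer.py, nifti.py) moved from siibra/volumes/ into a new siibra/volumes/providers/ subpackage, and that locations/pointset.py was replaced by locations/pointcloud.py. A hedged sketch of what the provider move means for code that imported these internal modules directly (inferred from the renames only; the public siibra API may be unaffected):

```python
# Hedged sketch, inferred from the file renames above; only relevant for code
# that imported these internal modules directly.
try:
    # siibra 1.0.0a1: provider modules live in a subpackage
    from siibra.volumes.providers import gifti, neuroglancer, nifti
except ImportError:
    # siibra 0.5a2: provider modules were top-level in siibra.volumes
    from siibra.volumes import gifti, neuroglancer, nifti
```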
siibra/features/image/image.py
CHANGED
@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -21,17 +21,18 @@ from .. import anchor as _anchor
 
 from ...volumes import volume as _volume
 
-from typing import List
+from typing import List, TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from ...locations.boundingbox import BoundingBox
+    from ...volumes.providers import provider
 
 
 class ImageAnchor(_anchor.AnatomicalAnchor):
 
     def __init__(self, volume: _volume.Volume, region: str = None):
         _anchor.AnatomicalAnchor.__init__(
-            self,
-            species=volume.space.species,
-            location=None,
-            region=region
+            self, species=volume.space.species, location=None, region=region
         )
         self.volume = volume
 
@@ -41,7 +42,9 @@ class ImageAnchor(_anchor.AnatomicalAnchor):
         Loads the bounding box only if required, since it demands image data access.
         """
         if self._location_cached is None:
-            self._location_cached = self.volume.
+            self._location_cached = self.volume.get_boundingbox(
+                clip=False
+            )  # use unclipped to preseve exisiting behaviour
         return self._location_cached
 
     @property
@@ -59,16 +62,21 @@ class Image(feature.Feature, _volume.Volume):
         name: str,
         modality: str,
         space_spec: dict,
-        providers: List[
+        providers: List["provider.VolumeProvider"],
         region: str = None,
         datasets: List = [],
+        bbox: "BoundingBox" = None,
+        id: str = None,
+        prerelease: bool = False,
     ):
         feature.Feature.__init__(
             self,
             modality=modality,
             description=None,  # lazy implementation below!
             anchor=None,  # lazy implementation below!
-            datasets=datasets
+            datasets=datasets,
+            id=id,
+            prerelease=prerelease,
         )
 
         _volume.Volume.__init__(
@@ -77,14 +85,15 @@ class Image(feature.Feature, _volume.Volume):
             providers=providers,
             name=name,
             datasets=datasets,
+            bbox=bbox,
         )
 
         self._anchor_cached = ImageAnchor(self, region=region)
         self._description_cached = None
         self._name_cached = name
 
-    def 
-        super().
+    def _to_zip(self, fh: ZipFile):
+        super()._to_zip(fh)
         # How, what do we download?
         # e.g. for marcel's volume, do we download at full resolution?
         # cannot implement until Volume has an export friendly method
@@ -101,7 +110,6 @@ class Image(feature.Feature, _volume.Volume):
     def description(self):
         if self._description_cached is None:
             self._description_cached = (
-                f"Image feature with modality {self.modality} "
-                f"at {self.anchor}"
+                f"Image feature with modality {self.modality} " f"at {self.anchor}"
             )
         return self._description_cached
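The new imports in image.py are guarded by typing.TYPE_CHECKING, the standard way to make names available to static type checkers without importing them at runtime (avoiding circular imports); the corresponding annotations then use string forward references such as "provider.VolumeProvider" and "BoundingBox". A minimal, self-contained sketch of that pattern with hypothetical module names:

```python
from typing import TYPE_CHECKING, List

if TYPE_CHECKING:
    # executed only by type checkers, never at runtime,
    # so it cannot introduce a circular import
    from mypackage.providers import VolumeProvider  # hypothetical module


class Image:
    def __init__(self, providers: List["VolumeProvider"]):
        # "VolumeProvider" is a string forward reference, resolved lazily
        self.providers = providers
```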
siibra/features/tabular/bigbrain_intensity_profile.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -15,7 +15,9 @@
 
 from . import cortical_profile
 
-from 
+from typing import List, TYPE_CHECKING
+if TYPE_CHECKING:
+    from ...features.anchor import AnatomicalAnchor
 
 
 class BigBrainIntensityProfile(
@@ -28,7 +30,7 @@ class BigBrainIntensityProfile(
         "as described in the publication 'Wagstyl, K., et al (2020). BigBrain 3D atlas of "
         "cortical layers: Cortical and laminar thickness gradients diverge in sensory and "
         "motor cortices. PLoS Biology, 18(4), e3000678. "
-        "http://dx.doi.org/10.1371/journal.pbio.3000678
+        "http://dx.doi.org/10.1371/journal.pbio.3000678."
         "The data is taken from the tutorial at "
         "https://github.com/kwagstyl/cortical_layers_tutorial. Each vertex is "
         "assigned to the regional map when queried."
@@ -38,18 +40,11 @@ class BigBrainIntensityProfile(
 
     def __init__(
         self,
-        
+        anchor: "AnatomicalAnchor",
         depths: list,
         values: list,
-        boundaries: list
-        location: point.Point
+        boundaries: list
     ):
-        from ..anchor import AnatomicalAnchor
-        anchor = AnatomicalAnchor(
-            location=location,
-            region=regionname,
-            species='Homo sapiens'
-        )
         cortical_profile.CorticalProfile.__init__(
             self,
             description=self.DESCRIPTION,
@@ -63,4 +58,20 @@ class BigBrainIntensityProfile(
                 for b in cortical_profile.CorticalProfile.BOUNDARIES
             }
         )
-
+
+    @property
+    def location(self):
+        return self.anchor.location
+
+    @classmethod
+    def _merge_anchors(cls, anchors: List['AnatomicalAnchor']):
+        from ...locations.pointcloud import from_points
+        from ...features.anchor import AnatomicalAnchor
+
+        location = from_points([anchor.location for anchor in anchors])
+        regions = {anchor._regionspec for anchor in anchors}
+        return AnatomicalAnchor(
+            location=location,
+            region=", ".join(regions),
+            species='Homo sapiens'
+        )
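With this change BigBrainIntensityProfile no longer builds its own anchor from a region name and a point; it receives an AnatomicalAnchor, exposes its location as a property, and _merge_anchors combines the anchors of many profiles into a single anchor whose location is a point cloud. A rough, self-contained illustration of that merge logic using hypothetical stand-in classes (not siibra's real AnatomicalAnchor or PointCloud types):

```python
from dataclasses import dataclass
from typing import List


@dataclass
class Anchor:  # hypothetical stand-in for siibra's AnatomicalAnchor
    location: object  # a single coordinate, or a list of coordinates after merging
    region: str


def merge_anchors(anchors: List[Anchor]) -> Anchor:
    # gather all coordinates into one "point cloud" and join the region specs,
    # mirroring the structure of BigBrainIntensityProfile._merge_anchors
    cloud = [a.location for a in anchors]
    regions = ", ".join(sorted({a.region for a in anchors}))
    return Anchor(location=cloud, region=regions)


merged = merge_anchors([Anchor((1.0, 2.0, 3.0), "hOc1"), Anchor((4.0, 5.0, 6.0), "hOc1")])
print(merged.region, len(merged.location))  # hOc1 2
```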
siibra/features/tabular/cell_density_profile.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -16,15 +16,77 @@
 from . import cortical_profile
 
 from .. import anchor as _anchor
-from ...commons import 
+from ...commons import logger
 from ...retrieval import requests
 
 from skimage.draw import polygon
 from skimage.transform import resize
-from io import BytesIO
 import numpy as np
 import pandas as pd
 
+from io import BytesIO
+from typing import Union, Tuple, Iterable
+
+
+def cell_reader(bytes_buffer: bytes):
+    return pd.read_csv(BytesIO(bytes_buffer[2:]), delimiter=" ", header=0).astype(
+        {"layer": int, "label": int}
+    )
+
+
+def layer_reader(bytes_buffer: bytes):
+    return pd.read_csv(BytesIO(bytes_buffer[2:]), delimiter=" ", header=0, index_col=0)
+
+
+def poly_srt(poly):
+    return poly[poly[:, 0].argsort(), :]
+
+
+def poly_rev(poly):
+    return poly[poly[:, 0].argsort()[::-1], :]
+
+
+class PolyLine:
+    """Simple polyline representation which allows equidistant sampling."""
+
+    def __init__(self, pts):
+        self.pts = pts
+        self.lengths = [
+            np.sqrt(np.sum((pts[i, :] - pts[i - 1, :]) ** 2))
+            for i in range(1, pts.shape[0])
+        ]
+
+    def length(self):
+        return sum(self.lengths)
+
+    def sample(self, d: Union[Iterable[float], np.ndarray, float]):
+        # if d is interable, we assume a list of sample positions
+        try:
+            iter(d)
+        except TypeError:
+            positions = [d]
+        else:
+            positions = d
+
+        samples = []
+        for s_ in positions:
+            s = min(max(s_, 0), 1)
+            target_distance = s * self.length()
+            current_distance = 0
+            for i, length in enumerate(self.lengths):
+                current_distance += length
+                if current_distance >= target_distance:
+                    p1 = self.pts[i, :]
+                    p2 = self.pts[i + 1, :]
+                    r = (target_distance - current_distance + length) / length
+                    samples.append(p1 + (p2 - p1) * r)
+                    break
+
+        if len(samples) == 1:
+            return samples[0]
+        else:
+            return np.array(samples)
+
 
 class CellDensityProfile(
     cortical_profile.CorticalProfile,
@@ -43,25 +105,7 @@ class CellDensityProfile(
 
     BIGBRAIN_VOLUMETRIC_SHRINKAGE_FACTOR = 1.931
 
-    _filter_attrs = cortical_profile.CorticalProfile._filter_attrs + ["
-
-    @classmethod
-    def CELL_READER(cls, b):
-        return pd.read_csv(BytesIO(b[2:]), delimiter=" ", header=0).astype(
-            {"layer": int, "label": int}
-        )
-
-    @classmethod
-    def LAYER_READER(cls, b):
-        return pd.read_csv(BytesIO(b[2:]), delimiter=" ", header=0, index_col=0)
-
-    @staticmethod
-    def poly_srt(poly):
-        return poly[poly[:, 0].argsort(), :]
-
-    @staticmethod
-    def poly_rev(poly):
-        return poly[poly[:, 0].argsort()[::-1], :]
+    _filter_attrs = cortical_profile.CorticalProfile._filter_attrs + ["location"]
 
     def __init__(
         self,
@@ -69,7 +113,9 @@ class CellDensityProfile(
         patch: int,
         url: str,
         anchor: _anchor.AnatomicalAnchor,
-        datasets: list = []
+        datasets: list = [],
+        id: str = None,
+        prerelease: bool = False,
     ):
         """
         Generate a cell density profile from a URL to a cloud folder
@@ -79,15 +125,17 @@ class CellDensityProfile(
             self,
             description=self.DESCRIPTION,
            modality="Segmented cell body density",
-            unit="
+            unit="cells / 0.1mm3",
             anchor=anchor,
             datasets=datasets,
+            id=id,
+            prerelease=prerelease,
         )
         self._step = 0.01
         self._url = url
-        self._cell_loader = requests.HttpRequest(url,
+        self._cell_loader = requests.HttpRequest(url, cell_reader)
         self._layer_loader = requests.HttpRequest(
-            url.replace("segments", "layerinfo"),
+            url.replace("segments", "layerinfo"), layer_reader
         )
         self._density_image = None
         self._layer_mask = None
@@ -95,49 +143,55 @@ class CellDensityProfile(
         self.section = section
         self.patch = patch
 
+    @property
+    def location(self):
+        return self.anchor.location
+
     @property
     def shape(self):
-        
+        """(y,x)"""
+        return tuple(np.ceil(self.cells[["y", "x"]].max()).astype("int"))
 
-    def boundary_annotation(self, boundary):
+    def boundary_annotation(self, boundary: Tuple[int, int]) -> np.ndarray:
         """Returns the annotation of a specific layer boundary."""
-        
+        shape_y, shape_x = self.shape
 
         # start of image patch
         if boundary == (-1, 0):
-            return np.array([[0, 0], [
+            return np.array([[0, 0], [shape_x, 0]])
 
         # end of image patch
         if boundary == (7, 8):
-            return np.array([[0,
+            return np.array([[0, shape_y], [shape_x, shape_y]])
 
         # retrieve polygon
         basename = "{}_{}.json".format(
            *(self.LAYERS[layer] for layer in boundary)
         ).replace("0_I", "0")
-        
-        poly = 
+        poly_url = self._url.replace("segments.txt", basename)
+        poly = poly_srt(np.array(requests.HttpRequest(poly_url).get()["segments"]))
 
-        # ensure full width
+        # ensure full width and trim to the image shape
         poly[0, 0] = 0
-        poly[
+        poly[poly[:, 0] > shape_x, 0] = shape_x
+        poly[poly[:, 1] > shape_y, 1] = shape_y
 
         return poly
 
-    def layer_annotation(self, layer):
+    def layer_annotation(self, layer: int) -> np.ndarray:
         return np.vstack(
             (
                 self.boundary_annotation((layer - 1, layer)),
-                
+                poly_rev(self.boundary_annotation((layer, layer + 1))),
                 self.boundary_annotation((layer - 1, layer))[0, :],
             )
         )
 
     @property
-    def layer_mask(self):
+    def layer_mask(self) -> np.ndarray:
         """Generates a layer mask from boundary annotations."""
         if self._layer_mask is None:
-            self._layer_mask = np.zeros(np.array(self.shape
+            self._layer_mask = np.zeros(np.array(self.shape, dtype=int) + 1, dtype="int")
             for layer in range(1, 8):
                 pl = self.layer_annotation(layer)
                 X, Y = polygon(pl[:, 0], pl[:, 1])
@@ -145,20 +199,20 @@ class CellDensityProfile(
         return self._layer_mask
 
     @property
-    def depth_image(self):
+    def depth_image(self) -> np.ndarray:
         """Cortical depth image from layer boundary polygons by equidistant sampling."""
 
         if self._depth_image is None:
-            
+            logger.info("Calculating cell densities from cell and layer data...")
             # compute equidistant cortical depth image from inner and outer contour
             scale = 0.1
-            
+            depth_arr = np.zeros(np.ceil(np.array(self.shape) * scale).astype("int") + 1)
 
             # determine sufficient stepwidth for profile sampling
             # to match downscaled image resolution
-            vstep, hstep = 1.0 / np.array(
+            vstep, hstep = 1.0 / np.array(depth_arr.shape) / 2.0
             vsteps = np.arange(0, 1 + vstep, vstep)
-            hsteps = np.arange(0, 1 +
+            hsteps = np.arange(0, 1 + hstep, hstep)
 
             # build straight profiles between outer and inner cortical boundary
             s0 = PolyLine(self.boundary_annotation((0, 1)) * scale).sample(hsteps)
@@ -167,16 +221,16 @@ class CellDensityProfile(
 
             # write sample depths to their location in the depth image
             for prof in profiles:
-                
-                
+                prof_samples_as_index = prof.sample(vsteps).astype("int")
+                depth_arr[prof_samples_as_index[:, 1], prof_samples_as_index[:, 0]] = vsteps
 
             # fix wm region, account for rounding error
             XY = self.layer_annotation(7) * scale
-            
-            
+            depth_arr[polygon(XY[:, 1] - 1, XY[:, 0])] = 1
+            depth_arr[-1, :] = 1
 
             # rescale depth image to original patch size
-            self._depth_image = resize(
+            self._depth_image = resize(depth_arr, self.density_image.shape)
 
         return self._depth_image
 
@@ -192,7 +246,7 @@ class CellDensityProfile(
         return self._boundary_positions
 
     @property
-    def density_image(self):
+    def density_image(self) -> np.ndarray:
         if self._density_image is None:
             logger.debug("Computing density image for", self._url)
             # we integrate cell counts into 2D bins
@@ -201,9 +255,7 @@ class CellDensityProfile(
             counts, xedges, yedges = np.histogram2d(
                 self.cells.y,
                 self.cells.x,
-                bins=(np.array(self.
-                    "int"
-                ),
+                bins=np.round(np.array(self.shape) / pixel_size_micron).astype("int"),
             )
 
             # rescale the counts from count / pixel_size**2 to count / 0.1mm^3,
@@ -221,19 +273,20 @@ class CellDensityProfile(
         return self._density_image
 
     @property
-    def cells(self):
+    def cells(self) -> pd.DataFrame:
         return self._cell_loader.get()
 
     @property
-    def layers(self):
+    def layers(self) -> pd.DataFrame:
         return self._layer_loader.get()
 
     @property
     def _depths(self):
-        return 
+        return np.arange(0, 1, self._step) + self._step / 2
 
     @property
     def _values(self):
+        # TODO: release a dataset update instead of on the fly computation
         densities = []
         delta = self._step / 2.0
         for d in self._depths:
@@ -241,16 +294,5 @@ class CellDensityProfile(
             if np.sum(mask) > 0:
                 densities.append(self.density_image[mask].mean())
             else:
-                densities.append(np.
-        return densities
-
-    @property
-    def key(self):
-        assert len(self.species) == 1
-        return create_key("{}_{}_{}_{}_{}".format(
-            self.id,
-            self.species[0]['name'],
-            self.regionspec,
-            self.section,
-            self.patch
-        ))
+                densities.append(np.nan)
+        return np.asanyarray(densities)
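The helpers that used to be classmethods and staticmethods of CellDensityProfile (CELL_READER, LAYER_READER, poly_srt, poly_rev) are now module-level functions, and PolyLine is a module-level class that samples points at relative positions along a polyline. A short usage sketch of PolyLine, assuming it can be imported from the new module path shown above:

```python
import numpy as np
from siibra.features.tabular.cell_density_profile import PolyLine  # path as shown in the diff

# an L-shaped polyline with two segments of length 1.0 each (total length 2.0)
pts = np.array([[0.0, 0.0], [1.0, 0.0], [1.0, 1.0]])
line = PolyLine(pts)

print(line.length())                 # 2.0
print(line.sample(0.25))             # [0.5 0. ] -> halfway along the first segment
print(line.sample([0.0, 0.5, 1.0]))  # start, corner, and end point of the polyline
```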
siibra/features/tabular/cortical_profile.py
CHANGED

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");
@@ -19,7 +19,7 @@ from ..feature import Compoundable
 from .. import anchor as _anchor
 
 import pandas as pd
-from typing import Union, Dict, Tuple
+from typing import Union, Dict, Tuple, List
 from textwrap import wrap
 import numpy as np
 
@@ -56,7 +56,9 @@ class CorticalProfile(tabular.Tabular, Compoundable):
         values: Union[list, np.ndarray] = None,
         unit: str = None,
         boundary_positions: Dict[Tuple[int, int], float] = None,
-        datasets: list = []
+        datasets: list = [],
+        id: str = None,
+        prerelease: bool = False,
     ):
         """Initialize profile.
 
@@ -96,21 +98,23 @@ class CorticalProfile(tabular.Tabular, Compoundable):
            description=description,
            anchor=anchor,
            data=None,  # lazy loader below
-            datasets=datasets
+            datasets=datasets,
+            id=id,
+            prerelease=prerelease,
        )
 
     def _check_sanity(self):
         # check plausibility of the profile
-        assert isinstance(self._depths, (list, np.ndarray))
-        assert isinstance(self._values, (list, np.ndarray))
-        assert len(self._values) == len(self._depths)
-        assert all(0 <= d <= 1 for d in self._depths)
+        assert isinstance(self._depths, (list, np.ndarray)), "Some depths are not valid"
+        assert isinstance(self._values, (list, np.ndarray)), "Some values are not valid"
+        assert len(self._values) == len(self._depths), "There exist uneven number of depths and values"
+        assert all(0 <= d <= 1 for d in self._depths), "Some depths is not between 0 and 1"
         if self.boundaries_mapped:
-            assert all(0 <= d <= 1 for d in self.boundary_positions.values())
+            assert all(0 <= d <= 1 for d in self.boundary_positions.values()), "Some boundary positions are not between 0 and 1"
            assert all(
                layerpair in self.BOUNDARIES
                for layerpair in self.boundary_positions.keys()
-            )
+            ), "Some value in BOUNDARIES are not mapped in boundary_positions"
 
     @property
     def unit(self) -> str:
@@ -159,8 +163,32 @@ class CorticalProfile(tabular.Tabular, Compoundable):
     def data(self):
         """Return a pandas Series representing the profile."""
         self._check_sanity()
-        
-        
+        iscompound = len(self._values.shape) > 1 and self._values.shape[1] == 2
+        if iscompound:
+            columns = [f"{self.modality} mean ({self.unit})", "std"]
+        else:
+            columns = [f"{self.modality} ({self.unit})"]
+        return pd.DataFrame(self._values, index=self._depths, columns=columns)
+
+    @classmethod
+    def _merge_elements(
+        cls,
+        elements: List["CorticalProfile"],
+        description: str,
+        modality: str,
+        anchor: _anchor.AnatomicalAnchor,
+    ):
+        assert all(np.array_equal(elements[0]._depths, f._depths) for f in elements)
+        assert len({f.unit for f in elements}) == 1
+        values_stacked = np.stack([f._values for f in elements])
+        return CorticalProfile(
+            description=description,
+            modality=modality,
+            anchor=anchor,
+            depths=np.stack([f._depths for f in elements]).mean(0),
+            values=np.stack([values_stacked.mean(0), values_stacked.std(0)]).T,
+            unit=elements[0].unit,
+            boundary_positions=None,
        )
 
     def plot(self, *args, backend="matplotlib", **kwargs):
@@ -180,12 +208,17 @@ class CorticalProfile(tabular.Tabular, Compoundable):
         kwargs["title"] = kwargs.get("title", "\n".join(wrap(self.name, wrapwidth)))
         layercolor = kwargs.pop("layercolor", "gray")
 
+        iscompound = len(self._values.shape) > 1 and self._values.shape[1] == 2
+        ymax = max(
+            0,
+            sum(self._values.max(axis=0)) if iscompound else self._values.max()
+        )
         if backend == "matplotlib":
             kwargs["xlabel"] = kwargs.get("xlabel", "Cortical depth")
             kwargs["ylabel"] = kwargs.get("ylabel", self.unit)
             kwargs["grid"] = kwargs.get("grid", True)
-            
-            axs
+            axs = self.data.iloc[:, 0].plot(*args, **kwargs, backend=backend)
+            axs.set_ylim(kwargs.get("ylim", (0, ymax)))
 
             if self.boundaries_mapped:
                 bvals = list(self.boundary_positions.values())
@@ -201,14 +234,22 @@ class CorticalProfile(tabular.Tabular, Compoundable):
                 axs.axvspan(d1, d2, color=layercolor, alpha=0.3)
 
             axs.set_title(axs.get_title(), fontsize="medium")
+
+            if iscompound:
+                axs.set_ylabel(f"average {kwargs['ylabel']} \u00b1 std")
+                av = self.data.values[:, 0]
+                std = self.data.values[:, 1]
+                axs.fill_between(self.data.index.values, av - std, av + std, alpha=0.5)
+
             return axs
+
         elif backend == "plotly":
             kwargs["title"] = kwargs["title"].replace("\n", "<br>")
             kwargs["labels"] = {
                 "index": kwargs.pop("xlabel", None) or kwargs.pop("index", "Cortical depth"),
                 "value": kwargs.pop("ylabel", None) or kwargs.pop("value", self.unit)
             }
-            fig = self.data.plot(*args, **kwargs, backend=backend)
+            fig = self.data.iloc[:, 0].plot(*args, **kwargs, backend=backend)
             if self.boundaries_mapped:
                 bvals = list(self.boundary_positions.values())
                 for i, (d1, d2) in enumerate(list(zip(bvals[:-1], bvals[1:]))):
@@ -219,12 +260,29 @@ class CorticalProfile(tabular.Tabular, Compoundable):
                     )
             fig.update_layout(
                 showlegend=False,
-                yaxis_range=(0,
+                yaxis_range=(0, ymax),
                 title=dict(
                     automargin=True, yref="container", xref="container",
                     pad=dict(t=40), xanchor="left", yanchor="top"
                 )
             )
+            if iscompound:
+                from plotly.graph_objects import Scatter
+                x = self.data.index.values
+                av = self.data.values[:, 0]
+                std = self.data.values[:, 1]
+                fig.update_layout(yaxis_title=f"average {kwargs['labels']['value']} ± std")
+                fig.add_traces(
+                    Scatter(
+                        x=np.concatenate((x, x[::-1])),  # x, then x reversed
+                        y=np.concatenate((av + std, (av - std)[::-1])),  # upper, then lower reversed
+                        fill='toself',
+                        fillcolor='rgba(0,100,80,0.5)',
+                        line=dict(color='rgba(255,255,255,0)'),
+                        hoverinfo="skip",
+                        showlegend=False
+                    )
+                )
             return fig
         else:
             return self.data.plot(*args, **kwargs, backend=backend)
@@ -254,3 +312,11 @@ class CorticalProfile(tabular.Tabular, Compoundable):
                 f"'_values' not available for {self.__class__.__name__}."
             )
         return self._values_cached
+
+    @property
+    def name(self):
+        if hasattr(self, "receptor"):
+            return super().name + f": {self.receptor}"
+        if hasattr(self, "location"):
+            return super().name + f": {self.location.coordinate}"
+        return super().name
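The new _merge_elements classmethod is what makes CorticalProfile compoundable: the values of the merged profiles are stacked and stored as a two-column array (mean, std), which data then exposes as mean and std columns and plot renders with a ±std band. A plain-numpy sketch of that stacking, outside of siibra, to show the shape of the compound values:

```python
import numpy as np

# three profiles sampled at the same five cortical depths
values_stacked = np.stack([
    np.array([1.0, 2.0, 3.0, 4.0, 5.0]),
    np.array([1.5, 2.5, 3.5, 4.5, 5.5]),
    np.array([0.5, 1.5, 2.5, 3.5, 4.5]),
])

# column 0 = mean across profiles, column 1 = std, as stored by _merge_elements
compound_values = np.stack([values_stacked.mean(0), values_stacked.std(0)]).T
print(compound_values.shape)  # (5, 2)
print(compound_values[0])     # [1.         0.40824829] -> mean and std at the first depth
```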