siibra 1.0a14.tar.gz → 1.0a19.tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
- {siibra-1.0a14/siibra.egg-info → siibra-1.0a19}/PKG-INFO +1 -1
- siibra-1.0a19/siibra/VERSION +1 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/__init__.py +12 -2
- {siibra-1.0a14 → siibra-1.0a19}/siibra/commons.py +3 -2
- {siibra-1.0a14 → siibra-1.0a19}/siibra/configuration/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/configuration/configuration.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/configuration/factory.py +164 -117
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/assignment.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/atlas.py +4 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/concept.py +18 -9
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/parcellation.py +9 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/region.py +35 -65
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/space.py +3 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/core/structure.py +1 -2
- {siibra-1.0a14 → siibra-1.0a19}/siibra/exceptions.py +9 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/explorer/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/explorer/url.py +15 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/explorer/util.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/anchor.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/functional_connectivity.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/regional_connectivity.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/streamline_counts.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/streamline_lengths.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/connectivity/tracing_connectivity.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/dataset/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/dataset/ebrains.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/feature.py +39 -15
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/image/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/image/image.py +18 -13
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/image/sections.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/image/volume_of_interest.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/bigbrain_intensity_profile.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/cell_density_profile.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/cortical_profile.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/gene_expression.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/layerwise_bigbrain_intensities.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/layerwise_cell_density.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/receptor_density_fingerprint.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/receptor_density_profile.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/regional_timeseries_activity.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/features/tabular/tabular.py +5 -3
- {siibra-1.0a14 → siibra-1.0a19}/siibra/livequeries/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/livequeries/allen.py +9 -6
- {siibra-1.0a14 → siibra-1.0a19}/siibra/livequeries/bigbrain.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/livequeries/ebrains.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/livequeries/query.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/locations/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/locations/boundingbox.py +51 -17
- {siibra-1.0a14 → siibra-1.0a19}/siibra/locations/location.py +12 -4
- {siibra-1.0a14 → siibra-1.0a19}/siibra/locations/point.py +10 -5
- {siibra-1.0a14 → siibra-1.0a19}/siibra/locations/pointset.py +45 -11
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/cache.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/datasets.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/exceptions/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/repositories.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/retrieval/requests.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/vocabularies/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/parcellationmap.py +38 -18
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/__init__.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/freesurfer.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/gifti.py +1 -1
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/neuroglancer.py +7 -7
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/nifti.py +8 -4
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/providers/provider.py +2 -2
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/sparsemap.py +4 -2
- {siibra-1.0a14 → siibra-1.0a19}/siibra/volumes/volume.py +114 -16
- {siibra-1.0a14 → siibra-1.0a19/siibra.egg-info}/PKG-INFO +1 -1
- siibra-1.0a14/siibra/VERSION +0 -1
- {siibra-1.0a14 → siibra-1.0a19}/LICENSE +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/MANIFEST.in +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/README.rst +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/setup.cfg +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/setup.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/experimental/__init__.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/experimental/contour.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/experimental/cortical_profile_sampler.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/experimental/patch.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/experimental/plane3d.py +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/vocabularies/gene_names.json +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/vocabularies/receptor_symbols.json +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra/vocabularies/region_aliases.json +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra.egg-info/SOURCES.txt +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra.egg-info/dependency_links.txt +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra.egg-info/requires.txt +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/siibra.egg-info/top_level.txt +0 -0
- {siibra-1.0a14 → siibra-1.0a19}/test/test_siibra.py +0 -0
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: siibra
-Version: 1.0a14
+Version: 1.0a19
 Summary: siibra - Software interfaces for interacting with brain atlases
 Home-page: https://github.com/FZJ-INM1-BDA/siibra-python
 Author: Big Data Analytics Group, Forschungszentrum Juelich, Institute of Neuroscience and Medicine (INM-1)

@@ -0,0 +1 @@
+1.0a19

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -107,6 +107,14 @@ def set_feasible_download_size(maxsize_gbyte):
 
 
 def set_cache_size(maxsize_gbyte: int):
+    """
+    siibra runs maintainance on its local cache to keep it under a predetermined
+    size of 2 gigabytes. This method changes the cache size.
+
+    Parameters
+    ----------
+    maxsize_gbyte : int
+    """
     assert maxsize_gbyte >= 0
     cache.SIZE_GIB = maxsize_gbyte
     logger.info(f"Set cache size to {maxsize_gbyte} GiB.")

@@ -150,5 +158,7 @@ def __dir__():
         "vocabularies",
         "__version__",
         "cache",
-        "warm_cache"
+        "warm_cache",
+        "set_cache_size",
+        "from_json",
     ]

@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -35,6 +35,7 @@ except ImportError:
     # support python 3.7
     from typing_extensions import TypedDict
 
+logging.addLevelName(21, "INFO_WO_PROGRESS_BARS")
 logger = logging.getLogger(__name__.split(os.path.extsep)[0])
 ch = logging.StreamHandler()
 formatter = logging.Formatter("[{name}:{levelname}] {message}", style="{")

@@ -297,7 +298,7 @@ def siibra_tqdm(iterable: Iterable[T] = None, *args, **kwargs):
     return tqdm(
         iterable,
         *args,
-        disable=kwargs.pop("disable", False) or (logger.level >
+        disable=kwargs.pop("disable", False) or (logger.level > logging.INFO),
         **kwargs
     )
 

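The `siibra_tqdm` change ties progress bars to the logger level: any level above `logging.INFO` disables them, which is what the new `INFO_WO_PROGRESS_BARS` level (21) registered in commons.py is for. A hedged sketch of how a caller could use it, assuming the package logger keeps its default name `siibra`:

import logging

# Level 21 sits just above logging.INFO (20): siibra_tqdm then passes disable=True
# to tqdm, while records logged at level 21 or higher are still emitted.
logging.getLogger("siibra").setLevel(21)
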
@@ -1,4 +1,4 @@
-# Copyright 2018-
+# Copyright 2018-2024
 # Institute of Neuroscience and Medicine (INM-1), Forschungszentrum Jülich GmbH
 
 # Licensed under the Apache License, Version 2.0 (the "License");

@@ -20,11 +20,11 @@ from ..features.tabular import (
     receptor_density_fingerprint,
     cell_density_profile,
     layerwise_cell_density,
-    regional_timeseries_activity
+    regional_timeseries_activity,
 )
 from ..features.image import sections, volume_of_interest
 from ..core import atlas, parcellation, space, region
-from ..locations import point, pointset
+from ..locations import point, pointset, boundingbox
 from ..retrieval import datasets, repositories
 from ..volumes import volume, sparsemap, parcellationmap
 from ..volumes.providers.provider import VolumeProvider

@@ -48,7 +48,9 @@ def build_type(type_str: str):
         @wraps(fn)
         def inner(*args, **kwargs):
             return fn(*args, **kwargs)
+
         return inner
+
     return outer
 
 

@@ -65,7 +67,9 @@ class Factory:
             )
         if "openminds/DatasetVersion" in spec.get("ebrains", {}):
             result.append(
-                datasets.EbrainsV3DatasetVersion(
+                datasets.EbrainsV3DatasetVersion(
+                    id=spec["ebrains"]["openminds/DatasetVersion"]
+                )
             )
         if "openminds/Dataset" in spec.get("ebrains", {}):
             result.append(

@@ -78,40 +82,41 @@ class Factory:
                 contributors=pub["authors"],
                 url=pub["url"],
                 description=pub["description"],
-                license=pub.get("license")
+                license=pub.get("license"),
             )
-            for pub in spec["publications"]
+            for pub in spec["publications"]
+            if pub.get("name")
         )
         return result
 
     @classmethod
     def extract_volumes(
-        cls,
-        spec,
-        space_id: str = None,
-        names: List[str] = None,
-        name_prefix: str = ""
+        cls, spec, space_id: str = None, names: List[str] = None, name_prefix: str = ""
     ):
         volume_specs = spec.get("volumes", [])
         if names:
             if len(names) != len(volume_specs) and len(names) == 1:
-                variants = [vol[
+                variants = [vol["variant"] for vol in volume_specs]
                 names = [f"{name_prefix}{names[0]} {var} variant" for var in variants]
         else:
             names = [f"{name_prefix} - volume {i}" for i in range(len(volume_specs))]
         for i, vspec in enumerate(volume_specs):
             if space_id:
-                if
-                assert
-
-
-                vspec[
+                if "space" in vspec:
+                    assert (
+                        vspec["space"]["@id"] == space_id
+                    ), "Space spec {vspec['space']} in volume field must be the same with space field in the configuration."
+                vspec["space"] = {"@id": space_id}
+            if (
+                names and vspec.get("name") is None
+            ):  # only use provided name if the volume has no specific name
+                vspec["name"] = names[i]
         return list(map(cls.build_volume, volume_specs))
 
     @classmethod
     def extract_decoder(cls, spec):
         decoder_spec = spec.get("decoder", {})
-        if decoder_spec["@type"].endswith(
+        if decoder_spec["@type"].endswith("csv"):
             kwargs = {k: v for k, v in decoder_spec.items() if k != "@type"}
             return lambda b: pd.read_csv(BytesIO(b), **kwargs)
         else:

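The reformatted `extract_volumes` keeps its naming rule: when a single name is given for several volume specs, each volume's variant is appended to that name. A standalone sketch of the rule with made-up variant values (plain Python, not the siibra API):

# Mirrors the naming logic in extract_volumes; the specs and names are invented.
volume_specs = [{"variant": "left"}, {"variant": "right"}]
names = ["cortical surface"]
name_prefix = ""

if len(names) != len(volume_specs) and len(names) == 1:
    variants = [vol["variant"] for vol in volume_specs]
    names = [f"{name_prefix}{names[0]} {var} variant" for var in variants]

print(names)  # ['cortical surface left variant', 'cortical surface right variant']
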
@@ -119,52 +124,52 @@ class Factory:
 
     @classmethod
     def extract_anchor(cls, spec):
-        if spec.get(
-            region = spec[
-        elif spec.get(
+        if spec.get("region"):
+            region = spec["region"]
+        elif spec.get("parcellation", {}).get("@id"):
             # a parcellation is a special region,
             # and can be used if no region is found
-            region = spec[
-        elif spec.get(
-            region = spec[
+            region = spec["parcellation"]["@id"]
+        elif spec.get("parcellation", {}).get("name"):
+            region = spec["parcellation"]["name"]
         else:
             region = None
 
-        if
-            location = cls.from_json(spec[
+        if "location" in spec:
+            location = cls.from_json(spec["location"])
         else:
             location = None
 
         if (region is None) and (location is None):
             print(spec)
-            raise RuntimeError(
+            raise RuntimeError(
+                "Spec provides neither region or location - no anchor can be extracted."
+            )
 
-        if
-            species = Species.decode(spec[
-        elif
-            species = Species.decode(spec[
+        if "species" in spec:
+            species = Species.decode(spec["species"])
+        elif "ebrains" in spec:
+            species = Species.decode(spec["ebrains"])
         else:
             raise ValueError(f"No species information found in spec {spec}")
 
         return anchor.AnatomicalAnchor(
-            region=region,
-            location=location,
-            species=species
+            region=region, location=location, species=species
         )
 
     @classmethod
     def extract_connector(cls, spec):
-        repospec = spec.get(
+        repospec = spec.get("repository", {})
         spectype = repospec["@type"]
         if spectype == "siibra/repository/zippedfile/v1.0.0":
-            return repositories.ZipfileConnector(repospec[
+            return repositories.ZipfileConnector(repospec["url"])
         if spectype == "siibra/repository/localfolder/v1.0.0":
-            return repositories.LocalFileRepository(repospec[
+            return repositories.LocalFileRepository(repospec["folder"])
         if spectype == "siibra/repository/gitlab/v1.0.0":
             return repositories.GitlabConnector(
-                server=repospec[
-                project=repospec[
-                reftag=repospec[
+                server=repospec["server"],
+                project=repospec["project"],
+                reftag=repospec["branch"],
             )
 
         logger.warning(

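As the rewritten `extract_anchor` makes explicit, a feature spec must carry a `region` (or a `parcellation` entry) and/or a `location`, plus species information in a `species` or `ebrains` field. A hypothetical minimal spec fragment that would pass those checks (the values are placeholders, not real configuration entries):

# Hypothetical spec fragment; only the keys inspected by extract_anchor are shown.
spec = {
    "region": "some region name",
    "species": "Homo sapiens",
}
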
@@ -179,7 +184,8 @@ class Factory:
         a = atlas.Atlas(
             spec["@id"],
             spec["name"],
-            species=Species.decode(spec.get(
+            species=Species.decode(spec.get("species")),
+            prerelease=spec.get("prerelease", False),
         )
         for space_id in spec["spaces"]:
             a._register_space(space_id)

@@ -193,13 +199,16 @@ class Factory:
         return space.Space(
             identifier=spec["@id"],
             name=spec["name"],
-            species=Species.decode(spec.get(
-            volumes=cls.extract_volumes(
+            species=Species.decode(spec.get("species")),
+            volumes=cls.extract_volumes(
+                spec, space_id=spec.get("@id"), names=[spec.get("name")]
+            ),
             shortname=spec.get("shortName", ""),
             description=spec.get("description"),
             modality=spec.get("modality"),
             publications=spec.get("publications", []),
             datasets=cls.extract_datasets(spec),
+            prerelease=spec.get("prerelease", False),
         )

@@ -213,6 +222,7 @@ class Factory:
             datasets=cls.extract_datasets(spec),
             rgb=spec.get("rgb", None),
             spec=spec,
+            prerelease=spec.get("prerelease", False),
         )
 
     @classmethod

@@ -228,17 +238,18 @@ class Factory:
         p = parcellation.Parcellation(
             identifier=spec["@id"],
             name=spec["name"],
-            species=Species.decode(spec.get(
+            species=Species.decode(spec.get("species")),
             regions=regions,
             shortname=spec.get("shortName", ""),
             description=spec.get("description", ""),
-            modality=spec.get(
+            modality=spec.get("modality", ""),
             publications=spec.get("publications", []),
             datasets=cls.extract_datasets(spec),
+            prerelease=spec.get("prerelease", False),
         )
 
         # add version object, if any is specified
-        versionspec = spec.get(
+        versionspec = spec.get("@version", None)
         if versionspec is not None:
             version = parcellation.ParcellationVersion(
                 name=versionspec.get("name", None),

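`build_atlas`, `build_space`, and `build_parcellation` (like the map and feature builders further down) now forward an optional `prerelease` flag read from the configuration spec. A hypothetical fragment showing where that key would sit in a parcellation spec:

# Hypothetical configuration fragment; only a few of the keys read by
# build_parcellation are shown, and the identifiers are placeholders.
spec = {
    "@id": "example-parcellation-id",
    "name": "Example parcellation",
    "prerelease": True,  # picked up via spec.get("prerelease", False)
}
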
@@ -246,35 +257,44 @@ class Factory:
                 collection=versionspec.get("collectionName", None),
                 prev_id=versionspec.get("@prev", None),
                 next_id=versionspec.get("@next", None),
-                deprecated=versionspec.get("deprecated", False)
+                deprecated=versionspec.get("deprecated", False),
             )
             p.version = version
 
         return p
 
     @classmethod
-
-
-
-
-        for srctype, provider_spec in spec.get("providers", {}).items():
+    def build_volumeproviders(cls, provider_specs: Dict) -> List["VolumeProvider"]:
+        providers: List[VolumeProvider] = []
+        for srctype, provider_spec in provider_specs.items():
             for ProviderType in VolumeProvider._SUBCLASSES:
                 if srctype == ProviderType.srctype:
                     providers.append(ProviderType(provider_spec))
                     break
             else:
                 if srctype not in cls._warnings_issued:
-                    logger.warning(
+                    logger.warning(
+                        f"No provider defined for volume Source type {srctype}"
+                    )
                     cls._warnings_issued.append(srctype)
-
         assert all([isinstance(p, VolumeProvider) for p in providers])
+        return providers
+
+    @classmethod
+    @build_type("siibra/volume/v0.0.1")
+    def build_volume(cls, spec):
         result = volume.Volume(
             space_spec=spec.get("space", {}),
-            providers=providers,
+            providers=cls.build_volumeproviders(spec.get("providers")),
             name=spec.get("name", ""),
             variant=spec.get("variant"),
             datasets=cls.extract_datasets(spec),
+            bbox=cls.build_boundingbox(spec),
         )
+        if result._boundingbox is not None:
+            assert (
+                result._boundingbox._space_spec == result._space_spec
+            ), "BoundingBox of a volume cannot be in a different space than the volume's space."
 
         return result
 

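`build_volume` now also builds an optional bounding box from the spec and asserts that its space spec matches the volume's own space spec. A hypothetical volume spec fragment that satisfies the new constraint (all identifiers are placeholders):

# Hypothetical fragment: the bounding box must be declared in the same space as
# the volume, otherwise the new assertion in build_volume fails.
space = {"@id": "example-space-id"}
vspec = {
    "space": space,
    "providers": {},  # srctype -> provider spec, resolved by build_volumeproviders
    "boundingbox": {
        "space": space,  # must match vspec["space"]
        "coordinates": [[0, 0, 0], [10, 10, 10]],
    },
}
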
@@ -283,10 +303,16 @@ class Factory:
     def build_map(cls, spec):
         # maps have no configured identifier - we require the spec filename to build one
         assert "filename" in spec
-        basename = path.splitext(path.basename(spec[
-        name =
+        basename = path.splitext(path.basename(spec["filename"]))[0]
+        name = (
+            basename.replace("-", " ")
+            .replace("_", " ")
+            .replace("continuous", "statistical")
+        )
         identifier = f"{spec['@type'].replace('/', '-')}_{basename}"
-        volumes = cls.extract_volumes(
+        volumes = cls.extract_volumes(
+            spec, space_id=spec["space"].get("@id"), name_prefix=basename
+        )
 
         if spec.get("sparsemap", {}).get("is_sparsemap"):
             Maptype = sparsemap.SparseMap

@@ -303,7 +329,8 @@ class Factory:
             description=spec.get("description"),
             modality=spec.get("modality"),
             publications=spec.get("publications", []),
-            datasets=cls.extract_datasets(spec)
+            datasets=cls.extract_datasets(spec),
+            prerelease=spec.get("prerelease", False),
         )
 
     @classmethod

@@ -320,11 +347,11 @@ class Factory:
     @build_type("https://openminds.ebrains.eu/sands/CoordinatePoint")
     @build_type("siibra/location/point/v0.1")
     def build_point(cls, spec):
-        if spec.get(
+        if spec.get("@type") == "https://openminds.ebrains.eu/sands/CoordinatePoint":
             space_id = spec["coordinateSpace"]["@id"]
             coord = list(np.float16(c["value"]) for c in spec["coordinates"])
             assert all(c["unit"]["@id"] == "id.link/mm" for c in spec["coordinates"])
-        elif spec.get(
+        elif spec.get("@type") == "siibra/location/point/v0.1":
             space_id = spec.get("space").get("@id")
             coord = spec.get("coordinate")
         else:

@@ -338,92 +365,110 @@ class Factory:
     @build_type("tmp/poly")
     @build_type("siibra/location/pointset/v0.1")
     def build_pointset(cls, spec):
-        if spec.get(
+        if spec.get("@type") == "tmp/poly":
             space_id = spec["coordinateSpace"]["@id"]
             coords = []
             for coord in spec["coordinates"]:
                 assert all(c["unit"]["@id"] == "id.link/mm" for c in coord)
                 coords.append(list(np.float16(c["value"]) for c in coord))
-        elif spec.get(
+        elif spec.get("@type") == "siibra/location/pointset/v0.1":
             space_id = spec.get("space").get("@id")
             coords = [tuple(c) for c in spec.get("coordinates")]
         return pointset.PointSet(coords, space=space_id)
 
+    @classmethod
+    @build_type("siibra/location/boundingbox/v0.1")
+    def build_boundingbox(cls, spec):
+        bboxspec = spec.get("boundingbox", None)
+        if bboxspec is None:
+            return None
+        space_spec = bboxspec.get("space")
+        coords = [tuple(c) for c in bboxspec.get("coordinates")]
+        return boundingbox.BoundingBox(coords[0], coords[1], space=space_spec)
+
     @classmethod
     @build_type("siibra/feature/fingerprint/receptor/v0.1")
     def build_receptor_density_fingerprint(cls, spec):
         return receptor_density_fingerprint.ReceptorDensityFingerprint(
-            tsvfile=spec[
+            tsvfile=spec["file"],
             anchor=cls.extract_anchor(spec),
             datasets=cls.extract_datasets(spec),
-            id=spec.get("@id", None)
+            id=spec.get("@id", None),
+            prerelease=spec.get("prerelease", False),
         )
 
     @classmethod
     @build_type("siibra/feature/fingerprint/celldensity/v0.1")
     def build_cell_density_fingerprint(cls, spec):
         return layerwise_cell_density.LayerwiseCellDensity(
-            segmentfiles=spec[
-            layerfiles=spec[
+            segmentfiles=spec["segmentfiles"],
+            layerfiles=spec["layerfiles"],
             anchor=cls.extract_anchor(spec),
             datasets=cls.extract_datasets(spec),
-            id=spec.get("@id", None)
+            id=spec.get("@id", None),
+            prerelease=spec.get("prerelease", False),
        )
 
     @classmethod
     @build_type("siibra/feature/profile/receptor/v0.1")
     def build_receptor_density_profile(cls, spec):
         return receptor_density_profile.ReceptorDensityProfile(
-            receptor=spec[
-            tsvfile=spec[
+            receptor=spec["receptor"],
+            tsvfile=spec["file"],
             anchor=cls.extract_anchor(spec),
             datasets=cls.extract_datasets(spec),
-            id=spec.get("@id", None)
+            id=spec.get("@id", None),
+            prerelease=spec.get("prerelease", False),
         )
 
     @classmethod
     @build_type("siibra/feature/profile/celldensity/v0.1")
     def build_cell_density_profile(cls, spec):
         return cell_density_profile.CellDensityProfile(
-            section=spec[
-            patch=spec[
-            url=spec[
+            section=spec["section"],
+            patch=spec["patch"],
+            url=spec["file"],
             anchor=cls.extract_anchor(spec),
             datasets=cls.extract_datasets(spec),
-            id=spec.get("@id", None)
+            id=spec.get("@id", None),
+            prerelease=spec.get("prerelease", False),
         )
 
     @classmethod
     @build_type("siibra/feature/section/v0.1")
     def build_section(cls, spec):
-        vol = cls.build_volume(spec)
         kwargs = {
-            "name": spec.get(
-            "region": spec.get(
-            "space_spec":
-            "providers":
+            "name": spec.get("name"),
+            "region": spec.get("region", None),
+            "space_spec": spec.get("space"),
+            "providers": cls.build_volumeproviders(spec.get("providers")),
             "datasets": cls.extract_datasets(spec),
-            "
+            "bbox": cls.build_boundingbox(spec),
+            "id": spec.get("@id", None),
+            "prerelease": spec.get("prerelease", False),
         }
-        modality = spec.get(
+        modality = spec.get("modality", "")
         if modality == "cell body staining":
             return sections.CellbodyStainedSection(**kwargs)
         else:
-            raise ValueError(
+            raise ValueError(
+                f"No method for building image section feature type {modality}."
+            )
 
     @classmethod
     @build_type("siibra/feature/voi/v0.1")
     def build_volume_of_interest(cls, spec):
-        vol = cls.build_volume(spec)
         kwargs = {
-            "name": spec.get(
-            "region": spec.get(
-            "space_spec":
-            "providers":
+            "name": spec.get("name"),
+            "region": spec.get("region", None),
+            "space_spec": spec.get("space"),
+            "providers": cls.build_volumeproviders(spec.get("providers")),
             "datasets": cls.extract_datasets(spec),
-            "
+            "bbox": cls.build_boundingbox(spec),
+            "id": spec.get("@id", None),
+            "prerelease": spec.get("prerelease", False),
         }
-        modality = spec.get(
+        modality = spec.get("modality", "")
         if modality == "cell body staining":
             return volume_of_interest.CellBodyStainedVolumeOfInterest(**kwargs)
         elif modality == "blockface":

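The new `build_boundingbox` simply returns `None` when a spec has no `boundingbox` entry, and otherwise forwards the first two coordinate tuples and the space spec to `boundingbox.BoundingBox`. A small hedged sketch of the None case (calling the classmethod directly, which the `build_type` wrapper shown earlier permits):

from siibra.configuration.factory import Factory

# A spec without a "boundingbox" entry yields no bounding box.
assert Factory.build_boundingbox({"name": "some volume"}) is None
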
@@ -437,25 +482,21 @@ class Factory:
                 modality="transmittance", **kwargs
             )
         elif modality == "XPCT":
-            return volume_of_interest.XPCTVolumeOfInterest(
-                modality="XPCT", **kwargs
-            )
+            return volume_of_interest.XPCTVolumeOfInterest(modality="XPCT", **kwargs)
         elif modality == "DTI":
-            return volume_of_interest.DTIVolumeOfInterest(
-                modality=modality, **kwargs
-            )
+            return volume_of_interest.DTIVolumeOfInterest(modality=modality, **kwargs)
         # elif modality == "segmentation":
         #     return volume_of_interest.SegmentedVolumeOfInterest(**kwargs)
         elif "MRI" in modality:
-            return volume_of_interest.MRIVolumeOfInterest(
-                modality=modality, **kwargs
-            )
+            return volume_of_interest.MRIVolumeOfInterest(modality=modality, **kwargs)
         elif modality == "LSFM":
             return volume_of_interest.LSFMVolumeOfInterest(
                 modality="Light Sheet Fluorescence Microscopy", **kwargs
             )
         else:
-            raise ValueError(
+            raise ValueError(
+                f"No method for building image section feature type {modality}."
+            )
 
     @classmethod
     @build_type("siibra/feature/connectivitymatrix/v0.3")

@@ -465,10 +506,14 @@ class Factory:
         try:
             conn_cls = getattr(connectivity, modality)
         except Exception:
-            raise ValueError(
+            raise ValueError(
+                f"No method for building connectivity matrix of type {modality}."
+            )
 
         decoder_func = cls.extract_decoder(spec)
-        repo_connector =
+        repo_connector = (
+            cls.extract_connector(spec) if spec.get("repository", None) else None
+        )
         if repo_connector is None:
             base_url = spec.get("base_url", "")
         kwargs = {

@@ -479,7 +524,8 @@ class Factory:
             "decode_func": decoder_func,
             "anchor": cls.extract_anchor(spec),
             "description": spec.get("description", ""),
-            "datasets": cls.extract_datasets(spec)
+            "datasets": cls.extract_datasets(spec),
+            "prerelease": spec.get("prerelease", False),
         }
         paradigm = spec.get("paradigm")
         if paradigm:

@@ -488,13 +534,15 @@ class Factory:
         assert files_indexed_by in ["subject", "feature"]
         conn_by_file = []
         for fkey, filename in files.items():
-            kwargs.update(
-
-
-
-
-
-
+            kwargs.update(
+                {
+                    "filename": filename,
+                    "subject": fkey if files_indexed_by == "subject" else "average",
+                    "feature": fkey if files_indexed_by == "feature" else None,
+                    "connector": repo_connector or base_url + filename,
+                    "id": spec.get("@id", None),
+                }
+            )
             conn_by_file.append(conn_cls(**kwargs))
         return conn_by_file
 

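The rewritten update call makes the indexing rule explicit: `files` may be keyed by subject or by feature, and the key is routed into the matching constructor argument. A standalone sketch of that routing with invented file names (plain Python, not the siibra API):

# Mirrors the kwargs routing in the connectivity builder.
files = {"sub-01": "sub-01.csv", "sub-02": "sub-02.csv"}
files_indexed_by = "subject"  # must be "subject" or "feature"

for fkey, filename in files.items():
    subject = fkey if files_indexed_by == "subject" else "average"
    feature = fkey if files_indexed_by == "feature" else None
    print(filename, subject, feature)
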
@@ -517,18 +565,17 @@ class Factory:
             "anchor": cls.extract_anchor(spec),
             "description": spec.get("description", ""),
             "datasets": cls.extract_datasets(spec),
-            "timestep": spec.get("timestep")
+            "timestep": spec.get("timestep"),
+            "prerelease": spec.get("prerelease", False),
         }
         paradigm = spec.get("paradigm")
         if paradigm:
             kwargs["paradigm"] = paradigm
         timeseries_by_file = []
         for fkey, filename in files.items():
-            kwargs.update(
-                "filename": filename,
-
-                "id": spec.get("@id", None)
-            })
+            kwargs.update(
+                {"filename": filename, "subject": fkey, "id": spec.get("@id", None)}
+            )
             timeseries_by_file.append(timeseries_cls(**kwargs))
         return timeseries_by_file
 

@@ -541,7 +588,7 @@ class Factory:
             with open(spec, "r") as f:
                 spec = json.load(f)
                 assert "filename" not in spec
-                spec[
+                spec["filename"] = fname
         else:
             spec = json.loads(spec)