junifer 0.0.5.dev183__py3-none-any.whl → 0.0.5.dev202__py3-none-any.whl
This diff shows the changes between two publicly released versions of the package, as published to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in their respective public registries.
- junifer/_version.py +2 -2
- junifer/datagrabber/tests/test_datalad_base.py +4 -4
- junifer/datagrabber/tests/test_pattern_datalad.py +4 -4
- junifer/markers/base.py +49 -23
- junifer/markers/brainprint.py +56 -265
- junifer/markers/complexity/complexity_base.py +23 -43
- junifer/markers/complexity/tests/test_hurst_exponent.py +4 -3
- junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +4 -3
- junifer/markers/complexity/tests/test_perm_entropy.py +4 -3
- junifer/markers/complexity/tests/test_range_entropy.py +4 -3
- junifer/markers/complexity/tests/test_range_entropy_auc.py +4 -3
- junifer/markers/complexity/tests/test_sample_entropy.py +4 -3
- junifer/markers/complexity/tests/test_weighted_perm_entropy.py +4 -3
- junifer/markers/ets_rss.py +24 -42
- junifer/markers/falff/falff_base.py +17 -46
- junifer/markers/falff/falff_parcels.py +53 -27
- junifer/markers/falff/falff_spheres.py +57 -29
- junifer/markers/falff/tests/test_falff_parcels.py +39 -23
- junifer/markers/falff/tests/test_falff_spheres.py +39 -23
- junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +32 -48
- junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +16 -10
- junifer/markers/functional_connectivity/edge_functional_connectivity_spheres.py +13 -9
- junifer/markers/functional_connectivity/functional_connectivity_base.py +26 -40
- junifer/markers/functional_connectivity/functional_connectivity_parcels.py +6 -6
- junifer/markers/functional_connectivity/functional_connectivity_spheres.py +6 -6
- junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +8 -4
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +6 -3
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +6 -3
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +6 -3
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +10 -5
- junifer/markers/parcel_aggregation.py +40 -59
- junifer/markers/reho/reho_base.py +6 -27
- junifer/markers/reho/reho_parcels.py +23 -15
- junifer/markers/reho/reho_spheres.py +22 -16
- junifer/markers/reho/tests/test_reho_parcels.py +8 -3
- junifer/markers/reho/tests/test_reho_spheres.py +8 -3
- junifer/markers/sphere_aggregation.py +40 -59
- junifer/markers/temporal_snr/temporal_snr_base.py +20 -32
- junifer/markers/temporal_snr/temporal_snr_parcels.py +6 -6
- junifer/markers/temporal_snr/temporal_snr_spheres.py +6 -6
- junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +6 -3
- junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +6 -3
- junifer/markers/tests/test_brainprint.py +23 -12
- junifer/markers/tests/test_collection.py +9 -8
- junifer/markers/tests/test_ets_rss.py +15 -9
- junifer/markers/tests/test_markers_base.py +17 -18
- junifer/markers/tests/test_parcel_aggregation.py +93 -32
- junifer/markers/tests/test_sphere_aggregation.py +72 -19
- junifer/pipeline/pipeline_step_mixin.py +11 -1
- junifer/pipeline/tests/test_registry.py +1 -1
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/METADATA +1 -1
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/RECORD +57 -57
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/WHEEL +1 -1
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/AUTHORS.rst +0 -0
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/LICENSE.md +0 -0
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/entry_points.txt +0 -0
- {junifer-0.0.5.dev183.dist-info → junifer-0.0.5.dev202.dist-info}/top_level.txt +0 -0
junifer/_version.py
CHANGED
```diff
@@ -12,5 +12,5 @@ __version__: str
 __version_tuple__: VERSION_TUPLE
 version_tuple: VERSION_TUPLE
 
-__version__ = version = '0.0.5.dev183'
-__version_tuple__ = version_tuple = (0, 0, 5, 'dev183')
+__version__ = version = '0.0.5.dev202'
+__version_tuple__ = version_tuple = (0, 0, 5, 'dev202')
```
junifer/datagrabber/tests/test_datalad_base.py
CHANGED
```diff
@@ -15,13 +15,13 @@ from junifer.datagrabber import DataladDataGrabber
 _testing_dataset = {
     "example_bids": {
         "uri": "https://gin.g-node.org/juaml/datalad-example-bids",
-        "commit": "
-        "id": "
+        "commit": "b87897cbe51bf0ee5514becaa5c7dd76491db5ad",
+        "id": "8fddff30-6993-420a-9d1e-b5b028c59468",
     },
     "example_bids_ses": {
         "uri": "https://gin.g-node.org/juaml/datalad-example-bids-ses",
-        "commit": "
-        "id": "
+        "commit": "6b163aa98af76a9eac0272273c27e14127850181",
+        "id": "715c17cf-a1b9-42d6-9af8-9f74c1a4a724",
     },
 }
```
junifer/datagrabber/tests/test_pattern_datalad.py
CHANGED
```diff
@@ -15,13 +15,13 @@ from junifer.datagrabber import PatternDataladDataGrabber
 _testing_dataset = {
     "example_bids": {
         "uri": "https://gin.g-node.org/juaml/datalad-example-bids",
-        "commit": "
-        "id": "
+        "commit": "b87897cbe51bf0ee5514becaa5c7dd76491db5ad",
+        "id": "8fddff30-6993-420a-9d1e-b5b028c59468",
     },
     "example_bids_ses": {
         "uri": "https://gin.g-node.org/juaml/datalad-example-bids-ses",
-        "commit": "
-        "id": "
+        "commit": "6b163aa98af76a9eac0272273c27e14127850181",
+        "id": "715c17cf-a1b9-42d6-9af8-9f74c1a4a724",
     },
 }
```
junifer/markers/base.py
CHANGED
```diff
@@ -5,6 +5,7 @@
 # License: AGPL
 
 from abc import ABC, abstractmethod
+from copy import deepcopy
 from typing import TYPE_CHECKING, Any, Dict, List, Optional, Union
 
 from ..pipeline import PipelineStepMixin, UpdateMetaMixin
@@ -35,6 +36,8 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
 
     Raises
     ------
+    AttributeError
+        If the marker does not have `_MARKER_INOUT_MAPPINGS` attribute.
     ValueError
         If required input data type(s) is(are) not found.
 
@@ -45,6 +48,12 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         on: Optional[Union[List[str], str]] = None,
         name: Optional[str] = None,
     ) -> None:
+        # Check for missing mapping attribute
+        if not hasattr(self, "_MARKER_INOUT_MAPPINGS"):
+            raise_error(
+                msg=("Missing `_MARKER_INOUT_MAPPINGS` for the marker"),
+                klass=AttributeError,
+            )
         # Use all data types if not provided
         if on is None:
             on = self.get_valid_inputs()
@@ -88,7 +97,6 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         )
         return [x for x in self._on if x in input]
 
-    @abstractmethod
     def get_valid_inputs(self) -> List[str]:
         """Get valid data types for input.
 
@@ -98,30 +106,25 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
            The list of data types that can be used as input for this marker.
 
         """
-        raise_error(
-            msg="Concrete classes need to implement get_valid_inputs().",
-            klass=NotImplementedError,
-        )
+        return list(self._MARKER_INOUT_MAPPINGS.keys())
 
-    @abstractmethod
-    def get_output_type(self, input_type: str) -> str:
+    def get_output_type(self, input_type: str, output_feature: str) -> str:
         """Get output type.
 
         Parameters
         ----------
         input_type : str
             The data type input to the marker.
+        output_feature : str
+            The feature output of the marker.
 
         Returns
         -------
         str
-            The storage type output
+            The storage type output of the marker.
 
         """
-        raise_error(
-            msg="Concrete classes need to implement get_output_type().",
-            klass=NotImplementedError,
-        )
+        return self._MARKER_INOUT_MAPPINGS[input_type][output_feature]
 
     @abstractmethod
     def compute(self, input: Dict, extra_input: Optional[Dict] = None) -> Dict:
@@ -154,6 +157,7 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
     def store(
         self,
         type_: str,
+        feature: str,
         out: Dict[str, Any],
         storage: "BaseFeatureStorage",
     ) -> None:
@@ -163,13 +167,15 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         ----------
         type_ : str
             The data type to store.
+        feature : str
+            The feature to store.
         out : dict
             The computed result as a dictionary to store.
         storage : storage-like
             The storage class, for example, SQLiteFeatureStorage.
 
         """
-        output_type_ = self.get_output_type(type_)
+        output_type_ = self.get_output_type(type_, feature)
         logger.debug(f"Storing {output_type_} in {storage}")
         storage.store(kind=output_type_, **out)
 
@@ -213,15 +219,35 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
             t_meta["type"] = type_
             # Compute marker
             t_out = self.compute(input=t_input, extra_input=extra_input)
-            [old lines 216-225 not rendered in this view]
+            # Initialize empty dictionary if no storage object is provided
+            if storage is None:
+                out[type_] = {}
+            # Store individual features
+            for feature_name, feature_data in t_out.items():
+                # Make deep copy of the feature data for manipulation
+                feature_data_copy = deepcopy(feature_data)
+                # Make deep copy of metadata and add to feature data
+                feature_data_copy["meta"] = deepcopy(t_meta)
+                # Update metadata for the feature,
+                # feature data is not manipulated, only meta
+                self.update_meta(feature_data_copy, "marker")
+                # Update marker feature's metadata name
+                feature_data_copy["meta"]["marker"][
+                    "name"
+                ] += f"_{feature_name}"
+
+                if storage is not None:
+                    logger.info(f"Storing in {storage}")
+                    self.store(
+                        type_=type_,
+                        feature=feature_name,
+                        out=feature_data_copy,
+                        storage=storage,
+                    )
+                else:
+                    logger.info(
+                        "No storage specified, returning dictionary"
+                    )
+                    out[type_][feature_name] = feature_data_copy
 
         return out
```
junifer/markers/brainprint.py
CHANGED
```diff
@@ -3,21 +3,9 @@
 # Authors: Synchon Mandal <s.mandal@fz-juelich.de>
 # License: AGPL
 
-import sys
-
-
-if sys.version_info < (3, 11):  # pragma: no cover
-    from importlib_metadata import packages_distributions
-else:
-    from importlib.metadata import packages_distributions
-
 import uuid
-from copy import deepcopy
-from importlib.util import find_spec
-from itertools import chain
 from pathlib import Path
 from typing import (
-    TYPE_CHECKING,
     Any,
     ClassVar,
     Dict,
@@ -37,15 +25,10 @@ from ..external.BrainPrint.brainprint.brainprint import (
 )
 from ..external.BrainPrint.brainprint.surfaces import surf_to_vtk
 from ..pipeline import WorkDirManager
-from ..pipeline.utils import check_ext_dependencies
-from ..utils import logger, raise_error, run_ext_cmd
+from ..utils import logger, run_ext_cmd
 from .base import BaseMarker
 
 
-if TYPE_CHECKING:
-    from junifer.storage import BaseFeatureStorage
-
-
 __all__ = ["BrainPrint"]
 
 
@@ -99,6 +82,15 @@ class BrainPrint(BaseMarker):
 
     _DEPENDENCIES: ClassVar[Set[str]] = {"lapy", "numpy"}
 
+    _MARKER_INOUT_MAPPINGS: ClassVar[Dict[str, Dict[str, str]]] = {
+        "FreeSurfer": {
+            "eigenvalues": "scalar_table",
+            "areas": "vector",
+            "volumes": "vector",
+            "distances": "vector",
+        }
+    }
+
     def __init__(
         self,
         num: int = 50,
@@ -121,117 +113,6 @@ class BrainPrint(BaseMarker):
         self.use_cholmod = use_cholmod
         super().__init__(name=name, on="FreeSurfer")
 
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-
-        """
-        return ["FreeSurfer"]
-
-    # TODO: kept for making this class concrete; should be removed later
-    def get_output_type(self, input_type: str) -> str:
-        """Get output type.
-
-        Parameters
-        ----------
-        input_type : str
-            The data type input to the marker.
-
-        Returns
-        -------
-        str
-            The storage type output by the marker.
-
-        """
-        return "vector"
-
-    # TODO: overridden to allow multiple outputs from single data type; should
-    # be removed later
-    def validate(self, input: List[str]) -> List[str]:
-        """Validate the the pipeline step.
-
-        Parameters
-        ----------
-        input : list of str
-            The input to the pipeline step.
-
-        Returns
-        -------
-        list of str
-            The output of the pipeline step.
-
-        """
-
-        def _check_dependencies(obj) -> None:
-            """Check obj._DEPENDENCIES.
-
-            Parameters
-            ----------
-            obj : object
-                Object to check _DEPENDENCIES of.
-
-            Raises
-            ------
-            ImportError
-                If the pipeline step object is missing dependencies required
-                for its working.
-
-            """
-            # Check if _DEPENDENCIES attribute is found;
-            # (markers and preprocessors will have them but not datareaders
-            # as of now)
-            dependencies_not_found = []
-            if hasattr(obj, "_DEPENDENCIES"):
-                # Check if dependencies are importable
-                for dependency in obj._DEPENDENCIES:
-                    # First perform an easy check
-                    if find_spec(dependency) is None:
-                        # Then check mapped names
-                        if dependency not in list(
-                            chain.from_iterable(
-                                packages_distributions().values()
-                            )
-                        ):
-                            dependencies_not_found.append(dependency)
-            # Raise error if any dependency is not found
-            if dependencies_not_found:
-                raise_error(
-                    msg=(
-                        f"{dependencies_not_found} are not installed but are "
-                        f"required for using {obj.__class__.__name__}."
-                    ),
-                    klass=ImportError,
-                )
-
-        def _check_ext_dependencies(obj) -> None:
-            """Check obj._EXT_DEPENDENCIES.
-
-            Parameters
-            ----------
-            obj : object
-                Object to check _EXT_DEPENDENCIES of.
-
-            """
-            # Check if _EXT_DEPENDENCIES attribute is found;
-            # (some markers and preprocessors might have them)
-            if hasattr(obj, "_EXT_DEPENDENCIES"):
-                for dependency in obj._EXT_DEPENDENCIES:
-                    check_ext_dependencies(**dependency)
-
-        # Check dependencies
-        _check_dependencies(self)
-        # Check external dependencies
-        # _check_ext_dependencies(self)
-        # Validate input
-        _ = self.validate_input(input=input)
-        # Validate output type
-        outputs = ["scalar_table", "vector"]
-        return outputs
-
     def _create_aseg_surface(
         self,
         aseg_path: Path,
@@ -426,6 +307,27 @@ class BrainPrint(BaseMarker):
             ),
         }
 
+    def _fix_nan(
+        self,
+        input_data: List[Union[float, str, npt.ArrayLike]],
+    ) -> np.ndarray:
+        """Convert BrainPrint output with string NaN to ``numpy.nan``.
+
+        Parameters
+        ----------
+        input_data : list of str, float or numpy.ndarray-like
+            The data to convert.
+
+        Returns
+        -------
+        np.ndarray
+            The converted data as ``numpy.ndarray``.
+
+        """
+        arr = np.asarray(input_data)
+        arr[arr == "NaN"] = np.nan
+        return arr.astype(np.float64)
+
     def compute(
         self,
         input: Dict[str, Any],
@@ -443,16 +345,32 @@ class BrainPrint(BaseMarker):
         Returns
         -------
         dict
-            The computed result as dictionary.
-
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
+
+            * ``eigenvalues`` : dictionary with the following keys:
+
+              - ``data`` : eigenvalues as ``np.ndarray``
+              - ``col_names`` : surface labels as list of str
+              - ``row_names`` : eigenvalue count labels as list of str
+              - ``row_header_col_name`` : "eigenvalue"
+
+            * ``areas`` : dictionary with the following keys:
+
+              - ``data`` : areas as ``np.ndarray``
+              - ``col_names`` : surface labels as list of str
+
+            * ``volumes`` : dictionary with the following keys:
 
-            [old lines 449-455 not rendered in this view]
+              - ``data`` : volumes as ``np.ndarray``
+              - ``col_names`` : surface labels as list of str
+
+            * ``distances`` : dictionary with the following keys
+              if ``asymmetry = True``:
+
+              - ``data`` : distances as ``np.ndarray``
+              - ``col_names`` : surface labels as list of str
 
         References
         ----------
@@ -539,130 +457,3 @@ class BrainPrint(BaseMarker):
             "col_names": list(distances.keys()),
         }
         return output
-
-    def _fix_nan(
-        self,
-        input_data: List[Union[float, str, npt.ArrayLike]],
-    ) -> np.ndarray:
-        """Convert BrainPrint output with string NaN to ``numpy.nan``.
-
-        Parameters
-        ----------
-        input_data : list of str, float or numpy.ndarray-like
-            The data to convert.
-
-        Returns
-        -------
-        np.ndarray
-            The converted data as ``numpy.ndarray``.
-
-        """
-        arr = np.asarray(input_data)
-        arr[arr == "NaN"] = np.nan
-        return arr.astype(np.float64)
-
-    # TODO: overridden to allow storing multiple outputs from single input;
-    # should be removed later
-    def store(
-        self,
-        type_: str,
-        feature: str,
-        out: Dict[str, Any],
-        storage: "BaseFeatureStorage",
-    ) -> None:
-        """Store.
-
-        Parameters
-        ----------
-        type_ : str
-            The data type to store.
-        feature : {"eigenvalues", "distances", "areas", "volumes"}
-            The feature name to store.
-        out : dict
-            The computed result as a dictionary to store.
-        storage : storage-like
-            The storage class, for example, SQLiteFeatureStorage.
-
-        Raises
-        ------
-        ValueError
-            If ``feature`` is invalid.
-
-        """
-        if feature == "eigenvalues":
-            output_type = "scalar_table"
-        elif feature in ["distances", "areas", "volumes"]:
-            output_type = "vector"
-        else:
-            raise_error(f"Unknown feature: {feature}")
-
-        logger.debug(f"Storing {output_type} in {storage}")
-        storage.store(kind=output_type, **out)
-
-    # TODO: overridden to allow storing multiple outputs from single input;
-    # should be removed later
-    def _fit_transform(
-        self,
-        input: Dict[str, Dict],
-        storage: Optional["BaseFeatureStorage"] = None,
-    ) -> Dict:
-        """Fit and transform.
-
-        Parameters
-        ----------
-        input : dict
-            The Junifer Data object.
-        storage : storage-like, optional
-            The storage class, for example, SQLiteFeatureStorage.
-
-        Returns
-        -------
-        dict
-            The processed output as a dictionary. If `storage` is provided,
-            empty dictionary is returned.
-
-        """
-        out = {}
-        for type_ in self._on:
-            if type_ in input.keys():
-                logger.info(f"Computing {type_}")
-                t_input = input[type_]
-                extra_input = input.copy()
-                extra_input.pop(type_)
-                t_meta = t_input["meta"].copy()
-                t_meta["type"] = type_
-
-                # Returns multiple features
-                t_out = self.compute(input=t_input, extra_input=extra_input)
-
-                if storage is None:
-                    out[type_] = {}
-
-                for feature_name, feature_data in t_out.items():
-                    # Make deep copy of the feature data for manipulation
-                    feature_data_copy = deepcopy(feature_data)
-                    # Make deep copy of metadata and add to feature data
-                    feature_data_copy["meta"] = deepcopy(t_meta)
-                    # Update metadata for the feature,
-                    # feature data is not manipulated, only meta
-                    self.update_meta(feature_data_copy, "marker")
-                    # Update marker feature's metadata name
-                    feature_data_copy["meta"]["marker"][
-                        "name"
-                    ] += f"_{feature_name}"
-
-                    if storage is not None:
-                        logger.info(f"Storing in {storage}")
-                        self.store(
-                            type_=type_,
-                            feature=feature_name,
-                            out=feature_data_copy,
-                            storage=storage,
-                        )
-                    else:
-                        logger.info(
-                            "No storage specified, returning dictionary"
-                        )
-                        out[type_][feature_name] = feature_data_copy
-
-        return out
```
junifer/markers/complexity/complexity_base.py
CHANGED
```diff
@@ -53,6 +53,12 @@ class ComplexityBase(BaseMarker):
 
     _DEPENDENCIES: ClassVar[Set[str]] = {"nilearn", "neurokit2"}
 
+    _MARKER_INOUT_MAPPINGS: ClassVar[Dict[str, Dict[str, str]]] = {
+        "BOLD": {
+            "complexity": "vector",
+        },
+    }
+
     def __init__(
         self,
         parcellation: Union[str, List[str]],
@@ -78,33 +84,6 @@ class ComplexityBase(BaseMarker):
             klass=NotImplementedError,
         )
 
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-
-        """
-        return ["BOLD"]
-
-    def get_output_type(self, input_type: str) -> str:
-        """Get output type.
-
-        Parameters
-        ----------
-        input_type : str
-            The data type input to the marker.
-
-        Returns
-        -------
-        str
-            The storage type output by the marker.
-
-        """
-        return "vector"
-
     def compute(
         self,
         input: Dict[str, Any],
@@ -124,29 +103,30 @@ class ComplexityBase(BaseMarker):
         Returns
         -------
         dict
-            The computed result as dictionary.
-
+            The computed result as dictionary. This will be either returned
+            to the user or stored in the storage by calling the store method
+            with this as a parameter. The dictionary has the following keys:
+
+            * ``complexity`` : dictionary with the following keys:
 
-
-
+              - ``data`` : ROI-wise complexity measures as ``numpy.ndarray``
+              - ``col_names`` : ROI labels as list of str
 
         """
-        #
+        # Extract the 2D time series using ParcelAggregation
         parcel_aggregation = ParcelAggregation(
             parcellation=self.parcellation,
             method=self.agg_method,
             method_params=self.agg_method_params,
             masks=self.masks,
             on="BOLD",
-        )
-        # Extract the 2D time series using parcel aggregation
-        parcel_aggregation_map = parcel_aggregation.compute(
-            input=input, extra_input=extra_input
-        )
-
+        ).compute(input=input, extra_input=extra_input)
         # Compute complexity measure
-        [old lines 148-152 not rendered in this view]
+        return {
+            "complexity": {
+                "data": self.compute_complexity(
+                    parcel_aggregation["aggregation"]["data"]
+                ),
+                "col_names": parcel_aggregation["aggregation"]["col_names"],
+            }
+        }
```
junifer/markers/complexity/tests/test_hurst_exponent.py
CHANGED
```diff
@@ -40,13 +40,14 @@ def test_compute() -> None:
     # Compute the marker
     feature_map = marker.fit_transform(element_data)
     # Assert the dimension of timeseries
-    assert feature_map["BOLD"]["data"].ndim == 2
+    assert feature_map["BOLD"]["complexity"]["data"].ndim == 2
 
 
 def test_get_output_type() -> None:
     """Test HurstExponent get_output_type()."""
-    [old lines 48-49 not rendered in this view]
+    assert "vector" == HurstExponent(
+        parcellation=PARCELLATION
+    ).get_output_type(input_type="BOLD", output_feature="complexity")
 
 
 @pytest.mark.skipif(
```
|