junifer 0.0.3.dev188__py3-none-any.whl → 0.0.4__py3-none-any.whl
This diff compares publicly available package versions as released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their respective public registries.
- junifer/_version.py +14 -2
- junifer/api/cli.py +162 -17
- junifer/api/functions.py +87 -419
- junifer/api/parser.py +24 -0
- junifer/api/queue_context/__init__.py +8 -0
- junifer/api/queue_context/gnu_parallel_local_adapter.py +258 -0
- junifer/api/queue_context/htcondor_adapter.py +365 -0
- junifer/api/queue_context/queue_context_adapter.py +60 -0
- junifer/api/queue_context/tests/test_gnu_parallel_local_adapter.py +192 -0
- junifer/api/queue_context/tests/test_htcondor_adapter.py +257 -0
- junifer/api/res/afni/run_afni_docker.sh +6 -6
- junifer/api/res/ants/ResampleImage +3 -0
- junifer/api/res/ants/antsApplyTransforms +3 -0
- junifer/api/res/ants/antsApplyTransformsToPoints +3 -0
- junifer/api/res/ants/run_ants_docker.sh +39 -0
- junifer/api/res/fsl/applywarp +3 -0
- junifer/api/res/fsl/flirt +3 -0
- junifer/api/res/fsl/img2imgcoord +3 -0
- junifer/api/res/fsl/run_fsl_docker.sh +39 -0
- junifer/api/res/fsl/std2imgcoord +3 -0
- junifer/api/res/run_conda.sh +4 -4
- junifer/api/res/run_venv.sh +22 -0
- junifer/api/tests/data/partly_cloudy_agg_mean_tian.yml +16 -0
- junifer/api/tests/test_api_utils.py +21 -3
- junifer/api/tests/test_cli.py +232 -9
- junifer/api/tests/test_functions.py +211 -439
- junifer/api/tests/test_parser.py +1 -1
- junifer/configs/juseless/datagrabbers/aomic_id1000_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/camcan_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/ixi_vbm.py +6 -1
- junifer/configs/juseless/datagrabbers/tests/test_ucla.py +8 -8
- junifer/configs/juseless/datagrabbers/ucla.py +44 -26
- junifer/configs/juseless/datagrabbers/ukb_vbm.py +6 -1
- junifer/data/VOIs/meta/AutobiographicalMemory_VOIs.txt +23 -0
- junifer/data/VOIs/meta/Power2013_MNI_VOIs.tsv +264 -0
- junifer/data/__init__.py +4 -0
- junifer/data/coordinates.py +298 -31
- junifer/data/masks.py +360 -28
- junifer/data/parcellations.py +621 -188
- junifer/data/template_spaces.py +190 -0
- junifer/data/tests/test_coordinates.py +34 -3
- junifer/data/tests/test_data_utils.py +1 -0
- junifer/data/tests/test_masks.py +202 -86
- junifer/data/tests/test_parcellations.py +266 -55
- junifer/data/tests/test_template_spaces.py +104 -0
- junifer/data/utils.py +4 -2
- junifer/datagrabber/__init__.py +1 -0
- junifer/datagrabber/aomic/id1000.py +111 -70
- junifer/datagrabber/aomic/piop1.py +116 -53
- junifer/datagrabber/aomic/piop2.py +116 -53
- junifer/datagrabber/aomic/tests/test_id1000.py +27 -27
- junifer/datagrabber/aomic/tests/test_piop1.py +27 -27
- junifer/datagrabber/aomic/tests/test_piop2.py +27 -27
- junifer/datagrabber/base.py +62 -10
- junifer/datagrabber/datalad_base.py +0 -2
- junifer/datagrabber/dmcc13_benchmark.py +372 -0
- junifer/datagrabber/hcp1200/datalad_hcp1200.py +5 -0
- junifer/datagrabber/hcp1200/hcp1200.py +30 -13
- junifer/datagrabber/pattern.py +133 -27
- junifer/datagrabber/pattern_datalad.py +111 -13
- junifer/datagrabber/tests/test_base.py +57 -6
- junifer/datagrabber/tests/test_datagrabber_utils.py +204 -76
- junifer/datagrabber/tests/test_datalad_base.py +0 -6
- junifer/datagrabber/tests/test_dmcc13_benchmark.py +256 -0
- junifer/datagrabber/tests/test_multiple.py +43 -10
- junifer/datagrabber/tests/test_pattern.py +125 -178
- junifer/datagrabber/tests/test_pattern_datalad.py +44 -25
- junifer/datagrabber/utils.py +151 -16
- junifer/datareader/default.py +36 -10
- junifer/external/nilearn/junifer_nifti_spheres_masker.py +6 -0
- junifer/markers/base.py +25 -16
- junifer/markers/collection.py +35 -16
- junifer/markers/complexity/__init__.py +27 -0
- junifer/markers/complexity/complexity_base.py +149 -0
- junifer/markers/complexity/hurst_exponent.py +136 -0
- junifer/markers/complexity/multiscale_entropy_auc.py +140 -0
- junifer/markers/complexity/perm_entropy.py +132 -0
- junifer/markers/complexity/range_entropy.py +136 -0
- junifer/markers/complexity/range_entropy_auc.py +145 -0
- junifer/markers/complexity/sample_entropy.py +134 -0
- junifer/markers/complexity/tests/test_complexity_base.py +19 -0
- junifer/markers/complexity/tests/test_hurst_exponent.py +69 -0
- junifer/markers/complexity/tests/test_multiscale_entropy_auc.py +68 -0
- junifer/markers/complexity/tests/test_perm_entropy.py +68 -0
- junifer/markers/complexity/tests/test_range_entropy.py +69 -0
- junifer/markers/complexity/tests/test_range_entropy_auc.py +69 -0
- junifer/markers/complexity/tests/test_sample_entropy.py +68 -0
- junifer/markers/complexity/tests/test_weighted_perm_entropy.py +68 -0
- junifer/markers/complexity/weighted_perm_entropy.py +133 -0
- junifer/markers/falff/_afni_falff.py +153 -0
- junifer/markers/falff/_junifer_falff.py +142 -0
- junifer/markers/falff/falff_base.py +91 -84
- junifer/markers/falff/falff_parcels.py +61 -45
- junifer/markers/falff/falff_spheres.py +64 -48
- junifer/markers/falff/tests/test_falff_parcels.py +89 -121
- junifer/markers/falff/tests/test_falff_spheres.py +92 -127
- junifer/markers/functional_connectivity/crossparcellation_functional_connectivity.py +1 -0
- junifer/markers/functional_connectivity/edge_functional_connectivity_parcels.py +1 -0
- junifer/markers/functional_connectivity/functional_connectivity_base.py +1 -0
- junifer/markers/functional_connectivity/tests/test_crossparcellation_functional_connectivity.py +46 -44
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_parcels.py +34 -39
- junifer/markers/functional_connectivity/tests/test_edge_functional_connectivity_spheres.py +40 -52
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_parcels.py +62 -70
- junifer/markers/functional_connectivity/tests/test_functional_connectivity_spheres.py +99 -85
- junifer/markers/parcel_aggregation.py +60 -38
- junifer/markers/reho/_afni_reho.py +192 -0
- junifer/markers/reho/_junifer_reho.py +281 -0
- junifer/markers/reho/reho_base.py +69 -34
- junifer/markers/reho/reho_parcels.py +26 -16
- junifer/markers/reho/reho_spheres.py +23 -9
- junifer/markers/reho/tests/test_reho_parcels.py +93 -92
- junifer/markers/reho/tests/test_reho_spheres.py +88 -86
- junifer/markers/sphere_aggregation.py +54 -9
- junifer/markers/temporal_snr/temporal_snr_base.py +1 -0
- junifer/markers/temporal_snr/tests/test_temporal_snr_parcels.py +38 -37
- junifer/markers/temporal_snr/tests/test_temporal_snr_spheres.py +34 -38
- junifer/markers/tests/test_collection.py +43 -42
- junifer/markers/tests/test_ets_rss.py +29 -37
- junifer/markers/tests/test_parcel_aggregation.py +587 -468
- junifer/markers/tests/test_sphere_aggregation.py +209 -157
- junifer/markers/utils.py +2 -40
- junifer/onthefly/read_transform.py +13 -6
- junifer/pipeline/__init__.py +1 -0
- junifer/pipeline/pipeline_step_mixin.py +105 -41
- junifer/pipeline/registry.py +17 -0
- junifer/pipeline/singleton.py +45 -0
- junifer/pipeline/tests/test_pipeline_step_mixin.py +139 -51
- junifer/pipeline/tests/test_update_meta_mixin.py +1 -0
- junifer/pipeline/tests/test_workdir_manager.py +104 -0
- junifer/pipeline/update_meta_mixin.py +8 -2
- junifer/pipeline/utils.py +154 -15
- junifer/pipeline/workdir_manager.py +246 -0
- junifer/preprocess/__init__.py +3 -0
- junifer/preprocess/ants/__init__.py +4 -0
- junifer/preprocess/ants/ants_apply_transforms_warper.py +185 -0
- junifer/preprocess/ants/tests/test_ants_apply_transforms_warper.py +56 -0
- junifer/preprocess/base.py +96 -69
- junifer/preprocess/bold_warper.py +265 -0
- junifer/preprocess/confounds/fmriprep_confound_remover.py +91 -134
- junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +106 -111
- junifer/preprocess/fsl/__init__.py +4 -0
- junifer/preprocess/fsl/apply_warper.py +179 -0
- junifer/preprocess/fsl/tests/test_apply_warper.py +45 -0
- junifer/preprocess/tests/test_bold_warper.py +159 -0
- junifer/preprocess/tests/test_preprocess_base.py +6 -6
- junifer/preprocess/warping/__init__.py +6 -0
- junifer/preprocess/warping/_ants_warper.py +167 -0
- junifer/preprocess/warping/_fsl_warper.py +109 -0
- junifer/preprocess/warping/space_warper.py +213 -0
- junifer/preprocess/warping/tests/test_space_warper.py +198 -0
- junifer/stats.py +18 -4
- junifer/storage/base.py +9 -1
- junifer/storage/hdf5.py +8 -3
- junifer/storage/pandas_base.py +2 -1
- junifer/storage/sqlite.py +1 -0
- junifer/storage/tests/test_hdf5.py +2 -1
- junifer/storage/tests/test_sqlite.py +8 -8
- junifer/storage/tests/test_utils.py +6 -6
- junifer/storage/utils.py +1 -0
- junifer/testing/datagrabbers.py +11 -7
- junifer/testing/utils.py +1 -0
- junifer/tests/test_stats.py +2 -0
- junifer/utils/__init__.py +1 -0
- junifer/utils/helpers.py +53 -0
- junifer/utils/logging.py +14 -3
- junifer/utils/tests/test_helpers.py +35 -0
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/METADATA +59 -28
- junifer-0.0.4.dist-info/RECORD +257 -0
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/WHEEL +1 -1
- junifer/markers/falff/falff_estimator.py +0 -334
- junifer/markers/falff/tests/test_falff_estimator.py +0 -238
- junifer/markers/reho/reho_estimator.py +0 -515
- junifer/markers/reho/tests/test_reho_estimator.py +0 -260
- junifer-0.0.3.dev188.dist-info/RECORD +0 -199
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/AUTHORS.rst +0 -0
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/LICENSE.md +0 -0
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/entry_points.txt +0 -0
- {junifer-0.0.3.dev188.dist-info → junifer-0.0.4.dist-info}/top_level.txt +0 -0
junifer/datagrabber/utils.py
CHANGED

@@ -6,7 +6,60 @@
 
 from typing import Dict, List
 
-from ..utils import raise_error
+from ..utils import logger, raise_error
+
+
+# Define schema for pattern-based datagrabber's patterns
+PATTERNS_SCHEMA = {
+    "T1w": {
+        "mandatory": ["pattern", "space"],
+        "optional": ["mask_item"],
+    },
+    "T1w_mask": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "T2w": {
+        "mandatory": ["pattern", "space"],
+        "optional": ["mask_item"],
+    },
+    "T2w_mask": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "BOLD": {
+        "mandatory": ["pattern", "space"],
+        "optional": ["mask_item"],
+    },
+    "BOLD_confounds": {
+        "mandatory": ["pattern", "format"],
+        "optional": [],
+    },
+    "BOLD_mask": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "Warp": {
+        "mandatory": ["pattern", "src", "dst"],
+        "optional": [],
+    },
+    "VBM_GM": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "VBM_WM": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "VBM_CSF": {
+        "mandatory": ["pattern", "space"],
+        "optional": [],
+    },
+    "DWI": {
+        "mandatory": ["pattern"],
+        "optional": [],
+    },
+}
 
 
 def validate_types(types: List[str]) -> None:
@@ -17,6 +70,11 @@ def validate_types(types: List[str]) -> None:
     types : list of str
         The object to validate.
 
+    Raises
+    ------
+    TypeError
+        If ``types`` is not a list or if the values are not string.
+
     """
     if not isinstance(types, list):
         raise_error(msg="`types` must be a list", klass=TypeError)
@@ -25,7 +83,7 @@
 
 
 def validate_replacements(
-    replacements: List[str], patterns: Dict[str, str]
+    replacements: List[str], patterns: Dict[str, Dict[str, str]]
 ) -> None:
     """Validate the replacements.
 
@@ -36,32 +94,44 @@
     patterns : dict
         The patterns to validate against.
 
+    Raises
+    ------
+    TypeError
+        If ``replacements`` is not a list or if the values are not string.
+    ValueError
+        If a value in ``replacements`` is not part of a data type pattern or
+        if no data type patterns contain all values in ``replacements``.
+
     """
     if not isinstance(replacements, list):
         raise_error(msg="`replacements` must be a list.", klass=TypeError)
 
-    if not isinstance(patterns, dict):
-        raise_error(msg="`patterns` must be a dict.", klass=TypeError)
-
     if any(not isinstance(x, str) for x in replacements):
         raise_error(
             msg="`replacements` must be a list of strings.", klass=TypeError
         )
 
     for x in replacements:
-        if all(
-
+        if all(
+            x not in y
+            for y in [
+                data_type_val["pattern"] for data_type_val in patterns.values()
+            ]
+        ):
+            raise_error(msg=f"Replacement: {x} is not part of any pattern.")
 
     # Check that at least one pattern has all the replacements
     at_least_one = False
-    for
-        if all(x in
+    for data_type_val in patterns.values():
+        if all(x in data_type_val["pattern"] for x in replacements):
             at_least_one = True
     if at_least_one is False:
         raise_error(msg="At least one pattern must contain all replacements.")
 
 
-def validate_patterns(types: List[str], patterns: Dict[str, str]) -> None:
+def validate_patterns(
+    types: List[str], patterns: Dict[str, Dict[str, str]]
+) -> None:
     """Validate the patterns.
 
     Parameters
@@ -71,6 +141,20 @@ def validate_patterns(types: List[str], patterns: Dict[str, str]) -> None:
     patterns : dict
         The object to validate.
 
+    Raises
+    ------
+    KeyError
+        If any mandatory key is missing for a data type.
+    RuntimeError
+        If an unknown key is found for a data type.
+    TypeError
+        If ``patterns`` is not a dictionary.
+    ValueError
+        If length of ``types`` and ``patterns`` are different or
+        if ``patterns`` is missing entries from ``types`` or
+        if unknown data type is found in ``patterns`` or
+        if data type pattern key contains '*' as value.
+
     """
     # Validate the types
     validate_types(types)
@@ -87,9 +171,60 @@ def validate_patterns(types: List[str], patterns: Dict[str, str]) -> None:
         raise_error(
             msg="`patterns` must contain all `types`", klass=ValueError
         )
-    #
-
-
-
-
-
+    # Check against schema
+    for data_type_key, data_type_val in patterns.items():
+        # Check if valid data type is provided
+        if data_type_key not in PATTERNS_SCHEMA:
+            raise_error(
+                f"Unknown data type: {data_type_key}, "
+                f"should be one of: {list(PATTERNS_SCHEMA.keys())}"
+            )
+        # Check mandatory keys for data type
+        for mandatory_key in PATTERNS_SCHEMA[data_type_key]["mandatory"]:
+            if mandatory_key not in data_type_val:
+                raise_error(
+                    msg=(
+                        f"Mandatory key: `{mandatory_key}` missing for "
+                        f"{data_type_key}"
+                    ),
+                    klass=KeyError,
+                )
+            else:
+                logger.debug(
+                    f"Mandatory key: `{mandatory_key}` found for "
+                    f"{data_type_key}"
+                )
+        # Check optional keys for data type
+        for optional_key in PATTERNS_SCHEMA[data_type_key]["optional"]:
+            if optional_key not in data_type_val:
+                logger.debug(
+                    f"Optional key: `{optional_key}` missing for "
+                    f"{data_type_key}"
+                )
+            else:
+                logger.debug(
+                    f"Optional key: `{optional_key}` found for "
+                    f"{data_type_key}"
+                )
+        # Check stray key for data type
+        for key in data_type_val.keys():
+            if key not in (
+                PATTERNS_SCHEMA[data_type_key]["mandatory"]
+                + PATTERNS_SCHEMA[data_type_key]["optional"]
+            ):
+                raise_error(
+                    msg=(
+                        f"Key: {key} not accepted for {data_type_key} "
+                        "pattern, remove it to proceed"
+                    ),
+                    klass=RuntimeError,
+                )
+        # Wildcard check in patterns
+        if "}*" in data_type_val["pattern"]:
+            raise_error(
+                msg=(
+                    f"`{data_type_key}.pattern` must not contain `*` "
+                    "following a replacement"
+                ),
+                klass=ValueError,
+            )
junifer/datareader/default.py
CHANGED

@@ -53,23 +53,22 @@ class DefaultDataReader(PipelineStepMixin, UpdateMetaMixin):
         # Nothing to validate, any input is fine
         return input
 
-    def get_output_type(self,
+    def get_output_type(self, input_type: str) -> str:
         """Get output type.
 
         Parameters
         ----------
-
-            The input to the reader.
-            available Junifer Data dictionary keys.
+        input_type : str
+            The data type input to the reader.
 
         Returns
         -------
-
-            The
+        str
+            The data type output by the reader.
 
         """
         # It will output the same type of data as the input
-        return
+        return input_type
 
     def _fit_transform(
         self,
@@ -91,42 +90,69 @@ class DefaultDataReader(PipelineStepMixin, UpdateMetaMixin):
             The processed output as dictionary. The "data" key is added to
             each data type dictionary.
 
+        Warns
+        -----
+        RuntimeWarning
+            If input data type has no key called ``"path"``.
+
         """
-        #
+        # Copy input to not modify the original
         out = input.copy()
+        # Set default extra parameters
        if params is None:
             params = {}
+        # For each type of data, try to read it
         for type_ in input.keys():
+            # Skip Warp data type
+            if type_ == "Warp":
+                continue
+
+            # Check for malformed datagrabber specification
             if "path" not in input[type_]:
                 warn_with_log(
                     f"Input type {type_} does not provide a path. Skipping."
                 )
                 continue
+
+            # Retrieve actual path
             t_path = input[type_]["path"]
+            # Retrieve loading params for the data type
             t_params = params.get(type_, {})
 
-            # Convert to Path
+            # Convert str to Path
             if not isinstance(t_path, Path):
                 t_path = Path(t_path)
                 out[type_]["path"] = t_path
+
             logger.info(f"Reading {type_} from {t_path.as_posix()}")
+            # Initialize variable for file data
             fread = None
-
+            # Lowercase path
             fname = t_path.name.lower()
+            # Loop through extensions to find the correct one
             for ext, ftype in _extensions.items():
                 if fname.endswith(ext):
                     logger.info(f"{type_} is type {ftype}")
+                    # Retrieve reader function
                     reader_func = _readers[ftype]["func"]
+                    # Retrieve reader function params
                     reader_params = _readers[ftype]["params"]
+                    # Update reader function params
                     if reader_params is not None:
                         t_params.update(reader_params)
                     logger.debug(f"Calling {reader_func} with {t_params}")
+                    # Read data
                     fread = reader_func(t_path, **t_params)
                     break
+            # If no file data is found due to unknown extension
             if fread is None:
                 logger.info(
                     f"Unknown file type {t_path.as_posix()}, skipping reading"
                 )
+
+            # Set file data for output
             out[type_]["data"] = fread
+            # Update metadata for step
             self.update_meta(out[type_], "datareader")
+
         return out
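Since the reader now skips ``Warp`` entries and merely logs unknown extensions, driving it directly looks roughly like this (a sketch; the element dictionary and file paths are hypothetical and must point to real files to actually load anything):

```python
from junifer.datareader.default import DefaultDataReader

# Hypothetical element as a datagrabber would yield it: one dictionary
# per data type, each carrying at least a "path" key.
element = {
    "BOLD": {"path": "/data/sub-01/func/sub-01_task-rest_bold.nii.gz"},
    "Warp": {"path": "/data/sub-01/xfm/sub-01_warp.h5"},  # skipped now
}

reader = DefaultDataReader()
out = reader.fit_transform(element)
# out["BOLD"]["data"] holds the image loaded by the matching reader
# (None if the extension is unknown); out["Warp"] passes through with
# no "data" key added.
```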
junifer/external/nilearn/junifer_nifti_spheres_masker.py
CHANGED

@@ -98,6 +98,7 @@ def _apply_mask_and_get_affinity(
     A : scipy.sparse.lil_matrix
         Contains the boolean indices for each sphere.
         shape: (number of seeds, number of voxels)
+
     """
     seeds = list(seeds)
 
@@ -204,6 +205,7 @@ def _iter_signals_from_spheres(
     mask_img : Niimg-like object, optional
         See :ref:`extracting_data`.
         Mask to apply to regions before extracting signals.
+
     """
     X, A = _apply_mask_and_get_affinity(
         seeds, niimg, radius, allow_overlap, mask_img=mask_img
@@ -397,6 +399,10 @@ class JuniferNiftiSpheresMasker(NiftiSpheresMasker):
 
         params = get_params(NiftiSpheresMasker, self)
 
+        # New in nilearn 0.10.1
+        if hasattr(self, "clean_kwargs"):
+            params["clean_kwargs"] = self.clean_kwargs
+
         signals, _ = self._cache(
             _filter_and_extract, ignore=["verbose", "memory", "memory_level"]
         )(
junifer/markers/base.py
CHANGED

@@ -27,6 +27,11 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         The name of the marker. By default, it will use the class name as the
         name of the marker (default None).
 
+    Raises
+    ------
+    ValueError
+        If required input data type(s) is(are) not found.
+
     """
 
     def __init__(
@@ -34,31 +39,20 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         on: Optional[Union[List[str], str]] = None,
         name: Optional[str] = None,
     ) -> None:
+        # Use all data types if not provided
         if on is None:
             on = self.get_valid_inputs()
+        # Convert data types to list
         if not isinstance(on, list):
             on = [on]
+        # Set default name if not provided
         self.name = self.__class__.__name__ if name is None else name
-
+        # Check if required inputs are found
         if any(x not in self.get_valid_inputs() for x in on):
             wrong_on = [x for x in on if x not in self.get_valid_inputs()]
-
+            raise_error(f"{self.name} cannot be computed on {wrong_on}")
         self._on = on
 
-    @abstractmethod
-    def get_valid_inputs(self) -> List[str]:
-        """Get valid data types for input.
-
-        Returns
-        -------
-        list of str
-            The list of data types that can be used as input for this marker.
-        """
-        raise_error(
-            msg="Concrete classes need to implement get_valid_inputs().",
-            klass=NotImplementedError,
-        )
-
     def validate_input(self, input: List[str]) -> List[str]:
         """Validate input.
 
@@ -88,6 +82,21 @@ class BaseMarker(ABC, PipelineStepMixin, UpdateMetaMixin):
         )
         return [x for x in self._on if x in input]
 
+    @abstractmethod
+    def get_valid_inputs(self) -> List[str]:
+        """Get valid data types for input.
+
+        Returns
+        -------
+        list of str
+            The list of data types that can be used as input for this marker.
+
+        """
+        raise_error(
+            msg="Concrete classes need to implement get_valid_inputs().",
+            klass=NotImplementedError,
+        )
+
     @abstractmethod
     def get_output_type(self, input_type: str) -> str:
         """Get output type.
junifer/markers/collection.py
CHANGED

@@ -9,9 +9,10 @@ from typing import TYPE_CHECKING, Dict, List, Optional
 
 from ..datareader.default import DefaultDataReader
 from ..markers.base import BaseMarker
-from ..pipeline import PipelineStepMixin
+from ..pipeline import PipelineStepMixin, WorkDirManager
+from ..preprocess.base import BasePreprocessor
 from ..storage.base import BaseFeatureStorage
-from ..utils import logger
+from ..utils import logger, raise_error
 
 
 if TYPE_CHECKING:
@@ -27,25 +28,30 @@ class MarkerCollection:
         The markers to compute.
     datareader : DataReader-like object, optional
         The DataReader to use (default None).
-
-        The
+    preprocessors : list of preprocessing-like, optional
+        The preprocessors to apply (default None).
     storage : storage-like, optional
         The storage to use (default None).
 
+    Raises
+    ------
+    ValueError
+        If ``markers`` have same names.
+
     """
 
     def __init__(
         self,
         markers: List[BaseMarker],
         datareader: Optional[PipelineStepMixin] = None,
-
+        preprocessors: Optional[List[BasePreprocessor]] = None,
         storage: Optional[BaseFeatureStorage] = None,
     ):
         # Check that the markers have different names
         marker_names = [m.name for m in markers]
         if len(set(marker_names)) != len(marker_names):
             counts = Counter(marker_names)
-
+            raise_error(
                 "Markers must have different names. "
                 f"Current names are: {counts}"
             )
@@ -53,7 +59,7 @@
         if datareader is None:
             datareader = DefaultDataReader()
         self._datareader = datareader
-        self.
+        self._preprocessors = preprocessors
         self._storage = storage
 
     def fit(self, input: Dict[str, Dict]) -> Optional[Dict]:
@@ -79,9 +85,14 @@
         data = self._datareader.fit_transform(input)
 
         # Apply preprocessing steps
-        if self.
-
-
+        if self._preprocessors is not None:
+            for preprocessor in self._preprocessors:
+                logger.info(
+                    "Preprocessing data with "
+                    f"{preprocessor.__class__.__name__}"
+                )
+                # Mutate data after every iteration
+                data = preprocessor.fit_transform(data)
 
         # Compute markers
         out = {}
@@ -92,15 +103,18 @@
             out[marker.name] = m_value
         logger.info("Marker collection fitting done")
 
+        # Cleanup element directory
+        WorkDirManager().cleanup_elementdir()
+
         return None if self._storage else out
 
     def validate(self, datagrabber: "BaseDataGrabber") -> None:
         """Validate the pipeline.
 
         Without doing any computation, check if the marker collection can
-        be
+        be fitted without problems i.e., the data required for each marker is
         present and streamed down the steps. Also, if a storage is configured,
-        check that the storage can handle the markers output.
+        check that the storage can handle the markers' output.
 
         Parameters
         ----------
@@ -116,10 +130,15 @@
         t_data = self._datareader.validate(t_data)
         logger.info(f"Data Reader output type: {t_data}")
 
-        if self.
-
-
+        if self._preprocessors is not None:
+            for preprocessor in self._preprocessors:
+                logger.info(
+                    "Validating Preprocessor: "
+                    f"{preprocessor.__class__.__name__}"
+                )
+                # Validate preprocessor
+                t_data = preprocessor.validate(t_data)
+                logger.info(f"Preprocess output type: {t_data}")
 
         for marker in self._markers:
             logger.info(f"Validating Marker: {marker.name}")
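With the new `preprocessors` argument, a collection can chain confound removal (or any `BasePreprocessor`) before the markers run. A hedged construction sketch; the marker class, parcellation name and preprocessor defaults are assumptions, not taken from this diff:

```python
from junifer.markers import FunctionalConnectivityParcels
from junifer.markers.collection import MarkerCollection
from junifer.preprocess import fMRIPrepConfoundRemover

collection = MarkerCollection(
    markers=[
        FunctionalConnectivityParcels(
            parcellation="Schaefer100x17",  # illustrative name
            name="fc_schaefer100",
        ),
    ],
    # Each preprocessor's fit_transform mutates the data before markers run
    preprocessors=[fMRIPrepConfoundRemover()],
)

# collection.validate(datagrabber) checks the whole chain without computing;
# collection.fit(element) now also cleans the element directory through
# WorkDirManager().cleanup_elementdir() once the markers are done.
```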
junifer/markers/complexity/__init__.py
ADDED

@@ -0,0 +1,27 @@
+"""Provide imports for complexity sub-package."""
+
+# Authors: Amir Omidvarnia <a.omidvarnia@fz-juelich.de>
+#          Synchon Mandal <s.mandal@fz-juelich.de>
+# License: AGPL
+
+
+from importlib.util import find_spec
+
+from ..utils import raise_error
+
+
+# Check if neurokit2 is found
+if find_spec("neurokit2") is None:
+    raise_error(
+        msg="Could not find `neurokit2`, make sure the package is installed.",
+        klass=ImportError,
+    )
+else:
+    # Import markers
+    from .hurst_exponent import HurstExponent
+    from .range_entropy import RangeEntropy
+    from .range_entropy_auc import RangeEntropyAUC
+    from .perm_entropy import PermEntropy
+    from .weighted_perm_entropy import WeightedPermEntropy
+    from .sample_entropy import SampleEntropy
+    from .multiscale_entropy_auc import MultiscaleEntropyAUC
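Because the guard runs at import time, a missing `neurokit2` surfaces as a clear `ImportError` the moment the sub-package is imported. Assuming the dependency is installed, usage follows the parcel-based marker convention (the constructor argument and parcellation name are assumptions):

```python
# Raises ImportError pointing at neurokit2 if the dependency is missing.
from junifer.markers.complexity import PermEntropy

marker = PermEntropy(parcellation="Schaefer100x17")  # illustrative name
print(marker.get_valid_inputs())  # expected: ["BOLD"]
```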
|