junifer 0.0.5.dev68__py3-none-any.whl → 0.0.5.dev86__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- junifer/_version.py +2 -2
- junifer/api/functions.py +1 -1
- junifer/configs/juseless/datagrabbers/tests/test_ucla.py +1 -3
- junifer/configs/juseless/datagrabbers/ucla.py +9 -9
- junifer/data/masks.py +10 -22
- junifer/data/parcellations.py +1 -1
- junifer/data/tests/test_masks.py +8 -28
- junifer/datagrabber/aomic/id1000.py +34 -38
- junifer/datagrabber/aomic/piop1.py +33 -37
- junifer/datagrabber/aomic/piop2.py +35 -39
- junifer/datagrabber/aomic/tests/test_id1000.py +10 -11
- junifer/datagrabber/aomic/tests/test_piop1.py +10 -11
- junifer/datagrabber/aomic/tests/test_piop2.py +10 -11
- junifer/datagrabber/datalad_base.py +10 -1
- junifer/datagrabber/dmcc13_benchmark.py +36 -54
- junifer/datagrabber/pattern.py +116 -46
- junifer/datagrabber/pattern_datalad.py +22 -12
- junifer/datagrabber/tests/test_datagrabber_utils.py +15 -9
- junifer/datagrabber/tests/test_dmcc13_benchmark.py +46 -19
- junifer/datagrabber/utils.py +127 -54
- junifer/datareader/default.py +91 -42
- junifer/preprocess/base.py +2 -2
- junifer/preprocess/confounds/fmriprep_confound_remover.py +44 -60
- junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +72 -113
- junifer/testing/datagrabbers.py +5 -5
- junifer/testing/tests/test_partlycloudytesting_datagrabber.py +7 -7
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/METADATA +1 -1
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/RECORD +33 -33
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/AUTHORS.rst +0 -0
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/LICENSE.md +0 -0
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/WHEEL +0 -0
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/entry_points.txt +0 -0
- {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev86.dist-info}/top_level.txt +0 -0
junifer/datagrabber/utils.py
CHANGED
@@ -13,51 +13,45 @@ from ..utils import logger, raise_error
 PATTERNS_SCHEMA = {
     "T1w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T1w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "T2w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T2w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "BOLD": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "BOLD_confounds": {
-        "mandatory": ["pattern", "format"],
-        "optional": ["mappings"],
-    },
-    "BOLD_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+            "confounds": {
+                "mandatory": ["pattern", "format"],
+                "optional": ["mappings"],
+            },
+        },
     },
     "Warp": {
         "mandatory": ["pattern", "src", "dst"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_GM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_WM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_CSF": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "DWI": {
         "mandatory": ["pattern"],
-        "optional": [],
+        "optional": {},
     },
 }
 
@@ -129,6 +123,67 @@ def validate_replacements(
         raise_error(msg="At least one pattern must contain all replacements.")
 
 
+def _validate_mandatory_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Validate mandatory keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to validate.
+    schema : list of str
+        The schema to validate against.
+    data_type : str
+        The data type being validated.
+
+    Raises
+    ------
+    KeyError
+        If any mandatory key is missing for a data type.
+
+    """
+    for key in schema:
+        if key not in keys:
+            raise_error(
+                msg=f"Mandatory key: `{key}` missing for {data_type}",
+                klass=KeyError,
+            )
+        else:
+            logger.debug(f"Mandatory key: `{key}` found for {data_type}")
+
+
+def _identify_stray_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Identify stray keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to check.
+    schema : list of str
+        The schema to check against.
+    data_type : str
+        The data type being checked.
+
+    Raises
+    ------
+    RuntimeError
+        If an unknown key is found for a data type.
+
+    """
+    for key in keys:
+        if key not in schema:
+            raise_error(
+                msg=(
+                    f"Key: {key} not accepted for {data_type} "
+                    "pattern, remove it to proceed"
+                ),
+                klass=RuntimeError,
+            )
+
+
 def validate_patterns(
     types: List[str], patterns: Dict[str, Dict[str, str]]
 ) -> None:
@@ -143,10 +198,6 @@ def validate_patterns(
 
     Raises
     ------
-    KeyError
-        If any mandatory key is missing for a data type.
-    RuntimeError
-        If an unknown key is found for a data type.
     TypeError
         If ``patterns`` is not a dictionary.
     ValueError
@@ -180,22 +231,15 @@ def validate_patterns(
                 f"should be one of: {list(PATTERNS_SCHEMA.keys())}"
             )
         # Check mandatory keys for data type
-        for mandatory_key in PATTERNS_SCHEMA[data_type_key]["mandatory"]:
-            if mandatory_key not in data_type_val:
-                raise_error(
-                    msg=(
-                        f"Mandatory key: `{mandatory_key}` missing for "
-                        f"{data_type_key}"
-                    ),
-                    klass=KeyError,
-                )
-            else:
-                logger.debug(
-                    f"Mandatory key: `{mandatory_key}` found for "
-                    f"{data_type_key}"
-                )
+        _validate_mandatory_keys(
+            keys=list(data_type_val),
+            schema=PATTERNS_SCHEMA[data_type_key]["mandatory"],
+            data_type=data_type_key,
+        )
         # Check optional keys for data type
-        for optional_key in PATTERNS_SCHEMA[data_type_key]["optional"]:
+        for optional_key, optional_val in PATTERNS_SCHEMA[data_type_key][
+            "optional"
+        ].items():
             if optional_key not in data_type_val:
                 logger.debug(
                     f"Optional key: `{optional_key}` missing for "
@@ -206,19 +250,48 @@ def validate_patterns(
                     f"Optional key: `{optional_key}` found for "
                     f"{data_type_key}"
                 )
+                # Set nested type name for easier access
+                nested_data_type = f"{data_type_key}.{optional_key}"
+                nested_mandatory_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["mandatory"]
+                nested_optional_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["optional"]
+                # Check mandatory keys for nested type
+                _validate_mandatory_keys(
+                    keys=list(optional_val["mandatory"]),
+                    schema=nested_mandatory_keys_schema,
+                    data_type=nested_data_type,
+                )
+                # Check optional keys for nested type
+                for nested_optional_key in nested_optional_keys_schema:
+                    if nested_optional_key not in optional_val["optional"]:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` missing "
+                            f"for {nested_data_type}"
+                        )
+                    else:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` found for "
+                            f"{nested_data_type}"
+                        )
+                # Check stray key for nested data type
+                _identify_stray_keys(
+                    keys=optional_val["mandatory"] + optional_val["optional"],
+                    schema=nested_mandatory_keys_schema
+                    + nested_optional_keys_schema,
+                    data_type=nested_data_type,
+                )
         # Check stray key for data type
-        for key in data_type_val.keys():
-            if key not in (
+        _identify_stray_keys(
+            keys=list(data_type_val.keys()),
+            schema=(
                 PATTERNS_SCHEMA[data_type_key]["mandatory"]
-                + PATTERNS_SCHEMA[data_type_key]["optional"]
-            ):
-                raise_error(
-                    msg=(
-                        f"Key: {key} not accepted for {data_type_key} "
-                        "pattern, remove it to proceed"
-                    ),
-                    klass=RuntimeError,
-                )
+                + list(PATTERNS_SCHEMA[data_type_key]["optional"].keys())
+            ),
+            data_type=data_type_key,
+        )
         # Wildcard check in patterns
         if "}*" in data_type_val["pattern"]:
             raise_error(
junifer/datareader/default.py
CHANGED
@@ -5,7 +5,7 @@
 # License: AGPL
 
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 import nibabel as nib
 import pandas as pd
@@ -102,57 +102,106 @@ class DefaultDataReader(PipelineStepMixin, UpdateMetaMixin):
         if params is None:
             params = {}
         # For each type of data, try to read it
-        for type_ in input.keys():
+        for type_key, type_val in input.items():
             # Skip Warp data type
-            if type_ == "Warp":
+            if type_key == "Warp":
                 continue
 
             # Check for malformed datagrabber specification
-            if "path" not in input[type_]:
+            if "path" not in type_val:
                 warn_with_log(
-                    f"Input type {type_} does not provide a path. Skipping."
+                    f"Input type {type_key} does not provide a path. Skipping."
                 )
                 continue
 
-            # Retrieve actual path
-            t_path = input[type_]["path"]
-            # Retrieve loading params for the data type
-            t_params = params.get(type_, {})
-
-            # Convert str to Path
-            if not isinstance(t_path, Path):
-                t_path = Path(t_path)
-                out[type_]["path"] = t_path
-
-            logger.info(f"Reading {type_} from {t_path.as_posix()}")
-            # Initialize variable for file data
-            fread = None
-            # Lowercase path
-            fname = t_path.name.lower()
-            # Loop through extensions to find the correct one
-            for ext, ftype in _extensions.items():
-                if fname.endswith(ext):
-                    logger.info(f"{type_} is type {ftype}")
-                    # Retrieve reader function
-                    reader_func = _readers[ftype]["func"]
-                    # Retrieve reader function params
-                    reader_params = _readers[ftype]["params"]
-                    # Update reader function params
-                    if reader_params is not None:
-                        t_params.update(reader_params)
-                    logger.debug(f"Calling {reader_func} with {t_params}")
+            # Iterate to check for nested "types" like mask;
+            # need to copy to avoid runtime error for changing dict size
+            for k, v in type_val.copy().items():
+                # Read data for base data type
+                if k == "path":
+                    # Convert str to Path
+                    if not isinstance(v, Path):
+                        v = Path(v)
+                    # Update path
+                    out[type_key]["path"] = v
+                    logger.info(f"Reading {type_key} from {v.absolute()!s}")
+                    # Retrieve loading params for the data type
+                    t_params = params.get(type_key, {})
                     # Read data
-                    fread = reader_func(t_path, **t_params)
-                    break
-            # If no file data is found due to unknown extension
-            if fread is None:
-                logger.info(
-                    f"Unknown file type {t_path.as_posix()}, skipping reading"
-                )
+                    out[type_key]["data"] = _read_data(
+                        data_type=type_key, path=v, read_params=t_params
+                    )
+                # Read data for nested data type
+                if isinstance(v, dict) and "path" in v:
+                    # Set path
+                    nested_path = v["path"]
+                    # Convert str to Path
+                    if not isinstance(nested_path, Path):
+                        nested_path = Path(nested_path)
+                    # Update path
+                    out[type_key][k]["path"] = nested_path
+                    # Set nested type key for easier access
+                    nested_type = f"{type_key}.{k}"
+                    logger.info(
+                        f"Reading {nested_type} from "
+                        f"{nested_path.absolute()!s}"
+                    )
+                    # Retrieve loading params for the nested data type
+                    nested_params = params.get(nested_type, {})
+                    # Read data
+                    out[type_key][k]["data"] = _read_data(
+                        data_type=nested_type,
+                        path=nested_path,
+                        read_params=nested_params,
+                    )
 
-            # Set file data for output
-            out[type_]["data"] = fread
             # Update metadata for step
-            self.update_meta(out[type_], "datareader")
+            self.update_meta(out[type_key], "datareader")
 
         return out
+
+
+def _read_data(
+    data_type: str, path: Path, read_params: Dict
+) -> Union[nib.Nifti1Image, pd.DataFrame, None]:
+    """Read data for data type.
+
+    Parameters
+    ----------
+    data_type : str
+        The data type being read.
+    path : pathlib.Path
+        The path to read data from.
+    read_params : dict
+        Parameters for reader function.
+
+    Returns
+    -------
+    nibabel.Nifti1Image or pandas.DataFrame or pandas.TextFileReader or None
+        The data loaded in memory if file type is known else None.
+
+    """
+    # Initialize variable for file data
+    fread = None
+    # Lowercase path
+    fname = path.name.lower()
+    # Loop through extensions to find the correct one
+    for ext, ftype in _extensions.items():
+        if fname.endswith(ext):
+            logger.info(f"{data_type} is of type {ftype}")
+            # Retrieve reader function
+            reader_func = _readers[ftype]["func"]
+            # Retrieve reader function params
+            reader_params = _readers[ftype]["params"]
+            # Update reader function params
+            if reader_params is not None:
+                read_params.update(reader_params)
+            logger.debug(f"Calling {reader_func!s} with {read_params}")
            # Read data
+            fread = reader_func(path, **read_params)
+            break
+    # If no file data is found due to unknown extension
+    if fread is None:
+        logger.info(f"Unknown file type {path.absolute()!s}, skipping reading")
+
+    return fread
junifer/preprocess/base.py
CHANGED
@@ -146,8 +146,8 @@ class BasePreprocessor(ABC, PipelineStepMixin, UpdateMetaMixin):
             The computed result as dictionary.
         dict or None
             Extra "helper" data types as dictionary to add to the Junifer Data
-            object.
-
+            object. If no new "helper" data type(s) is(are) created, None is to
+            be passed.
 
         """
         raise_error(
junifer/preprocess/confounds/fmriprep_confound_remover.py
CHANGED
@@ -203,9 +203,7 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
                "include it in the future",
                klass=ValueError,
            )
-        super().__init__(
-            on="BOLD", required_data_types=["BOLD", "BOLD_confounds"]
-        )
+        super().__init__(on="BOLD", required_data_types=["BOLD"])
 
     def get_valid_inputs(self) -> List[str]:
         """Get valid data types for input.
@@ -361,7 +359,7 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         Parameters
         ----------
         input : dict
-            Dictionary containing the ``BOLD_confounds`` value from the
+            Dictionary containing the ``BOLD.confounds`` value from the
             Junifer Data object.
 
         Returns
@@ -370,7 +368,6 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             Dataframe containing the relevant confounds.
 
         """
-
         confounds_format = input["format"]
         if confounds_format == "adhoc":
             self._map_adhoc_to_fmriprep(input)
@@ -416,50 +413,42 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
     def _validate_data(
         self,
         input: Dict[str, Any],
-        extra_input: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Validate input data.
 
         Parameters
         ----------
         input : dict
-            Dictionary containing the ``BOLD`` value from the
+            Dictionary containing the ``BOLD`` data from the
             Junifer Data object.
-        extra_input : dict, optional
-            Dictionary containing the rest of the Junifer Data object. Must
-            include the ``BOLD_confounds`` key.
 
         Raises
         ------
         ValueError
-            If ``extra_input`` is None or
-            if ``"BOLD_confounds"`` is not found in ``extra_input`` or
-            if ``"data"`` key is not found in ``"BOLD_confounds"`` or
-            if ``"data"`` is not pandas.DataFrame or
+            If ``"confounds"`` is not found in ``input`` or
+            if ``"data"`` key is not found in ``"input.confounds"`` or
+            if ``"input.confounds.data"`` is not pandas.DataFrame or
             if image time series and confounds have different lengths or
-            if ``format = "adhoc"`` and ``"mappings"`` key is not found in
-            ``"BOLD_confounds"`` or if ``"fmriprep"`` key is not found in
-            ``"mappings"`` or if required fMRIPrep mappings are
-            not found or
+            if ``format = "adhoc"`` and ``"mappings"`` key is not found or
+            ``"fmriprep"`` key is not found in ``"mappings"`` or
+            ``"fmriprep"`` has incorrect fMRIPrep mappings or required
+            fMRIPrep mappings are not found or
+            if invalid confounds format is found.
 
         """
         # BOLD must be 4D niimg
         check_niimg_4d(input["data"])
-        # Check for extra inputs
-        if extra_input is None:
-            raise_error(
-                "No extra input provided, requires `BOLD_confounds` "
-                "data type in particular"
-            )
-        if "BOLD_confounds" not in extra_input:
-            raise_error("`BOLD_confounds` data type not provided")
-        if "data" not in extra_input["BOLD_confounds"]:
-            raise_error("`BOLD_confounds.data` not provided")
-
-        if not isinstance(extra_input["BOLD_confounds"]["data"], pd.DataFrame):
-            raise_error("`BOLD_confounds.data` must be a `pandas.DataFrame`")
-
-        confound_df = extra_input["BOLD_confounds"]["data"]
+        # Check for confound data
+        if "confounds" not in input:
+            raise_error("`BOLD.confounds` data type not provided")
+        if "data" not in input["confounds"]:
+            raise_error("`BOLD.confounds.data` not provided")
+        # Confounds must be a pandas.DataFrame;
+        # if extension is unknown, will not be read, which will give None
+        if not isinstance(input["confounds"]["data"], pd.DataFrame):
+            raise_error("`BOLD.confounds.data` must be a `pandas.DataFrame`")
+
+        confound_df = input["confounds"]["data"]
         bold_img = input["data"]
         if bold_img.get_fdata().shape[3] != len(confound_df):
             raise_error(
@@ -469,23 +458,19 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             )
 
         # Check format
-        if "format" not in extra_input["BOLD_confounds"]:
-            raise_error("`BOLD_confounds.format` not provided")
-        t_format = extra_input["BOLD_confounds"]["format"]
+        t_format = input["confounds"]["format"]
         if t_format == "adhoc":
-            if "mappings" not in extra_input["BOLD_confounds"]:
+            if "mappings" not in input["confounds"]:
                 raise_error(
-                    "`BOLD_confounds.mappings` need to be set when "
-                    "`BOLD_confounds.format == 'adhoc'`"
+                    "`BOLD.confounds.mappings` need to be set when "
+                    "`BOLD.confounds.format == 'adhoc'`"
                 )
-            if "fmriprep" not in extra_input["BOLD_confounds"]["mappings"]:
+            if "fmriprep" not in input["confounds"]["mappings"]:
                 raise_error(
-                    "`BOLD_confounds.mappings.fmriprep` need to be set when "
-                    "`BOLD_confounds.format == 'adhoc'`"
+                    "`BOLD.confounds.mappings.fmriprep` need to be set when "
+                    "`BOLD.confounds.format == 'adhoc'`"
                 )
-            fmriprep_mappings = extra_input["BOLD_confounds"]["mappings"][
-                "fmriprep"
-            ]
+            fmriprep_mappings = input["confounds"]["mappings"]["fmriprep"]
             wrong_names = [
                 x
                 for x in fmriprep_mappings.values()
@@ -525,22 +510,22 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         input : dict
             A single input from the Junifer Data object to preprocess.
         extra_input : dict, optional
-            The other fields in the Junifer Data object. Must include the
-            ``BOLD_confounds`` key.
+            The other fields in the Junifer Data object.
 
         Returns
         -------
         dict
-            The computed result as dictionary.
-        dict or None
-            Extra "helper" data types as dictionary to add to the Junifer
-            Data object.
+            The computed result as dictionary. If `self.masks` is not None,
+            then the target data computed mask is updated for further steps.
+        None
+            Extra "helper" data types as dictionary to add to the Junifer Data
+            object.
 
         """
         # Validate data
-        self._validate_data(input, extra_input)
+        self._validate_data(input)
         # Pick confounds
-        confounds_df = self._pick_confounds(extra_input["BOLD_confounds"])
+        confounds_df = self._pick_confounds(input["confounds"])  # type: ignore
         # Get BOLD data
         bold_img = input["data"]
         # Set t_r
@@ -553,7 +538,6 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         )
         # Set mask data
         mask_img = None
-        bold_mask_dict = None
         if self.masks is not None:
             logger.debug(f"Masking with {self.masks}")
             mask_img = get_mask(
@@ -561,15 +545,15 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             )
             # Return the BOLD mask and link it to the BOLD data type dict;
             # this allows to use "inherit" down the pipeline
-            logger.debug("Setting `BOLD.mask_item`")
-            input["mask_item"] = "BOLD_mask"
-
-            bold_mask_dict = {
-                "BOLD_mask": {
+            logger.debug("Setting `BOLD.mask`")
+            input.update(
+                {
+                    "mask": {
                     "data": mask_img,
                     "space": input["space"],
                 }
             }
+            )
         # Clean image
         logger.info("Cleaning image using nilearn")
         logger.debug(f"\tdetrend: {self.detrend}")
@@ -587,4 +571,4 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             mask_img=mask_img,
         )
 
-        return input, bold_mask_dict
+        return input, None