junifer 0.0.5.dev68__py3-none-any.whl → 0.0.5.dev93__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (36)
  1. junifer/_version.py +2 -2
  2. junifer/api/functions.py +1 -1
  3. junifer/configs/juseless/datagrabbers/tests/test_ucla.py +1 -3
  4. junifer/configs/juseless/datagrabbers/ucla.py +9 -9
  5. junifer/data/masks.py +10 -22
  6. junifer/data/parcellations.py +1 -1
  7. junifer/data/tests/test_masks.py +8 -28
  8. junifer/datagrabber/aomic/id1000.py +34 -38
  9. junifer/datagrabber/aomic/piop1.py +33 -37
  10. junifer/datagrabber/aomic/piop2.py +35 -39
  11. junifer/datagrabber/aomic/tests/test_id1000.py +10 -11
  12. junifer/datagrabber/aomic/tests/test_piop1.py +10 -11
  13. junifer/datagrabber/aomic/tests/test_piop2.py +10 -11
  14. junifer/datagrabber/datalad_base.py +10 -1
  15. junifer/datagrabber/dmcc13_benchmark.py +36 -54
  16. junifer/datagrabber/pattern.py +116 -46
  17. junifer/datagrabber/pattern_datalad.py +22 -12
  18. junifer/datagrabber/tests/test_datagrabber_utils.py +15 -9
  19. junifer/datagrabber/tests/test_dmcc13_benchmark.py +46 -19
  20. junifer/datagrabber/utils.py +127 -54
  21. junifer/datareader/default.py +91 -42
  22. junifer/preprocess/base.py +2 -2
  23. junifer/preprocess/confounds/fmriprep_confound_remover.py +44 -60
  24. junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +72 -113
  25. junifer/storage/base.py +37 -1
  26. junifer/storage/hdf5.py +68 -9
  27. junifer/storage/tests/test_hdf5.py +82 -10
  28. junifer/testing/datagrabbers.py +5 -5
  29. junifer/testing/tests/test_partlycloudytesting_datagrabber.py +7 -7
  30. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/METADATA +1 -1
  31. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/RECORD +36 -36
  32. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/AUTHORS.rst +0 -0
  33. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/LICENSE.md +0 -0
  34. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/WHEEL +0 -0
  35. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/entry_points.txt +0 -0
  36. {junifer-0.0.5.dev68.dist-info → junifer-0.0.5.dev93.dist-info}/top_level.txt +0 -0
@@ -13,51 +13,45 @@ from ..utils import logger, raise_error
 PATTERNS_SCHEMA = {
     "T1w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T1w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "T2w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T2w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "BOLD": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "BOLD_confounds": {
-        "mandatory": ["pattern", "format"],
-        "optional": [],
-    },
-    "BOLD_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+            "confounds": {
+                "mandatory": ["pattern", "format"],
+                "optional": ["mappings"],
+            },
+        },
     },
     "Warp": {
         "mandatory": ["pattern", "src", "dst"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_GM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_WM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_CSF": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "DWI": {
         "mandatory": ["pattern"],
-        "optional": [],
+        "optional": {},
     },
 }
 
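Note: with this change, mask and confounds specifications move from flat top-level types (`T1w_mask`, `BOLD_mask`, `BOLD_confounds`) into an `optional` nesting under their parent data type. For illustration, a `BOLD` pattern spec that satisfies the new schema might look like this (paths and the space value are hypothetical):

    patterns = {
        "BOLD": {
            "pattern": "{subject}/func/{subject}_task-rest_bold.nii.gz",
            "space": "MNI152NLin2009cAsym",
            "mask": {
                "pattern": "{subject}/func/{subject}_task-rest_brainmask.nii.gz",
                "space": "MNI152NLin2009cAsym",
            },
            "confounds": {
                "pattern": "{subject}/func/{subject}_task-rest_confounds.tsv",
                "format": "fmriprep",
            },
        },
    }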
@@ -129,6 +123,67 @@ def validate_replacements(
         raise_error(msg="At least one pattern must contain all replacements.")
 
 
+def _validate_mandatory_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Validate mandatory keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to validate.
+    schema : list of str
+        The schema to validate against.
+    data_type : str
+        The data type being validated.
+
+    Raises
+    ------
+    KeyError
+        If any mandatory key is missing for a data type.
+
+    """
+    for key in schema:
+        if key not in keys:
+            raise_error(
+                msg=f"Mandatory key: `{key}` missing for {data_type}",
+                klass=KeyError,
+            )
+        else:
+            logger.debug(f"Mandatory key: `{key}` found for {data_type}")
+
+
+def _identify_stray_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Identify stray keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to check.
+    schema : list of str
+        The schema to check against.
+    data_type : str
+        The data type being checked.
+
+    Raises
+    ------
+    RuntimeError
+        If an unknown key is found for a data type.
+
+    """
+    for key in keys:
+        if key not in schema:
+            raise_error(
+                msg=(
+                    f"Key: {key} not accepted for {data_type} "
+                    "pattern, remove it to proceed"
+                ),
+                klass=RuntimeError,
+            )
+
+
 def validate_patterns(
     types: List[str], patterns: Dict[str, Dict[str, str]]
 ) -> None:
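As a quick sketch of how these helpers behave (illustrative arguments, not taken from the package's tests):

    # All mandatory keys present: logs a debug message per key
    _validate_mandatory_keys(
        keys=["pattern", "space"], schema=["pattern", "space"], data_type="T1w"
    )
    # Missing mandatory key: raises KeyError via raise_error
    _validate_mandatory_keys(
        keys=["pattern"], schema=["pattern", "space"], data_type="T1w"
    )
    # Unknown key: raises RuntimeError via raise_error
    _identify_stray_keys(
        keys=["pattern", "space", "mask_item"],
        schema=["pattern", "space"],
        data_type="T1w",
    )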
@@ -143,10 +198,6 @@ def validate_patterns(
 
     Raises
     ------
-    KeyError
-        If any mandatory key is missing for a data type.
-    RuntimeError
-        If an unknown key is found for a data type.
     TypeError
         If ``patterns`` is not a dictionary.
     ValueError
@@ -180,22 +231,15 @@ def validate_patterns(
                 f"should be one of: {list(PATTERNS_SCHEMA.keys())}"
             )
         # Check mandatory keys for data type
-        for mandatory_key in PATTERNS_SCHEMA[data_type_key]["mandatory"]:
-            if mandatory_key not in data_type_val:
-                raise_error(
-                    msg=(
-                        f"Mandatory key: `{mandatory_key}` missing for "
-                        f"{data_type_key}"
-                    ),
-                    klass=KeyError,
-                )
-            else:
-                logger.debug(
-                    f"Mandatory key: `{mandatory_key}` found for "
-                    f"{data_type_key}"
-                )
+        _validate_mandatory_keys(
+            keys=list(data_type_val),
+            schema=PATTERNS_SCHEMA[data_type_key]["mandatory"],
+            data_type=data_type_key,
+        )
         # Check optional keys for data type
-        for optional_key in PATTERNS_SCHEMA[data_type_key]["optional"]:
+        for optional_key, optional_val in PATTERNS_SCHEMA[data_type_key][
+            "optional"
+        ].items():
             if optional_key not in data_type_val:
                 logger.debug(
                     f"Optional key: `{optional_key}` missing for "
@@ -206,19 +250,48 @@ def validate_patterns(
                     f"Optional key: `{optional_key}` found for "
                     f"{data_type_key}"
                 )
+                # Set nested type name for easier access
+                nested_data_type = f"{data_type_key}.{optional_key}"
+                nested_mandatory_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["mandatory"]
+                nested_optional_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["optional"]
+                # Check mandatory keys for nested type
+                _validate_mandatory_keys(
+                    keys=list(optional_val["mandatory"]),
+                    schema=nested_mandatory_keys_schema,
+                    data_type=nested_data_type,
+                )
+                # Check optional keys for nested type
+                for nested_optional_key in nested_optional_keys_schema:
+                    if nested_optional_key not in optional_val["optional"]:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` missing "
+                            f"for {nested_data_type}"
+                        )
+                    else:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` found for "
+                            f"{nested_data_type}"
+                        )
+                # Check stray key for nested data type
+                _identify_stray_keys(
+                    keys=optional_val["mandatory"] + optional_val["optional"],
+                    schema=nested_mandatory_keys_schema
+                    + nested_optional_keys_schema,
+                    data_type=nested_data_type,
+                )
         # Check stray key for data type
-        for key in data_type_val.keys():
-            if key not in (
+        _identify_stray_keys(
+            keys=list(data_type_val.keys()),
+            schema=(
                 PATTERNS_SCHEMA[data_type_key]["mandatory"]
-                + PATTERNS_SCHEMA[data_type_key]["optional"]
-            ):
-                raise_error(
-                    msg=(
-                        f"Key: {key} not accepted for {data_type_key} "
-                        "pattern, remove it to proceed"
-                    ),
-                    klass=RuntimeError,
-                )
+                + list(PATTERNS_SCHEMA[data_type_key]["optional"].keys())
+            ),
+            data_type=data_type_key,
+        )
         # Wildcard check in patterns
         if "}*" in data_type_val["pattern"]:
             raise_error(
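One practical consequence of checking stray keys against the new schema: keys from the old flat layout, such as `mask_item`, are no longer accepted at the top level of a pattern. A hypothetical spec like the following should now fail with a RuntimeError:

    validate_patterns(
        types=["BOLD"],
        patterns={
            "BOLD": {
                "pattern": "{subject}/func/{subject}_bold.nii.gz",
                "space": "MNI152NLin2009cAsym",
                "mask_item": "BOLD_mask",  # stray key under the new schema
            },
        },
    )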
@@ -5,7 +5,7 @@
 # License: AGPL
 
 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union
 
 import nibabel as nib
 import pandas as pd
@@ -102,57 +102,106 @@ class DefaultDataReader(PipelineStepMixin, UpdateMetaMixin):
         if params is None:
             params = {}
         # For each type of data, try to read it
-        for type_ in input.keys():
+        for type_key, type_val in input.items():
             # Skip Warp data type
-            if type_ == "Warp":
+            if type_key == "Warp":
                 continue
 
             # Check for malformed datagrabber specification
-            if "path" not in input[type_]:
+            if "path" not in type_val:
                 warn_with_log(
-                    f"Input type {type_} does not provide a path. Skipping."
+                    f"Input type {type_key} does not provide a path. Skipping."
                 )
                 continue
 
-            # Retrieve actual path
-            t_path = input[type_]["path"]
-            # Retrieve loading params for the data type
-            t_params = params.get(type_, {})
-
-            # Convert str to Path
-            if not isinstance(t_path, Path):
-                t_path = Path(t_path)
-            out[type_]["path"] = t_path
-
-            logger.info(f"Reading {type_} from {t_path.as_posix()}")
-            # Initialize variable for file data
-            fread = None
-            # Lowercase path
-            fname = t_path.name.lower()
-            # Loop through extensions to find the correct one
-            for ext, ftype in _extensions.items():
-                if fname.endswith(ext):
-                    logger.info(f"{type_} is type {ftype}")
-                    # Retrieve reader function
-                    reader_func = _readers[ftype]["func"]
-                    # Retrieve reader function params
-                    reader_params = _readers[ftype]["params"]
-                    # Update reader function params
-                    if reader_params is not None:
-                        t_params.update(reader_params)
-                    logger.debug(f"Calling {reader_func} with {t_params}")
+            # Iterate to check for nested "types" like mask;
+            # need to copy to avoid runtime error for changing dict size
+            for k, v in type_val.copy().items():
+                # Read data for base data type
+                if k == "path":
+                    # Convert str to Path
+                    if not isinstance(v, Path):
+                        v = Path(v)
+                    # Update path
+                    out[type_key]["path"] = v
+                    logger.info(f"Reading {type_key} from {v.absolute()!s}")
+                    # Retrieve loading params for the data type
+                    t_params = params.get(type_key, {})
                     # Read data
-                    fread = reader_func(t_path, **t_params)
-                    break
-            # If no file data is found due to unknown extension
-            if fread is None:
-                logger.info(
-                    f"Unknown file type {t_path.as_posix()}, skipping reading"
-                )
+                    out[type_key]["data"] = _read_data(
+                        data_type=type_key, path=v, read_params=t_params
+                    )
+                # Read data for nested data type
+                if isinstance(v, dict) and "path" in v:
+                    # Set path
+                    nested_path = v["path"]
+                    # Convert str to Path
+                    if not isinstance(nested_path, Path):
+                        nested_path = Path(nested_path)
+                    # Update path
+                    out[type_key][k]["path"] = nested_path
+                    # Set nested type key for easier access
+                    nested_type = f"{type_key}.{k}"
+                    logger.info(
+                        f"Reading {nested_type} from "
+                        f"{nested_path.absolute()!s}"
+                    )
+                    # Retrieve loading params for the nested data type
+                    nested_params = params.get(nested_type, {})
+                    # Read data
+                    out[type_key][k]["data"] = _read_data(
+                        data_type=nested_type,
+                        path=nested_path,
+                        read_params=nested_params,
+                    )
 
-            # Set file data for output
-            out[type_]["data"] = fread
             # Update metadata for step
-            self.update_meta(out[type_], "datareader")
+            self.update_meta(out[type_key], "datareader")
 
         return out
+
+
+def _read_data(
+    data_type: str, path: Path, read_params: Dict
+) -> Union[nib.Nifti1Image, pd.DataFrame, None]:
+    """Read data for data type.
+
+    Parameters
+    ----------
+    data_type : str
+        The data type being read.
+    path : pathlib.Path
+        The path to read data from.
+    read_params : dict
+        Parameters for reader function.
+
+    Returns
+    -------
+    nibabel.Nifti1Image or pandas.DataFrame or pandas.TextFileReader or None
+        The data loaded in memory if file type is known else None.
+
+    """
+    # Initialize variable for file data
+    fread = None
+    # Lowercase path
+    fname = path.name.lower()
+    # Loop through extensions to find the correct one
+    for ext, ftype in _extensions.items():
+        if fname.endswith(ext):
+            logger.info(f"{data_type} is of type {ftype}")
+            # Retrieve reader function
+            reader_func = _readers[ftype]["func"]
+            # Retrieve reader function params
+            reader_params = _readers[ftype]["params"]
+            # Update reader function params
+            if reader_params is not None:
+                read_params.update(reader_params)
+            logger.debug(f"Calling {reader_func!s} with {read_params}")
+            # Read data
+            fread = reader_func(path, **read_params)
+            break
+    # If no file data is found due to unknown extension
+    if fread is None:
+        logger.info(f"Unknown file type {path.absolute()!s}, skipping reading")
+
+    return fread
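To sketch the new reading flow: given a Junifer Data object with a nested `confounds` entry (hypothetical paths, shown as a sketch rather than the exact junifer API surface), the reader now resolves both the base and the nested paths roughly as follows:

    input = {
        "BOLD": {
            "path": "/data/sub-01/func/sub-01_bold.nii.gz",
            "confounds": {
                "path": "/data/sub-01/func/sub-01_confounds.tsv",
                "format": "fmriprep",
            },
        },
    }
    # After the reader runs, assuming both extensions are known:
    #   out["BOLD"]["data"]              <- _read_data("BOLD", ...)
    #   out["BOLD"]["confounds"]["data"] <- _read_data("BOLD.confounds", ...)
    # Reader params for the nested entry are looked up under the dotted
    # key, e.g. params={"BOLD.confounds": {...}}.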
@@ -146,8 +146,8 @@ class BasePreprocessor(ABC, PipelineStepMixin, UpdateMetaMixin):
             The computed result as dictionary.
         dict or None
             Extra "helper" data types as dictionary to add to the Junifer Data
-            object. For example, computed BOLD mask can be passed via this.
-            If no new "helper" data types is created, None is to be passed.
+            object. If no new "helper" data type(s) is(are) created, None is to
+            be passed.
 
         """
         raise_error(
@@ -203,9 +203,7 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
                 "include it in the future",
                 klass=ValueError,
             )
-        super().__init__(
-            on="BOLD", required_data_types=["BOLD", "BOLD_confounds"]
-        )
+        super().__init__(on="BOLD", required_data_types=["BOLD"])
 
     def get_valid_inputs(self) -> List[str]:
         """Get valid data types for input.
@@ -361,7 +359,7 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         Parameters
         ----------
         input : dict
-            Dictionary containing the ``BOLD_confounds`` value from the
+            Dictionary containing the ``BOLD.confounds`` value from the
             Junifer Data object.
 
         Returns
@@ -370,7 +368,6 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             Dataframe containing the relevant confounds.
 
         """
-
         confounds_format = input["format"]
         if confounds_format == "adhoc":
             self._map_adhoc_to_fmriprep(input)
@@ -416,50 +413,42 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
     def _validate_data(
         self,
         input: Dict[str, Any],
-        extra_input: Optional[Dict[str, Any]] = None,
     ) -> None:
         """Validate input data.
 
         Parameters
         ----------
         input : dict
-            Dictionary containing the ``BOLD`` value from the
+            Dictionary containing the ``BOLD`` data from the
             Junifer Data object.
-        extra_input : dict, optional
-            Dictionary containing the rest of the Junifer Data object. Must
-            include the ``BOLD_confounds`` key.
 
         Raises
         ------
         ValueError
-            If ``extra_input`` is None or
-            if ``"BOLD_confounds"`` is not found in ``extra_input`` or
-            if ``"data"`` key is not found in ``"BOLD_confounds"`` or
-            if ``"data"`` is not pandas.DataFrame or
+            If ``"confounds"`` is not found in ``input`` or
+            if ``"data"`` key is not found in ``"input.confounds"`` or
+            if ``"input.confounds.data"`` is not pandas.DataFrame or
             if image time series and confounds have different lengths or
-            if ``"format"`` is not found in ``"BOLD_confounds"`` or
-            if ``format = "adhoc"`` and ``"mappings"`` key or ``"fmriprep"``
-            key or correct fMRIPrep mappings or required fMRIPrep mappings are
-            not found or if invalid confounds format is found.
+            if ``format = "adhoc"`` and ``"mappings"`` key is not found or
+            ``"fmriprep"`` key is not found in ``"mappings"`` or
+            ``"fmriprep"`` has incorrect fMRIPrep mappings or required
+            fMRIPrep mappings are not found or
+            if invalid confounds format is found.
 
         """
         # BOLD must be 4D niimg
         check_niimg_4d(input["data"])
-        # Check for extra inputs
-        if extra_input is None:
-            raise_error(
-                "No extra input provided, requires `BOLD_confounds` data type "
-                "in particular"
-            )
-        if "BOLD_confounds" not in extra_input:
-            raise_error("`BOLD_confounds` data type not provided")
-        if "data" not in extra_input["BOLD_confounds"]:
-            raise_error("`BOLD_confounds.data` not provided")
-        # Confounds must be a pandas.DataFrame
-        if not isinstance(extra_input["BOLD_confounds"]["data"], pd.DataFrame):
-            raise_error("`BOLD_confounds.data` must be a `pandas.DataFrame`")
-
-        confound_df = extra_input["BOLD_confounds"]["data"]
+        # Check for confound data
+        if "confounds" not in input:
+            raise_error("`BOLD.confounds` data type not provided")
+        if "data" not in input["confounds"]:
+            raise_error("`BOLD.confounds.data` not provided")
+        # Confounds must be a pandas.DataFrame;
+        # if extension is unknown, will not be read, which will give None
+        if not isinstance(input["confounds"]["data"], pd.DataFrame):
+            raise_error("`BOLD.confounds.data` must be a `pandas.DataFrame`")
+
+        confound_df = input["confounds"]["data"]
         bold_img = input["data"]
         if bold_img.get_fdata().shape[3] != len(confound_df):
             raise_error(
@@ -469,23 +458,19 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             )
 
         # Check format
-        if "format" not in extra_input["BOLD_confounds"]:
-            raise_error("`BOLD_confounds.format` not provided")
-        t_format = extra_input["BOLD_confounds"]["format"]
+        t_format = input["confounds"]["format"]
         if t_format == "adhoc":
-            if "mappings" not in extra_input["BOLD_confounds"]:
+            if "mappings" not in input["confounds"]:
                 raise_error(
-                    "`BOLD_confounds.mappings` need to be set when "
-                    "`BOLD_confounds.format == 'adhoc'`"
+                    "`BOLD.confounds.mappings` need to be set when "
+                    "`BOLD.confounds.format == 'adhoc'`"
                 )
-            if "fmriprep" not in extra_input["BOLD_confounds"]["mappings"]:
+            if "fmriprep" not in input["confounds"]["mappings"]:
                 raise_error(
-                    "`BOLD_confounds.mappings.fmriprep` need to be set when "
-                    "`BOLD_confounds.format == 'adhoc'`"
+                    "`BOLD.confounds.mappings.fmriprep` need to be set when "
+                    "`BOLD.confounds.format == 'adhoc'`"
                 )
-            fmriprep_mappings = extra_input["BOLD_confounds"]["mappings"][
-                "fmriprep"
-            ]
+            fmriprep_mappings = input["confounds"]["mappings"]["fmriprep"]
             wrong_names = [
                 x
                 for x in fmriprep_mappings.values()
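Correspondingly, an ``adhoc`` confounds specification now nests its mappings under ``BOLD.confounds`` instead of the former ``BOLD_confounds`` type. A minimal sketch (column names hypothetical; mapping values must be valid fMRIPrep confound names):

    input["BOLD"]["confounds"] = {
        "data": confounds_df,  # pandas.DataFrame loaded by the datareader
        "format": "adhoc",
        "mappings": {
            "fmriprep": {
                "my_csf": "csf",
                "my_wm": "white_matter",
                "my_gs": "global_signal",
            },
        },
    }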
@@ -525,22 +510,22 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
         input : dict
             A single input from the Junifer Data object to preprocess.
         extra_input : dict, optional
-            The other fields in the Junifer Data object. Must include the
-            ``BOLD_confounds`` key.
+            The other fields in the Junifer Data object.
 
         Returns
         -------
         dict
-            The computed result as dictionary.
-        dict or None
-            If `self.masks` is not None, then the target data computed mask is
-            returned else None.
+            The computed result as dictionary. If `self.masks` is not None,
+            then the target data computed mask is updated for further steps.
+        None
+            Extra "helper" data types as dictionary to add to the Junifer Data
+            object.
 
         """
         # Validate data
-        self._validate_data(input, extra_input)
+        self._validate_data(input)
         # Pick confounds
-        confounds_df = self._pick_confounds(extra_input["BOLD_confounds"])  # type: ignore
+        confounds_df = self._pick_confounds(input["confounds"])  # type: ignore
         # Get BOLD data
         bold_img = input["data"]
         # Set t_r
@@ -553,7 +538,6 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             )
         # Set mask data
         mask_img = None
-        bold_mask_dict = None
         if self.masks is not None:
             logger.debug(f"Masking with {self.masks}")
             mask_img = get_mask(
@@ -561,15 +545,15 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             )
             # Return the BOLD mask and link it to the BOLD data type dict;
             # this allows to use "inherit" down the pipeline
-            if extra_input is not None:
-                logger.debug("Setting `BOLD.mask_item`")
-                input["mask_item"] = "BOLD_mask"
-                bold_mask_dict = {
-                    "BOLD_mask": {
+            logger.debug("Setting `BOLD.mask`")
+            input.update(
+                {
+                    "mask": {
                         "data": mask_img,
                         "space": input["space"],
                     }
                 }
+            )
         # Clean image
         logger.info("Cleaning image using nilearn")
         logger.debug(f"\tdetrend: {self.detrend}")
@@ -587,4 +571,4 @@ class fMRIPrepConfoundRemover(BasePreprocessor):
             mask_img=mask_img,
         )
 
-        return input, bold_mask_dict
+        return input, None
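In effect, when `self.masks` is set, the computed mask now travels inside the `BOLD` dict itself rather than as a separate `BOLD_mask` helper type, and the second element of the returned tuple is always None. A sketch of the resulting structure (space value hypothetical):

    input["BOLD"] = {
        "data": cleaned_bold_img,  # output of the nilearn cleaning call
        "space": "MNI152NLin2009cAsym",
        "mask": {
            "data": mask_img,  # computed via get_mask(...)
            "space": "MNI152NLin2009cAsym",  # same space as the BOLD data
        },
    }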