junifer 0.0.5.dev62__py3-none-any.whl → 0.0.5.dev86__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. junifer/_version.py +2 -2
  2. junifer/api/cli.py +26 -0
  3. junifer/api/functions.py +1 -1
  4. junifer/api/res/freesurfer/mri_binarize +3 -0
  5. junifer/api/res/freesurfer/mri_mc +3 -0
  6. junifer/api/res/freesurfer/mri_pretess +3 -0
  7. junifer/api/res/freesurfer/mris_convert +3 -0
  8. junifer/api/res/freesurfer/run_freesurfer_docker.sh +61 -0
  9. junifer/configs/juseless/datagrabbers/tests/test_ucla.py +1 -3
  10. junifer/configs/juseless/datagrabbers/ucla.py +9 -9
  11. junifer/data/masks.py +10 -22
  12. junifer/data/parcellations.py +1 -1
  13. junifer/data/tests/test_masks.py +8 -28
  14. junifer/datagrabber/aomic/id1000.py +34 -38
  15. junifer/datagrabber/aomic/piop1.py +33 -37
  16. junifer/datagrabber/aomic/piop2.py +35 -39
  17. junifer/datagrabber/aomic/tests/test_id1000.py +10 -11
  18. junifer/datagrabber/aomic/tests/test_piop1.py +10 -11
  19. junifer/datagrabber/aomic/tests/test_piop2.py +10 -11
  20. junifer/datagrabber/datalad_base.py +10 -1
  21. junifer/datagrabber/dmcc13_benchmark.py +36 -54
  22. junifer/datagrabber/pattern.py +116 -46
  23. junifer/datagrabber/pattern_datalad.py +22 -12
  24. junifer/datagrabber/tests/test_datagrabber_utils.py +15 -9
  25. junifer/datagrabber/tests/test_dmcc13_benchmark.py +46 -19
  26. junifer/datagrabber/utils.py +127 -54
  27. junifer/datareader/default.py +91 -42
  28. junifer/pipeline/utils.py +64 -1
  29. junifer/preprocess/base.py +2 -2
  30. junifer/preprocess/confounds/fmriprep_confound_remover.py +44 -60
  31. junifer/preprocess/confounds/tests/test_fmriprep_confound_remover.py +72 -113
  32. junifer/testing/datagrabbers.py +5 -5
  33. junifer/testing/tests/test_partlycloudytesting_datagrabber.py +7 -7
  34. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/METADATA +1 -1
  35. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/RECORD +40 -35
  36. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/AUTHORS.rst +0 -0
  37. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/LICENSE.md +0 -0
  38. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/WHEEL +0 -0
  39. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/entry_points.txt +0 -0
  40. {junifer-0.0.5.dev62.dist-info → junifer-0.0.5.dev86.dist-info}/top_level.txt +0 -0
junifer/datagrabber/tests/test_dmcc13_benchmark.py CHANGED
@@ -93,13 +93,10 @@ def test_DMCC13Benchmark(
     # Available data types
     data_types = [
         "BOLD",
-        "BOLD_confounds",
-        "BOLD_mask",
         "VBM_CSF",
         "VBM_GM",
         "VBM_WM",
         "T1w",
-        "T1w_mask",
     ]
     # Add Warp if native T1w is accessed
     if native_t1w:
@@ -111,14 +108,6 @@ def test_DMCC13Benchmark(
             f"sub-01_{ses}_task-{task}_acq-mb4{phase}_run-{run}_"
             "space-MNI152NLin2009cAsym_desc-preproc_bold.nii.gz"
         ),
-        (
-            f"sub-01_{ses}_task-{task}_acq-mb4{phase}_run-{run}_"
-            "desc-confounds_regressors.tsv"
-        ),
-        (
-            f"sub-01_{ses}_task-{task}_acq-mb4{phase}_run-{run}_"
-            "space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz"
-        ),
         "sub-01_space-MNI152NLin2009cAsym_label-CSF_probseg.nii.gz",
         "sub-01_space-MNI152NLin2009cAsym_label-GM_probseg.nii.gz",
         "sub-01_space-MNI152NLin2009cAsym_label-WM_probseg.nii.gz",
@@ -127,16 +116,12 @@ def test_DMCC13Benchmark(
         data_file_names.extend(
             [
                 "sub-01_desc-preproc_T1w.nii.gz",
-                "sub-01_desc-brain_mask.nii.gz",
                 "sub-01_from-MNI152NLin2009cAsym_to-T1w_mode-image_xfm.h5",
             ]
         )
     else:
-        data_file_names.extend(
-            [
-                "sub-01_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz",
-                "sub-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz",
-            ]
+        data_file_names.append(
+            "sub-01_space-MNI152NLin2009cAsym_desc-preproc_T1w.nii.gz"
         )

     for data_type, data_file_name in zip(data_types, data_file_names):
@@ -151,6 +136,48 @@ def test_DMCC13Benchmark(
         # Assert metadata
        assert "meta" in out[data_type]

+    # Check BOLD nested data types
+    for type_, file_name in zip(
+        ("mask", "confounds"),
+        (
+            (
+                f"sub-01_{ses}_task-{task}_acq-mb4{phase}_run-{run}_"
+                "space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz"
+            ),
+            (
+                f"sub-01_{ses}_task-{task}_acq-mb4{phase}_run-{run}_"
+                "desc-confounds_regressors.tsv"
+            ),
+        ),
+    ):
+        # Assert data type
+        assert type_ in out["BOLD"]
+        # Assert data file path exists
+        assert out["BOLD"][type_]["path"].exists()
+        # Assert data file path is a file
+        assert out["BOLD"][type_]["path"].is_file()
+        # Assert data file name
+        assert out["BOLD"][type_]["path"].name == file_name
+
+    # Check T1w nested data types
+    # Assert data type
+    assert "mask" in out["T1w"]
+    # Assert data file path exists
+    assert out["T1w"]["mask"]["path"].exists()
+    # Assert data file path is a file
+    assert out["T1w"]["mask"]["path"].is_file()
+    # Assert data file name
+    if native_t1w:
+        assert (
+            out["T1w"]["mask"]["path"].name
+            == "sub-01_desc-brain_mask.nii.gz"
+        )
+    else:
+        assert (
+            out["T1w"]["mask"]["path"].name
+            == "sub-01_space-MNI152NLin2009cAsym_desc-brain_mask.nii.gz"
+        )
+

 @pytest.mark.parametrize(
     "types, native_t1w",
@@ -165,8 +192,8 @@ def test_DMCC13Benchmark(
         ("VBM_GM", False),
         ("VBM_WM", True),
         ("VBM_WM", False),
-        (["BOLD", "BOLD_confounds"], True),
-        (["BOLD", "BOLD_confounds"], False),
+        (["BOLD", "VBM_CSF"], True),
+        (["BOLD", "VBM_CSF"], False),
         (["T1w", "VBM_CSF"], True),
         (["T1w", "VBM_CSF"], False),
         (["VBM_GM", "VBM_WM"], True),
junifer/datagrabber/utils.py CHANGED
@@ -13,51 +13,45 @@ from ..utils import logger, raise_error
 PATTERNS_SCHEMA = {
     "T1w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T1w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "T2w": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "T2w_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+        },
     },
     "BOLD": {
         "mandatory": ["pattern", "space"],
-        "optional": ["mask_item"],
-    },
-    "BOLD_confounds": {
-        "mandatory": ["pattern", "format"],
-        "optional": [],
-    },
-    "BOLD_mask": {
-        "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {
+            "mask": {"mandatory": ["pattern", "space"], "optional": []},
+            "confounds": {
+                "mandatory": ["pattern", "format"],
+                "optional": ["mappings"],
+            },
+        },
     },
     "Warp": {
         "mandatory": ["pattern", "src", "dst"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_GM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_WM": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "VBM_CSF": {
         "mandatory": ["pattern", "space"],
-        "optional": [],
+        "optional": {},
     },
     "DWI": {
         "mandatory": ["pattern"],
-        "optional": [],
+        "optional": {},
     },
 }

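To make the schema change concrete, the following is a minimal sketch of a patterns dictionary that would satisfy the new nested "BOLD" entry; the pattern strings, the space value, and the "fmriprep" format value are hypothetical placeholders and are not taken from this diff:

patterns = {
    "BOLD": {
        # Mandatory keys of the top-level data type
        "pattern": "{subject}/func/{subject}_task-rest_bold.nii.gz",  # hypothetical
        "space": "MNI152NLin2009cAsym",
        # Optional nested data types now live inside "BOLD" ...
        "mask": {
            "pattern": "{subject}/func/{subject}_task-rest_brain_mask.nii.gz",  # hypothetical
            "space": "MNI152NLin2009cAsym",
        },
        # ... instead of being separate BOLD_mask / BOLD_confounds top-level types
        "confounds": {
            "pattern": "{subject}/func/{subject}_task-rest_confounds.tsv",  # hypothetical
            "format": "fmriprep",  # hypothetical value
        },
    },
}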
@@ -129,6 +123,67 @@ def validate_replacements(
         raise_error(msg="At least one pattern must contain all replacements.")


+def _validate_mandatory_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Validate mandatory keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to validate.
+    schema : list of str
+        The schema to validate against.
+    data_type : str
+        The data type being validated.
+
+    Raises
+    ------
+    KeyError
+        If any mandatory key is missing for a data type.
+
+    """
+    for key in schema:
+        if key not in keys:
+            raise_error(
+                msg=f"Mandatory key: `{key}` missing for {data_type}",
+                klass=KeyError,
+            )
+        else:
+            logger.debug(f"Mandatory key: `{key}` found for {data_type}")
+
+
+def _identify_stray_keys(
+    keys: List[str], schema: List[str], data_type: str
+) -> None:
+    """Identify stray keys.
+
+    Parameters
+    ----------
+    keys : list of str
+        The keys to check.
+    schema : list of str
+        The schema to check against.
+    data_type : str
+        The data type being checked.
+
+    Raises
+    ------
+    RuntimeError
+        If an unknown key is found for a data type.
+
+    """
+    for key in keys:
+        if key not in schema:
+            raise_error(
+                msg=(
+                    f"Key: {key} not accepted for {data_type} "
+                    "pattern, remove it to proceed"
+                ),
+                klass=RuntimeError,
+            )
+
+
 def validate_patterns(
     types: List[str], patterns: Dict[str, Dict[str, str]]
 ) -> None:
@@ -143,10 +198,6 @@ def validate_patterns(

     Raises
     ------
-    KeyError
-        If any mandatory key is missing for a data type.
-    RuntimeError
-        If an unknown key is found for a data type.
     TypeError
         If ``patterns`` is not a dictionary.
     ValueError
@@ -180,22 +231,15 @@ def validate_patterns(
                 f"should be one of: {list(PATTERNS_SCHEMA.keys())}"
             )
         # Check mandatory keys for data type
-        for mandatory_key in PATTERNS_SCHEMA[data_type_key]["mandatory"]:
-            if mandatory_key not in data_type_val:
-                raise_error(
-                    msg=(
-                        f"Mandatory key: `{mandatory_key}` missing for "
-                        f"{data_type_key}"
-                    ),
-                    klass=KeyError,
-                )
-            else:
-                logger.debug(
-                    f"Mandatory key: `{mandatory_key}` found for "
-                    f"{data_type_key}"
-                )
+        _validate_mandatory_keys(
+            keys=list(data_type_val),
+            schema=PATTERNS_SCHEMA[data_type_key]["mandatory"],
+            data_type=data_type_key,
+        )
         # Check optional keys for data type
-        for optional_key in PATTERNS_SCHEMA[data_type_key]["optional"]:
+        for optional_key, optional_val in PATTERNS_SCHEMA[data_type_key][
+            "optional"
+        ].items():
             if optional_key not in data_type_val:
                 logger.debug(
                     f"Optional key: `{optional_key}` missing for "
@@ -206,19 +250,48 @@ def validate_patterns(
                     f"Optional key: `{optional_key}` found for "
                     f"{data_type_key}"
                 )
+                # Set nested type name for easier access
+                nested_data_type = f"{data_type_key}.{optional_key}"
+                nested_mandatory_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["mandatory"]
+                nested_optional_keys_schema = PATTERNS_SCHEMA[data_type_key][
+                    "optional"
+                ][optional_key]["optional"]
+                # Check mandatory keys for nested type
+                _validate_mandatory_keys(
+                    keys=list(optional_val["mandatory"]),
+                    schema=nested_mandatory_keys_schema,
+                    data_type=nested_data_type,
+                )
+                # Check optional keys for nested type
+                for nested_optional_key in nested_optional_keys_schema:
+                    if nested_optional_key not in optional_val["optional"]:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` missing "
+                            f"for {nested_data_type}"
+                        )
+                    else:
+                        logger.debug(
+                            f"Optional key: `{nested_optional_key}` found for "
+                            f"{nested_data_type}"
+                        )
+                # Check stray key for nested data type
+                _identify_stray_keys(
+                    keys=optional_val["mandatory"] + optional_val["optional"],
+                    schema=nested_mandatory_keys_schema
+                    + nested_optional_keys_schema,
+                    data_type=nested_data_type,
+                )
         # Check stray key for data type
-        for key in data_type_val.keys():
-            if key not in (
+        _identify_stray_keys(
+            keys=list(data_type_val.keys()),
+            schema=(
                 PATTERNS_SCHEMA[data_type_key]["mandatory"]
-                + PATTERNS_SCHEMA[data_type_key]["optional"]
-            ):
-                raise_error(
-                    msg=(
-                        f"Key: {key} not accepted for {data_type_key} "
-                        "pattern, remove it to proceed"
-                    ),
-                    klass=RuntimeError,
-                )
+                + list(PATTERNS_SCHEMA[data_type_key]["optional"].keys())
+            ),
+            data_type=data_type_key,
+        )
         # Wildcard check in patterns
         if "}*" in data_type_val["pattern"]:
             raise_error(
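As a rough usage sketch of the refactored validation, assuming the nested patterns dictionary sketched above; the expected outcomes in the comments follow from the helpers added in this diff:

from junifer.datagrabber.utils import validate_patterns

# Accepted: every key is either mandatory or optional in PATTERNS_SCHEMA
validate_patterns(types=["BOLD"], patterns=patterns)

# Stray key: "mask_item" is no longer accepted for BOLD, so this should be
# rejected through _identify_stray_keys with a RuntimeError
bad_patterns = {"BOLD": {**patterns["BOLD"], "mask_item": "BOLD_mask"}}
validate_patterns(types=["BOLD"], patterns=bad_patterns)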
junifer/datareader/default.py CHANGED
@@ -5,7 +5,7 @@
 # License: AGPL

 from pathlib import Path
-from typing import Dict, List, Optional
+from typing import Dict, List, Optional, Union

 import nibabel as nib
 import pandas as pd
@@ -102,57 +102,106 @@ class DefaultDataReader(PipelineStepMixin, UpdateMetaMixin):
         if params is None:
             params = {}
         # For each type of data, try to read it
-        for type_ in input.keys():
+        for type_key, type_val in input.items():
             # Skip Warp data type
-            if type_ == "Warp":
+            if type_key == "Warp":
                 continue

             # Check for malformed datagrabber specification
-            if "path" not in input[type_]:
+            if "path" not in type_val:
                 warn_with_log(
-                    f"Input type {type_} does not provide a path. Skipping."
+                    f"Input type {type_key} does not provide a path. Skipping."
                 )
                 continue

-            # Retrieve actual path
-            t_path = input[type_]["path"]
-            # Retrieve loading params for the data type
-            t_params = params.get(type_, {})
-
-            # Convert str to Path
-            if not isinstance(t_path, Path):
-                t_path = Path(t_path)
-            out[type_]["path"] = t_path
-
-            logger.info(f"Reading {type_} from {t_path.as_posix()}")
-            # Initialize variable for file data
-            fread = None
-            # Lowercase path
-            fname = t_path.name.lower()
-            # Loop through extensions to find the correct one
-            for ext, ftype in _extensions.items():
-                if fname.endswith(ext):
-                    logger.info(f"{type_} is type {ftype}")
-                    # Retrieve reader function
-                    reader_func = _readers[ftype]["func"]
-                    # Retrieve reader function params
-                    reader_params = _readers[ftype]["params"]
-                    # Update reader function params
-                    if reader_params is not None:
-                        t_params.update(reader_params)
-                    logger.debug(f"Calling {reader_func} with {t_params}")
+            # Iterate to check for nested "types" like mask;
+            # need to copy to avoid runtime error for changing dict size
+            for k, v in type_val.copy().items():
+                # Read data for base data type
+                if k == "path":
+                    # Convert str to Path
+                    if not isinstance(v, Path):
+                        v = Path(v)
+                    # Update path
+                    out[type_key]["path"] = v
+                    logger.info(f"Reading {type_key} from {v.absolute()!s}")
+                    # Retrieve loading params for the data type
+                    t_params = params.get(type_key, {})
                     # Read data
-                    fread = reader_func(t_path, **t_params)
-                    break
-            # If no file data is found due to unknown extension
-            if fread is None:
-                logger.info(
-                    f"Unknown file type {t_path.as_posix()}, skipping reading"
-                )
+                    out[type_key]["data"] = _read_data(
+                        data_type=type_key, path=v, read_params=t_params
+                    )
+                # Read data for nested data type
+                if isinstance(v, dict) and "path" in v:
+                    # Set path
+                    nested_path = v["path"]
+                    # Convert str to Path
+                    if not isinstance(nested_path, Path):
+                        nested_path = Path(nested_path)
+                    # Update path
+                    out[type_key][k]["path"] = nested_path
+                    # Set nested type key for easier access
+                    nested_type = f"{type_key}.{k}"
+                    logger.info(
+                        f"Reading {nested_type} from "
+                        f"{nested_path.absolute()!s}"
+                    )
+                    # Retrieve loading params for the nested data type
+                    nested_params = params.get(nested_type, {})
+                    # Read data
+                    out[type_key][k]["data"] = _read_data(
+                        data_type=nested_type,
+                        path=nested_path,
+                        read_params=nested_params,
+                    )

-            # Set file data for output
-            out[type_]["data"] = fread
             # Update metadata for step
-            self.update_meta(out[type_], "datareader")
+            self.update_meta(out[type_key], "datareader")

         return out
+
+
+def _read_data(
+    data_type: str, path: Path, read_params: Dict
+) -> Union[nib.Nifti1Image, pd.DataFrame, None]:
+    """Read data for data type.
+
+    Parameters
+    ----------
+    data_type : str
+        The data type being read.
+    path : pathlib.Path
+        The path to read data from.
+    read_params : dict
+        Parameters for reader function.
+
+    Returns
+    -------
+    nibabel.Nifti1Image or pandas.DataFrame or pandas.TextFileReader or None
+        The data loaded in memory if file type is known else None.
+
+    """
+    # Initialize variable for file data
+    fread = None
+    # Lowercase path
+    fname = path.name.lower()
+    # Loop through extensions to find the correct one
+    for ext, ftype in _extensions.items():
+        if fname.endswith(ext):
+            logger.info(f"{data_type} is of type {ftype}")
+            # Retrieve reader function
+            reader_func = _readers[ftype]["func"]
+            # Retrieve reader function params
+            reader_params = _readers[ftype]["params"]
+            # Update reader function params
+            if reader_params is not None:
+                read_params.update(reader_params)
+            logger.debug(f"Calling {reader_func!s} with {read_params}")
+            # Read data
+            fread = reader_func(path, **read_params)
+            break
+    # If no file data is found due to unknown extension
+    if fread is None:
+        logger.info(f"Unknown file type {path.absolute()!s}, skipping reading")
+
+    return fread
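With this change, nested entries are read alongside their parent data type, so downstream code can expect a shape roughly like the one below. This is a sketch that assumes DefaultDataReader is driven through fit_transform (as provided by the pipeline mixin) and that element is a Junifer Data object coming from a DataGrabber; neither assumption is shown in this diff:

from junifer.datareader import DefaultDataReader

reader = DefaultDataReader()
out = reader.fit_transform(element)  # `element` comes from a DataGrabber

bold_img = out["BOLD"]["data"]                   # data read for the base type
bold_mask = out["BOLD"]["mask"]["data"]          # read via the nested "BOLD.mask" branch
confounds_df = out["BOLD"]["confounds"]["data"]  # e.g. a DataFrame parsed from the TSV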
junifer/pipeline/utils.py CHANGED
@@ -37,7 +37,7 @@ def check_ext_dependencies(
         If ``name`` is mandatory and is not found.

     """
-    valid_ext_dependencies = ("afni", "fsl", "ants")
+    valid_ext_dependencies = ("afni", "fsl", "ants", "freesurfer")
     if name not in valid_ext_dependencies:
         raise_error(
             "Invalid value for `name`, should be one of: "
@@ -52,6 +52,9 @@
     # Check for ants
     elif name == "ants":
         found = _check_ants(**kwargs)
+    # Check for freesurfer
+    elif name == "freesurfer":
+        found = _check_freesurfer(**kwargs)

     # Check if the dependency is mandatory in case it's not found
     if not found and not optional:
@@ -245,3 +248,63 @@ def _check_ants(commands: Optional[List[str]] = None) -> bool:
             f"{commands_found_results}"
         )
     return ants_found
+
+
+def _check_freesurfer(commands: Optional[List[str]] = None) -> bool:
+    """Check if FreeSurfer is present in the system.
+
+    Parameters
+    ----------
+    commands : list of str, optional
+        The commands to specifically check for from FreeSurfer. If None, only
+        the basic FreeSurfer help would be looked up, else, would also
+        check for specific commands (default None).
+
+    Returns
+    -------
+    bool
+        Whether FreeSurfer is found or not.
+
+    """
+    completed_process = subprocess.run(
+        "recon-all -help",
+        stdin=subprocess.DEVNULL,
+        stdout=subprocess.DEVNULL,
+        stderr=subprocess.STDOUT,
+        shell=True,  # is unsafe but kept for resolution via PATH
+        check=False,
+    )
+    fs_found = completed_process.returncode == 0
+
+    # Check for specific commands
+    if fs_found and commands is not None:
+        if not isinstance(commands, list):
+            commands = [commands]
+        # Store command found results
+        commands_found_results = {}
+        # Set all commands found flag to True
+        all_commands_found = True
+        # Check commands' existence
+        for command in commands:
+            command_process = subprocess.run(
+                [command],
+                stdin=subprocess.DEVNULL,
+                stdout=subprocess.DEVNULL,
+                stderr=subprocess.STDOUT,
+                shell=True,  # is unsafe but kept for resolution via PATH
+                check=False,
+            )
+            command_found = command_process.returncode == 0
+            commands_found_results[command] = (
+                "found" if command_found else "not found"
+            )
+            # Set flag to trigger warning
+            all_commands_found = all_commands_found and command_found
+        # One or more commands were missing
+        if not all_commands_found:
+            warn_with_log(
+                "FreeSurfer is installed but some of the required commands "
+                "were not found. These are the results: "
+                f"{commands_found_results}"
+            )
+    return fs_found
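A hedged sketch of how the new FreeSurfer check could be invoked. The command names are taken from the wrapper scripts added under junifer/api/res/freesurfer in this release; the call itself is illustrative and not part of this diff:

from junifer.pipeline.utils import check_ext_dependencies

# Verifies that `recon-all -help` runs, then checks each listed binary;
# raises if FreeSurfer is missing and the dependency is not optional
check_ext_dependencies(
    "freesurfer",
    optional=False,
    commands=["mri_binarize", "mri_mc", "mri_pretess", "mris_convert"],
)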
junifer/preprocess/base.py CHANGED
@@ -146,8 +146,8 @@ class BasePreprocessor(ABC, PipelineStepMixin, UpdateMetaMixin):
             The computed result as dictionary.
         dict or None
             Extra "helper" data types as dictionary to add to the Junifer Data
-            object. For example, computed BOLD mask can be passed via this.
-            If no new "helper" data types is created, None is to be passed.
+            object. If no new "helper" data type(s) is(are) created, None is to
+            be passed.

         """
         raise_error(