voxelops-0.1.0-py3-none-any.whl
This diff represents the content of a publicly available package version as released to one of the supported registries. It is provided for informational purposes only and reflects the package contents as they appear in the public registry.
- voxelops/__init__.py +98 -0
- voxelops/exceptions.py +158 -0
- voxelops/runners/__init__.py +13 -0
- voxelops/runners/_base.py +191 -0
- voxelops/runners/heudiconv.py +202 -0
- voxelops/runners/qsiparc.py +150 -0
- voxelops/runners/qsiprep.py +187 -0
- voxelops/runners/qsirecon.py +173 -0
- voxelops/schemas/__init__.py +41 -0
- voxelops/schemas/heudiconv.py +121 -0
- voxelops/schemas/qsiparc.py +107 -0
- voxelops/schemas/qsiprep.py +140 -0
- voxelops/schemas/qsirecon.py +154 -0
- voxelops/utils/__init__.py +1 -0
- voxelops/utils/bids.py +486 -0
- voxelops-0.1.0.dist-info/METADATA +221 -0
- voxelops-0.1.0.dist-info/RECORD +19 -0
- voxelops-0.1.0.dist-info/WHEEL +4 -0
- voxelops-0.1.0.dist-info/licenses/LICENSE +21 -0

voxelops/runners/qsiparc.py
@@ -0,0 +1,150 @@
"""QSIParc parcellation runner using parcellate package."""

import logging
from datetime import datetime
from typing import Any, Dict, Optional

from parcellate.interfaces.qsirecon.models import QSIReconConfig
from parcellate.interfaces.qsirecon.qsirecon import run_parcellations

from voxelops.exceptions import ProcedureExecutionError
from voxelops.runners._base import (
    validate_input_dir,
    validate_participant,
)
from voxelops.schemas.qsiparc import (
    QSIParcDefaults,
    QSIParcInputs,
    QSIParcOutputs,
)


def run_qsiparc(
    inputs: QSIParcInputs, config: Optional[QSIParcDefaults] = None, **overrides
) -> Dict[str, Any]:
    """Run parcellation on QSIRecon outputs using parcellate.

    Atlases are auto-discovered from the QSIRecon derivatives directory
    (BIDS dseg files). No manual atlas list is needed.

    Parameters
    ----------
    inputs : QSIParcInputs
        Required inputs (qsirecon_dir, participant, etc.).
    config : Optional[QSIParcDefaults], optional
        Configuration (uses brain bank defaults if not provided), by default None.
    **overrides
        Override any config parameter.

    Returns
    -------
    Dict[str, Any]
        Execution record with:
        - tool: "qsiparc"
        - participant: Participant label
        - start_time, end_time: ISO format timestamps
        - duration_seconds, duration_human: Execution duration
        - success: Boolean success status
        - output_files: List of output TSV paths
        - inputs: QSIParcInputs instance
        - config: QSIParcDefaults instance
        - expected_outputs: QSIParcOutputs instance

    Raises
    ------
    InputValidationError
        If inputs are invalid.
    ProcedureExecutionError
        If parcellation fails.

    Examples
    --------
    >>> inputs = QSIParcInputs(
    ...     qsirecon_dir=Path("/data/derivatives/qsirecon"),
    ...     participant="01",
    ... )
    >>> result = run_qsiparc(inputs)
    >>> print(result['output_files'])
    """

    # Use brain bank defaults if config not provided
    config = config or QSIParcDefaults()

    # Apply overrides
    for key, value in overrides.items():
        if hasattr(config, key):
            setattr(config, key, value)

    # Validate inputs
    validate_input_dir(inputs.qsirecon_dir, "QSIRecon")
    validate_participant(inputs.qsirecon_dir, inputs.participant)

    # Setup output directory
    output_dir = inputs.output_dir or inputs.qsirecon_dir.parent
    output_dir.mkdir(parents=True, exist_ok=True)

    # Generate expected outputs
    expected_outputs = QSIParcOutputs.from_inputs(inputs, output_dir)

    # Build parcellate config
    log_level = getattr(logging, config.log_level.upper(), logging.INFO)
    parcellate_config = QSIReconConfig(
        input_root=inputs.qsirecon_dir,
        output_dir=output_dir,
        subjects=[inputs.participant],
        sessions=[inputs.session] if inputs.session else None,
        mask=config.mask,
        background_label=config.background_label,
        resampling_target=config.resampling_target,
        force=config.force,
        log_level=log_level,
        atlases=inputs.atlases or [],
        n_jobs=inputs.n_jobs or config.n_jobs,
        n_procs=inputs.n_procs or config.n_procs,
    )

    print(f"\n{'='*80}")
    print(f"Running qsiparc for participant {inputs.participant}")
    print(f"{'='*80}")
    print(f"Input: {inputs.qsirecon_dir}")
    print(f"Output: {output_dir}")
    print(f"{'='*80}\n")

    start_time = datetime.now()

    try:
        output_files = run_parcellations(parcellate_config)

        end_time = datetime.now()
        duration = end_time - start_time

        record = {
            "tool": "qsiparc",
            "participant": inputs.participant,
            "start_time": start_time.isoformat(),
            "end_time": end_time.isoformat(),
            "duration_seconds": duration.total_seconds(),
            "duration_human": str(duration),
            "success": True,
            "output_files": output_files,
            "inputs": inputs,
            "config": config,
            "expected_outputs": expected_outputs,
        }

        print(f"\n{'='*80}")
        print("qsiparc completed successfully")
        print(f"Duration: {duration}")
        print(f"Output files: {len(output_files)}")
        print(f"{'='*80}\n")

        return record

    except Exception as e:
        if isinstance(e, ProcedureExecutionError):
            raise
        raise ProcedureExecutionError(
            procedure_name="qsiparc",
            message=str(e),
            original_error=e,
        ) from e
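
A minimal usage sketch for run_qsiparc. The QSIParcInputs fields beyond qsirecon_dir and participant, and the override names (force, log_level), are inferred from how the runner above reads them; paths are placeholders.

from pathlib import Path

from voxelops.runners.qsiparc import run_qsiparc
from voxelops.schemas.qsiparc import QSIParcInputs

inputs = QSIParcInputs(
    qsirecon_dir=Path("/data/derivatives/qsirecon"),  # placeholder path
    participant="01",
)

# Keyword overrides are copied onto QSIParcDefaults via the hasattr/setattr loop,
# so any existing config attribute (e.g. force, log_level) can be overridden here.
record = run_qsiparc(inputs, force=True, log_level="DEBUG")
print(record["duration_human"], len(record["output_files"]))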

voxelops/runners/qsiprep.py
@@ -0,0 +1,187 @@
"""QSIPrep diffusion preprocessing runner."""

import os
from pathlib import Path
from typing import Any, Dict, Optional

from voxelops.runners._base import (
    run_docker,
    validate_input_dir,
    validate_participant,
)
from voxelops.schemas.qsiprep import (
    QSIPrepDefaults,
    QSIPrepInputs,
    QSIPrepOutputs,
)


def _build_qsiprep_docker_command(
    inputs: QSIPrepInputs,
    config: QSIPrepDefaults,
    output_dir: Path,
    work_dir: Path,
) -> list[str]:
    """Builds the Docker command for QSIPrep."""
    uid = os.getuid()
    gid = os.getgid()

    cmd = [
        "docker",
        "run",
        "-ti",
        "--rm",
        "--user",
        f"{uid}:{gid}",
        "-v",
        f"{inputs.bids_dir}:/data:ro",
        "-v",
        f"{output_dir}:/out",
        "-v",
        f"{work_dir}:/work",
    ]

    # Add FreeSurfer license if provided
    if config.fs_license and config.fs_license.exists():
        cmd.extend(["-v", f"{config.fs_license}:/license.txt:ro"])
    # Add BIDS filters if provided
    if inputs.bids_filters and inputs.bids_filters.exists():
        cmd.extend(["-v", f"{inputs.bids_filters}:/bids_filters.json:ro"])

    # Container image
    cmd.append(config.docker_image)

    # QSIPrep arguments
    cmd.extend(
        [
            "/data",
            "/out",
            "participant",
            "--participant-label",
            inputs.participant,
            "--output-resolution",
            str(config.output_resolution),
            "--nprocs",
            str(config.nprocs),
            "--mem-mb",
            str(config.mem_mb),
            "--work-dir",
            "/work",
        ]
    )

    # Output spaces
    for space in config.anatomical_template:
        cmd.extend(["--anatomical-template", space])

    # Optional flags
    if config.longitudinal:
        cmd.append("--longitudinal")

    # Optional subject anatomical reference
    if (
        hasattr(config, "subject_anatomical_reference")
        and config.subject_anatomical_reference
    ):
        cmd.extend(
            ["--subject-anatomical-reference", config.subject_anatomical_reference]
        )

    if config.skip_bids_validation:
        cmd.append("--skip-bids-validation")

    if config.fs_license and config.fs_license.exists():
        cmd.extend(["--fs-license-file", "/license.txt"])

    if inputs.bids_filters and inputs.bids_filters.exists():
        cmd.extend(["--bids-filter-file", "/bids_filters.json"])

    return cmd


def run_qsiprep(
    inputs: QSIPrepInputs, config: Optional[QSIPrepDefaults] = None, **overrides
) -> Dict[str, Any]:
    """Run QSIPrep diffusion MRI preprocessing.

    Parameters
    ----------
    inputs : QSIPrepInputs
        Required inputs (bids_dir, participant, etc.).
    config : Optional[QSIPrepDefaults], optional
        Configuration (uses brain bank defaults if not provided), by default None.
    **overrides
        Override any config parameter (e.g., nprocs=16).

    Returns
    -------
    Dict[str, Any]
        Execution record with:
        - tool: "qsiprep"
        - participant: Participant label
        - command: Full Docker command executed
        - exit_code: Process exit code
        - start_time, end_time: ISO format timestamps
        - duration_seconds, duration_human: Execution duration
        - success: Boolean success status
        - log_file: Path to JSON log
        - inputs: QSIPrepInputs instance
        - config: QSIPrepDefaults instance
        - expected_outputs: QSIPrepOutputs instance

    Raises
    ------
    InputValidationError
        If inputs are invalid.
    ProcedureExecutionError
        If preprocessing fails.

    Examples
    --------
    >>> inputs = QSIPrepInputs(
    ...     bids_dir=Path("/data/bids"),
    ...     participant="01",
    ... )
    >>> result = run_qsiprep(inputs, nprocs=16)  # Override default nprocs
    >>> print(f"Completed in {result['duration_human']}")
    >>> print(f"Outputs in: {result['expected_outputs'].qsiprep_dir}")
    """
    # Use brain bank defaults if config not provided
    config = config or QSIPrepDefaults()

    # Apply overrides
    for key, value in overrides.items():
        if hasattr(config, key):
            setattr(config, key, value)

    # Validate inputs
    validate_input_dir(inputs.bids_dir, "BIDS")
    validate_participant(inputs.bids_dir, inputs.participant)

    # Setup directories
    output_dir = inputs.output_dir or (inputs.bids_dir.parent / "derivatives")
    work_dir = inputs.work_dir or (output_dir.parent / "work" / "qsiprep")
    output_dir.mkdir(parents=True, exist_ok=True)
    work_dir.mkdir(parents=True, exist_ok=True)

    # Generate expected outputs
    expected_outputs = QSIPrepOutputs.from_inputs(inputs, output_dir, work_dir)

    # Build Docker command
    cmd = _build_qsiprep_docker_command(inputs, config, output_dir, work_dir)

    # Execute
    log_dir = output_dir.parent / "logs"
    result = run_docker(
        cmd=cmd,
        tool_name="qsiprep",
        participant=inputs.participant,
        log_dir=log_dir,
    )

    # Add inputs, config, and expected outputs to result
    result["inputs"] = inputs
    result["config"] = config
    result["expected_outputs"] = expected_outputs

    return result
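
A usage sketch for run_qsiprep under the same assumptions. Field names are taken from the runner above; fs_license and bids_filters are optional and only mounted into the container when the files exist, and the paths here are placeholders.

from pathlib import Path

from voxelops.runners.qsiprep import run_qsiprep
from voxelops.schemas.qsiprep import QSIPrepDefaults, QSIPrepInputs

inputs = QSIPrepInputs(
    bids_dir=Path("/data/bids"),                         # placeholder BIDS root
    participant="01",
    bids_filters=Path("/data/code/bids_filters.json"),   # mounted read-only if it exists
)

config = QSIPrepDefaults()                               # brain bank defaults
config.fs_license = Path("/opt/freesurfer/license.txt")  # enables --fs-license-file

# nprocs / mem_mb land on the config through the override loop
result = run_qsiprep(inputs, config, nprocs=16, mem_mb=32000)
if result["success"]:
    print("Outputs in:", result["expected_outputs"].qsiprep_dir)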

voxelops/runners/qsirecon.py
@@ -0,0 +1,173 @@
"""QSIRecon diffusion reconstruction runner."""

import os
from typing import Any, Dict, Optional

from voxelops.runners._base import (
    run_docker,
    validate_input_dir,
    validate_participant,
)
from voxelops.schemas.qsirecon import (
    QSIReconDefaults,
    QSIReconInputs,
    QSIReconOutputs,
)


def run_qsirecon(
    inputs: QSIReconInputs, config: Optional[QSIReconDefaults] = None, **overrides
) -> Dict[str, Any]:
    """Run QSIRecon diffusion reconstruction and connectivity.

    Parameters
    ----------
    inputs : QSIReconInputs
        Required inputs (qsiprep_dir, participant, etc.).
    config : Optional[QSIReconDefaults], optional
        Configuration (uses brain bank defaults if not provided), by default None.
    **overrides
        Override any config parameter.

    Returns
    -------
    Dict[str, Any]
        Execution record with:
        - tool: "qsirecon"
        - participant: Participant label
        - command: Full Docker command executed
        - exit_code: Process exit code
        - start_time, end_time: ISO format timestamps
        - duration_seconds, duration_human: Execution duration
        - success: Boolean success status
        - log_file: Path to JSON log
        - inputs: QSIReconInputs instance
        - config: QSIReconDefaults instance
        - expected_outputs: QSIReconOutputs instance

    Raises
    ------
    InputValidationError
        If inputs are invalid.
    ProcedureExecutionError
        If reconstruction fails.

    Examples
    --------
    >>> inputs = QSIReconInputs(
    ...     qsiprep_dir=Path("/data/derivatives/qsiprep"),
    ...     participant="01",
    ... )
    >>> result = run_qsirecon(inputs, atlases=["schaefer100"])
    >>> print(result['expected_outputs'].qsirecon_dir)
    """
    # Use brain bank defaults if config not provided
    config = config or QSIReconDefaults()

    # Apply overrides
    for key, value in overrides.items():
        if hasattr(config, key):
            setattr(config, key, value)

    # Validate inputs
    validate_input_dir(inputs.qsiprep_dir, "QSIPrep")
    validate_participant(inputs.qsiprep_dir, inputs.participant)

    # Setup directories
    output_dir = inputs.output_dir or (inputs.qsiprep_dir.parent)
    work_dir = inputs.work_dir or (output_dir.parent / "work" / "qsirecon")
    output_dir.mkdir(parents=True, exist_ok=True)
    work_dir.mkdir(parents=True, exist_ok=True)

    # Generate expected outputs
    expected_outputs = QSIReconOutputs.from_inputs(inputs, output_dir, work_dir)

    # Get current user/group IDs for Docker
    uid = os.getuid()
    gid = os.getgid()

    # Build Docker command
    cmd = [
        "docker",
        "run",
        "-it",
        "--rm",
        "--user",
        f"{uid}:{gid}",
        "-v",
        f"{inputs.qsiprep_dir}:/data:ro",
        "-v",
        f"{output_dir}:/out",
        "-v",
        f"{work_dir}:/work",
        "-v",
        f"{inputs.recon_spec}:/recon_spec.yaml:ro",
    ]

    # Add optional mounts
    if config.fs_license and config.fs_license.exists():
        cmd.extend(["-v", f"{config.fs_license}:/license.txt:ro"])

    if config.fs_subjects_dir and config.fs_subjects_dir.exists():
        cmd.extend(["-v", f"{config.fs_subjects_dir}:/subjects:ro"])

    if inputs.datasets:
        for name, path in inputs.datasets.items():
            cmd.extend(["-v", f"{path}:/datasets/{name}:ro"])

    # Container image
    cmd.append(config.docker_image)

    # QSIRecon arguments
    cmd.extend(
        [
            "/data",
            "/out",
            "participant",
            "--participant-label",
            inputs.participant,
            "--nprocs",
            str(config.nprocs),
            "--mem-mb",
            str(config.mem_mb),
            "--work-dir",
            "/work",
        ]
    )

    # Datasets
    if inputs.datasets:
        cmd.extend(["--datasets"])
        for name in inputs.datasets.keys():
            addition = f"{name}=/datasets/{name}"
            cmd.extend([addition])
    # Atlases
    if config.atlases:
        cmd.extend(["--atlases", *config.atlases])
    if inputs.atlases:
        cmd.extend([*inputs.atlases])
    # Optional arguments
    if inputs.recon_spec and inputs.recon_spec.exists():
        cmd.extend(["--recon-spec", "/recon_spec.yaml"])

    if config.fs_subjects_dir and config.fs_subjects_dir.exists():
        cmd.extend(["--freesurfer-input", "/subjects"])

    if config.fs_license and config.fs_license.exists():
        cmd.extend(["--fs-license-file", "/license.txt"])

    # Execute
    log_dir = output_dir.parent / "logs"
    result = run_docker(
        cmd=cmd,
        tool_name="qsirecon",
        participant=inputs.participant,
        log_dir=log_dir,
    )

    # Add inputs, config, and expected outputs to result
    result["inputs"] = inputs
    result["config"] = config
    result["expected_outputs"] = expected_outputs

    return result
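
A usage sketch for run_qsirecon. The recon_spec path, the datasets mapping, and the atlas name are illustrative; the field names come from the runner above.

from pathlib import Path

from voxelops.runners.qsirecon import run_qsirecon
from voxelops.schemas.qsirecon import QSIReconInputs

inputs = QSIReconInputs(
    qsiprep_dir=Path("/data/derivatives/qsiprep"),
    participant="01",
    recon_spec=Path("/data/code/recon_spec.yaml"),              # mounted at /recon_spec.yaml
    datasets={"freesurfer": Path("/data/derivatives/freesurfer")},
)

# An atlases override populates config.atlases and hence the --atlases argument
result = run_qsirecon(inputs, atlases=["schaefer100"])
print(result["command"])                    # full Docker command from the execution record
print(result["expected_outputs"].qsirecon_dir)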

voxelops/schemas/__init__.py
@@ -0,0 +1,41 @@
"""Schemas for procedure inputs, outputs, and defaults."""

from voxelops.schemas.heudiconv import (
    HeudiconvDefaults,
    HeudiconvInputs,
    HeudiconvOutputs,
)
from voxelops.schemas.qsiparc import (
    QSIParcDefaults,
    QSIParcInputs,
    QSIParcOutputs,
)
from voxelops.schemas.qsiprep import (
    QSIPrepDefaults,
    QSIPrepInputs,
    QSIPrepOutputs,
)
from voxelops.schemas.qsirecon import (
    QSIReconDefaults,
    QSIReconInputs,
    QSIReconOutputs,
)

__all__ = [
    # HeudiConv
    "HeudiconvInputs",
    "HeudiconvOutputs",
    "HeudiconvDefaults",
    # QSIPrep
    "QSIPrepInputs",
    "QSIPrepOutputs",
    "QSIPrepDefaults",
    # QSIRecon
    "QSIReconInputs",
    "QSIReconOutputs",
    "QSIReconDefaults",
    # QSIParc
    "QSIParcInputs",
    "QSIParcOutputs",
    "QSIParcDefaults",
]
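
Because everything is re-exported here, downstream code can import the per-tool schemas from voxelops.schemas directly rather than the individual modules, for example:

from voxelops.schemas import QSIPrepDefaults, QSIPrepInputs, QSIReconInputs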

voxelops/schemas/heudiconv.py
@@ -0,0 +1,121 @@
"""HeudiConv schemas: inputs, outputs, and defaults."""

from dataclasses import dataclass
from pathlib import Path
from typing import Optional


@dataclass
class HeudiconvInputs:
    """Required inputs for HeudiConv DICOM to BIDS conversion.

    Parameters
    ----------
    dicom_dir : Path
        Directory containing DICOM files.
    participant : str
        Participant label (without 'sub-' prefix).
    output_dir : Optional[Path], optional
        Output BIDS directory, by default None.
        If None, defaults to dicom_dir/../bids.
    session : Optional[str], optional
        Session label (without 'ses-' prefix), by default None.
    heuristic : Optional[Path], optional
        Path to heuristic.py file, by default None.
    """

    dicom_dir: Path
    participant: str
    output_dir: Optional[Path] = None
    session: Optional[str] = None
    heuristic: Optional[Path] = None

    def __post_init__(self):
        """Ensure paths are Path objects."""
        self.dicom_dir = Path(self.dicom_dir)
        if self.output_dir:
            self.output_dir = Path(self.output_dir)


@dataclass
class HeudiconvOutputs:
    """Expected outputs from HeudiConv.

    Parameters
    ----------
    bids_dir : Path
        Root BIDS directory.
    participant_dir : Path
        Participant-specific directory (sub-XX/).
    dataset_description : Path
        dataset_description.json file.
    """

    bids_dir: Path
    participant_dir: Path
    dataset_description: Path

    @classmethod
    def from_inputs(cls, inputs: HeudiconvInputs, output_dir: Path):
        """Generate expected output paths from inputs.

        Parameters
        ----------
        inputs : HeudiconvInputs
            HeudiconvInputs instance.
        output_dir : Path
            Resolved output directory.

        Returns
        -------
        HeudiconvOutputs
            HeudiconvOutputs with expected paths.
        """
        participant_dir = output_dir / f"sub-{inputs.participant}"
        if inputs.session:
            participant_dir = participant_dir / f"ses-{inputs.session}"

        return cls(
            bids_dir=output_dir,
            participant_dir=participant_dir,
            dataset_description=output_dir / "dataset_description.json",
        )


@dataclass
class HeudiconvDefaults:
    """Default configuration for HeudiConv.

    Parameters
    ----------
    heuristic : Optional[Path], optional
        Path to heuristic.py file (required for conversion), by default None.
    bids_validator : bool, optional
        Run BIDS validator after conversion, by default True.
    overwrite : bool, optional
        Overwrite existing output, by default False.
    converter : str, optional
        DICOM converter to use, by default "dcm2niix".
    docker_image : str, optional
        Docker image to use, by default "nipy/heudiconv:1.3.4".
    post_process : bool, optional
        Enable post-heudiconv processing, by default True.
    post_process_dry_run : bool, optional
        Test mode - report only, don't modify, by default False.
    """

    heuristic: Optional[Path] = None
    bids_validator: bool = True
    overwrite: bool = False
    converter: str = "dcm2niix"
    bids: Optional[str] = "notop"
    grouping: Optional[str] = "all"
    docker_image: str = "nipy/heudiconv:1.3.4"
    # Post-processing options
    post_process: bool = True  # Enable post-heudiconv processing
    post_process_dry_run: bool = False  # Test mode - report only, don't modify

    def __post_init__(self):
        """Ensure heuristic path is Path object if provided."""
        if self.heuristic:
            self.heuristic = Path(self.heuristic)