brkraw 0.3.11__py3-none-any.whl → 0.5.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- brkraw/__init__.py +9 -3
- brkraw/apps/__init__.py +12 -0
- brkraw/apps/addon/__init__.py +30 -0
- brkraw/apps/addon/core.py +35 -0
- brkraw/apps/addon/dependencies.py +402 -0
- brkraw/apps/addon/installation.py +500 -0
- brkraw/apps/addon/io.py +21 -0
- brkraw/apps/hook/__init__.py +25 -0
- brkraw/apps/hook/core.py +636 -0
- brkraw/apps/loader/__init__.py +10 -0
- brkraw/apps/loader/core.py +622 -0
- brkraw/apps/loader/formatter.py +288 -0
- brkraw/apps/loader/helper.py +797 -0
- brkraw/apps/loader/info/__init__.py +11 -0
- brkraw/apps/loader/info/scan.py +85 -0
- brkraw/apps/loader/info/scan.yaml +90 -0
- brkraw/apps/loader/info/study.py +69 -0
- brkraw/apps/loader/info/study.yaml +156 -0
- brkraw/apps/loader/info/transform.py +92 -0
- brkraw/apps/loader/types.py +220 -0
- brkraw/cli/__init__.py +5 -0
- brkraw/cli/commands/__init__.py +2 -0
- brkraw/cli/commands/addon.py +327 -0
- brkraw/cli/commands/config.py +205 -0
- brkraw/cli/commands/convert.py +903 -0
- brkraw/cli/commands/hook.py +348 -0
- brkraw/cli/commands/info.py +74 -0
- brkraw/cli/commands/init.py +214 -0
- brkraw/cli/commands/params.py +106 -0
- brkraw/cli/commands/prune.py +288 -0
- brkraw/cli/commands/session.py +371 -0
- brkraw/cli/hook_args.py +80 -0
- brkraw/cli/main.py +83 -0
- brkraw/cli/utils.py +60 -0
- brkraw/core/__init__.py +13 -0
- brkraw/core/config.py +380 -0
- brkraw/core/entrypoints.py +25 -0
- brkraw/core/formatter.py +367 -0
- brkraw/core/fs.py +495 -0
- brkraw/core/jcamp.py +600 -0
- brkraw/core/layout.py +451 -0
- brkraw/core/parameters.py +781 -0
- brkraw/core/zip.py +1121 -0
- brkraw/dataclasses/__init__.py +14 -0
- brkraw/dataclasses/node.py +139 -0
- brkraw/dataclasses/reco.py +33 -0
- brkraw/dataclasses/scan.py +61 -0
- brkraw/dataclasses/study.py +131 -0
- brkraw/default/__init__.py +3 -0
- brkraw/default/pruner_specs/deid4share.yaml +42 -0
- brkraw/default/rules/00_default.yaml +4 -0
- brkraw/default/specs/metadata_dicom.yaml +236 -0
- brkraw/default/specs/metadata_transforms.py +92 -0
- brkraw/resolver/__init__.py +7 -0
- brkraw/resolver/affine.py +539 -0
- brkraw/resolver/datatype.py +69 -0
- brkraw/resolver/fid.py +90 -0
- brkraw/resolver/helpers.py +36 -0
- brkraw/resolver/image.py +188 -0
- brkraw/resolver/nifti.py +370 -0
- brkraw/resolver/shape.py +235 -0
- brkraw/schema/__init__.py +3 -0
- brkraw/schema/context_map.yaml +62 -0
- brkraw/schema/meta.yaml +57 -0
- brkraw/schema/niftiheader.yaml +95 -0
- brkraw/schema/pruner.yaml +55 -0
- brkraw/schema/remapper.yaml +128 -0
- brkraw/schema/rules.yaml +154 -0
- brkraw/specs/__init__.py +10 -0
- brkraw/specs/hook/__init__.py +12 -0
- brkraw/specs/hook/logic.py +31 -0
- brkraw/specs/hook/validator.py +22 -0
- brkraw/specs/meta/__init__.py +5 -0
- brkraw/specs/meta/validator.py +156 -0
- brkraw/specs/pruner/__init__.py +15 -0
- brkraw/specs/pruner/logic.py +361 -0
- brkraw/specs/pruner/validator.py +119 -0
- brkraw/specs/remapper/__init__.py +27 -0
- brkraw/specs/remapper/logic.py +924 -0
- brkraw/specs/remapper/validator.py +314 -0
- brkraw/specs/rules/__init__.py +6 -0
- brkraw/specs/rules/logic.py +263 -0
- brkraw/specs/rules/validator.py +103 -0
- brkraw-0.5.0.dist-info/METADATA +81 -0
- brkraw-0.5.0.dist-info/RECORD +88 -0
- {brkraw-0.3.11.dist-info → brkraw-0.5.0.dist-info}/WHEEL +1 -2
- brkraw-0.5.0.dist-info/entry_points.txt +13 -0
- brkraw/lib/__init__.py +0 -4
- brkraw/lib/backup.py +0 -641
- brkraw/lib/bids.py +0 -0
- brkraw/lib/errors.py +0 -125
- brkraw/lib/loader.py +0 -1220
- brkraw/lib/orient.py +0 -194
- brkraw/lib/parser.py +0 -48
- brkraw/lib/pvobj.py +0 -301
- brkraw/lib/reference.py +0 -245
- brkraw/lib/utils.py +0 -471
- brkraw/scripts/__init__.py +0 -0
- brkraw/scripts/brk_backup.py +0 -106
- brkraw/scripts/brkraw.py +0 -744
- brkraw/ui/__init__.py +0 -0
- brkraw/ui/config.py +0 -17
- brkraw/ui/main_win.py +0 -214
- brkraw/ui/previewer.py +0 -225
- brkraw/ui/scan_info.py +0 -72
- brkraw/ui/scan_list.py +0 -73
- brkraw/ui/subj_info.py +0 -128
- brkraw-0.3.11.dist-info/METADATA +0 -25
- brkraw-0.3.11.dist-info/RECORD +0 -28
- brkraw-0.3.11.dist-info/entry_points.txt +0 -3
- brkraw-0.3.11.dist-info/top_level.txt +0 -2
- tests/__init__.py +0 -0
- {brkraw-0.3.11.dist-info → brkraw-0.5.0.dist-info/licenses}/LICENSE +0 -0
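This release replaces the monolithic 0.3.x layout (brkraw/lib, brkraw/scripts, brkraw/ui) with the new apps/, cli/, core/, dataclasses/, resolver/, schema/, and specs/ packages. For orientation, here is a minimal sketch of the legacy loader API that disappears with this diff, reconstructed only from call sites visible in the deleted brkraw/scripts/brkraw.py below; it does not describe the 0.5.0 replacement under brkraw/apps/loader.

# Sketch of the removed 0.3.x API, based solely on calls that appear in the
# deleted script below (BrukerLoader, is_pvdataset, info, get_acqp, save_as).
# The path, scan/reco IDs, and slope/offset values are placeholders.
from brkraw import BrukerLoader

study = BrukerLoader('path/to/rawdata')      # PvDataset folder or zip archive
if study.is_pvdataset:
    study.info()                             # print the study/scan summary
    acqp = study.get_acqp(3)                 # ACQP parameters of scan 3
    name = acqp._parameters['ACQ_scan_name'].replace(' ', '-')
    # write scan 3 / reco 1 to NIfTI; slope/offset control header rescaling
    study.save_as(3, 1, 'sub-01-3-1-{}'.format(name), slope=False, offset=False)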
brkraw/scripts/brkraw.py
DELETED
@@ -1,744 +0,0 @@
-# -*- coding: utf-8 -*-
-from operator import index
-from ..lib.errors import *
-from .. import BrukerLoader, __version__
-from ..lib.utils import set_rescale, save_meta_files, mkdir
-import argparse
-import os, re
-import sys
-
-_supporting_bids_ver = '1.2.2'
-
-
-def main():
-    parser = argparse.ArgumentParser(prog='brkraw',
-                                     description="BrkRaw command-line interface")
-    parser.add_argument("-v", "--version", action='version', version='%(prog)s v{}'.format(__version__))
-
-    subparsers = parser.add_subparsers(title='Sub-commands',
-                                       description='To run this command, you must specify one of the functions listed'
-                                                   'below next to the command. For more information on each function, '
-                                                   'use -h next to the function name to call help document.',
-                                       help='description',
-                                       dest='function',
-                                       metavar='command')
-
-    input_str = "input raw Bruker data"
-    input_dir_str = "input directory that contains multiple raw Bruker data"
-    output_dir_str = "output directory name"
-    output_fnm_str = "output filename"
-    bids_opt = "create a JSON file contains metadata based on BIDS recommendation"
-
-    info = subparsers.add_parser("info", help='Prints out the information of the internal contents in Bruker raw data')
-    info.add_argument("input", help=input_str, type=str)
-
-    gui = subparsers.add_parser("gui", help='Run GUI mode')
-    nii = subparsers.add_parser("tonii", help='Convert a single raw Bruker data into NifTi file(s)')
-    niiall = subparsers.add_parser("tonii_all", help="Convert All raw Bruker data located in the input directory")
-    bids_helper = subparsers.add_parser("bids_helper", help="Creates a BIDS datasheet "
-                                                            "for guiding BIDS data converting.")
-    bids_convert = subparsers.add_parser("bids_convert", help="Convert ALL raw Bruker data located "
-                                                              "in the input directory based on the BIDS datasheet")
-
-    # Adding arguments for each parser
-    # gui
-    gui.add_argument("-i", "--input", help=input_str, type=str, default=None)
-    gui.add_argument("-o", "--output", help=output_dir_str, type=str, default=None)
-    gui.add_argument("--ignore-slope", help='remove slope value from header', action='store_true')
-    gui.add_argument("--ignore-offset", help='remove offset value from header', action='store_true')
-    gui.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true')
-
-    # tonii
-    nii.add_argument("input", help=input_str, type=str)
-    nii.add_argument("-b", "--bids", help=bids_opt, action='store_true')
-    nii.add_argument("-o", "--output", help=output_fnm_str, type=str, default=False)
-    nii.add_argument("-s", "--scanid", help="Scan ID, option to specify a particular scan to convert.", type=str)
-    nii.add_argument("-r", "--recoid", help="RECO ID (default=1), "
-                                            "option to specify a particular reconstruction id to convert",
-                     type=int, default=1)
-    nii.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \
-                     "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None)
-    nii.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \
-                     "the position variable can be defiend as <BodyPart>_<Side>, " + \
-                     "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None)
-    nii.add_argument("--ignore-slope", help='remove slope value from header', action='store_true')
-    nii.add_argument("--ignore-offset", help='remove offset value from header', action='store_true')
-    nii.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true')
-    nii.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true', default=True)
-
-    # tonii_all
-    niiall.add_argument("input", help=input_dir_str, type=str)
-    niiall.add_argument("-o", "--output", help=output_dir_str, type=str)
-    niiall.add_argument("-b", "--bids", help=bids_opt, action='store_true')
-    niiall.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \
-                        "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None)
-    niiall.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \
-                        "the position variable can be defiend as <BodyPart>_<Side>, " + \
-                        "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None)
-    niiall.add_argument("--ignore-slope", help='remove slope value from header', action='store_true')
-    niiall.add_argument("--ignore-offset", help='remove offset value from header', action='store_true')
-    niiall.add_argument("--ignore-rescale", help='remove slope and offset values from header', action='store_true')
-    niiall.add_argument("--ignore-localizer", help='ignore the scan if it is localizer', action='store_true')
-
-    # bids_helper
-    bids_helper.add_argument("input", help=input_dir_str, type=str)
-    bids_helper.add_argument("output", help="output BIDS datasheet filename", type=str)  # [220202] make compatible with csv, tsv and xlsx
-    bids_helper.add_argument("-f", "--format", help="file format of BIDS dataheets. Use this option if you did not specify the extension on output. The available options are (csv/tsv/xlsx) (default: csv)", type=str, default='csv')
-    bids_helper.add_argument("-j", "--json", help="create JSON syntax template for "
-                                                  "parsing metadata from the header", action='store_true')
-
-    # bids_convert
-    bids_convert.add_argument("input", help=input_dir_str, type=str)
-    bids_convert.add_argument("datasheet", help="input BIDS datahseet filename", type=str)
-    bids_convert.add_argument("-j", "--json", help="input JSON syntax template filename", type=str, default=False)
-    bids_convert.add_argument("-o", "--output", help=output_dir_str, type=str, default=False)
-    bids_convert.add_argument("-t", "--subjecttype", help="override subject type in case the original setting was not properly set." + \
-                              "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)", type=str, default=None)
-    bids_convert.add_argument("-p", "--position", help="override position information in case the original setting was not properly input." + \
-                              "the position variable can be defiend as <BodyPart>_<Side>, " + \
-                              "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)", type=str, default=None)
-    bids_convert.add_argument("--ignore-slope", help='remove slope value from header', action='store_true')
-    bids_convert.add_argument("--ignore-offset", help='remove offset value from header', action='store_true')
-    bids_convert.add_argument("--ignore-rescale", help='remove slope and offset values from header',
-                              action='store_true')
-
-    args = parser.parse_args()
-
-    if args.function == 'info':
-        path = args.input
-        if any([os.path.isdir(path), ('zip' in path), ('PvDataset' in path)]):
-            study = BrukerLoader(path)
-            study.info()
-        else:
-            list_path = [d for d in os.listdir('.') if (any([os.path.isdir(d),
-                                                             ('zip' in d),
-                                                             ('PvDataset' in d)]) and re.search(path, d, re.IGNORECASE))]
-            for p in list_path:
-                study = BrukerLoader(p)
-                study.info()
-
-    elif args.function == 'gui':
-        ipath = args.input
-        opath = args.output
-        from ..ui.main_win import MainWindow
-        root = MainWindow()
-        if ipath != None:
-            root._path = ipath
-            if not args.ignore_rescale:
-                if args.ignore_slope:
-                    root._ignore_slope = True
-                else:
-                    root._ignore_slope = False
-                if args.ignore_offset:
-                    root._ignore_offset = True
-                else:
-                    root._ignore_offset = False
-            else:
-                root._ignore_slope = True
-                root._ignore_offset = True
-
-            root._extend_layout()
-            root._load_dataset()
-            if opath != None:
-                root._output = opath
-            else:
-                root._output = os.path.curdir
-        root.mainloop()
-
-    elif args.function == 'tonii':
-        path = args.input
-        scan_id = args.scanid
-        reco_id = args.recoid
-        study = BrukerLoader(path)
-        slope, offset = set_rescale(args)
-        ignore_localizer = args.ignore_localizer
-        study = override_header(study, args.subjecttype, args.position)
-
-        if study.is_pvdataset:
-            if args.output:
-                output = args.output
-            else:
-                output = '{}_{}'.format(study._pvobj.subj_id,study._pvobj.study_id)
-            if scan_id:
-                acqpars = study.get_acqp(int(scan_id))
-                scanname = acqpars._parameters['ACQ_scan_name']
-                scanname = scanname.replace(' ','-')
-                output_fname = '{}-{}-{}-{}'.format(output, scan_id, reco_id, scanname)
-                scan_id = int(scan_id)
-                reco_id = int(reco_id)
-
-                if ignore_localizer and is_localizer(study, scan_id, reco_id):
-                    print('Identified a localizer, the file will not be converted: ScanID:{}'.format(str(scan_id)))
-                else:
-                    try:
-                        study.save_as(scan_id, reco_id, output_fname, slope=slope, offset=offset)
-                        save_meta_files(study, args, scan_id, reco_id, output_fname)
-                        print('NifTi file is generated... [{}]'.format(output_fname))
-                    except:
-                        print('Conversion failed: ScanID:{}, RecoID:{}'.format(str(scan_id), str(reco_id)))
-            else:
-                for scan_id, recos in study._pvobj.avail_reco_id.items():
-                    acqpars = study.get_acqp(int(scan_id))
-                    scanname = acqpars._parameters['ACQ_scan_name']
-                    scanname = scanname.replace(' ','-')
-                    if ignore_localizer and is_localizer(study, scan_id, recos[0]):
-                        print('Identified a localizer, the file will not be converted: ScanID:{}'.format(str(scan_id)))
-                    else:
-                        for reco_id in recos:
-                            output_fname = '{}-{}-{}-{}'.format(output, str(scan_id).zfill(2), reco_id, scanname)
-                            try:
-                                study.save_as(scan_id, reco_id, output_fname, slope=slope, offset=offset)
-                                save_meta_files(study, args, scan_id, reco_id, output_fname)
-                                print('NifTi file is generated... [{}]'.format(output_fname))
-                            except:
-                                print('Conversion failed: ScanID:{}, RecoID:{}'.format(str(scan_id), str(reco_id)))
-        else:
-            print('{} is not PvDataset.'.format(path))
-
-    elif args.function == 'tonii_all':
-        from os.path import join as opj, isdir, isfile
-
-        path = args.input
-        slope, offset = set_rescale(args)
-        ignore_localizer = args.ignore_localizer
-        invalid_error_message = '[Error] Invalid input path: {}\n'.format(path)
-        empty_folder = ' The input path does not contain any raw data.'
-        wrong_target = ' The input path indicates raw data itself. \n' \
-                       ' You must input the parents folder instead of path of the raw data\n' \
-                       ' If you want to convert single session raw data, use (tonii) instead.'
-
-        list_of_raw = sorted([d for d in os.listdir(path) if isdir(opj(path, d)) \
-                              or (isfile(opj(path, d)) and (('zip' in d) or ('PvDataset' in d)))])
-        if not len(list_of_raw):
-            # raise error with message if the folder is empty (or does not contains any PvDataset)
-            print(invalid_error_message, empty_folder)
-            raise InvalidApproach(invalid_error_message)
-        if BrukerLoader(path).is_pvdataset:
-            # raise error if the input path is identified as PvDataset
-            print(invalid_error_message, wrong_target)
-            raise InvalidApproach(invalid_error_message)
-
-        base_path = args.output
-        if not base_path:
-            base_path = 'Data'
-        mkdir(base_path)
-        for raw in list_of_raw:
-            sub_path = os.path.join(path, raw)
-            study = BrukerLoader(sub_path)
-            if study.is_pvdataset:
-                study = override_header(study, args.subjecttype, args.position)
-                if len(study._pvobj.avail_scan_id):
-                    subj_path = os.path.join(base_path, 'sub-{}'.format(study._pvobj.subj_id))
-                    mkdir(subj_path)
-                    sess_path = os.path.join(subj_path, 'ses-{}'.format(study._pvobj.study_id))
-                    mkdir(sess_path)
-                    for scan_id, recos in study._pvobj.avail_reco_id.items():
-                        if ignore_localizer and is_localizer(study, scan_id, recos[0]):  # add option to exclude localizer during mass conversion
-                            print('Identified a localizer, the file will not be converted: ScanID:{}'.format(str(scan_id)))
-                        else:
-                            method = study._pvobj._method[scan_id].parameters['Method']
-                            if re.search('epi', method, re.IGNORECASE) and not re.search('dti', method, re.IGNORECASE):
-                                output_path = os.path.join(sess_path, 'func')
-                            elif re.search('dti', method, re.IGNORECASE):
-                                output_path = os.path.join(sess_path, 'dwi')
-                            elif re.search('flash', method, re.IGNORECASE) or re.search('rare', method, re.IGNORECASE):
-                                output_path = os.path.join(sess_path, 'anat')
-                            else:
-                                output_path = os.path.join(sess_path, 'etc')
-                            mkdir(output_path)
-                            filename = 'sub-{}_ses-{}_{}'.format(study._pvobj.subj_id, study._pvobj.study_id,
-                                                                 str(scan_id).zfill(2))
-                            for reco_id in recos:
-                                output_fname = os.path.join(output_path, '{}_reco-{}'.format(filename,
-                                                                                             str(reco_id).zfill(2)))
-                                try:
-                                    study.save_as(scan_id, reco_id, output_fname, slope=slope, offset=offset)
-                                    save_meta_files(study, args, scan_id, reco_id, output_fname)
-                                except:
-                                    print('Conversion failed: ScanID:{}, RecoID:{}'.format(str(scan_id), str(reco_id)))
-                    print('{} is converted...'.format(raw))
-                else:
-                    print('{} does not contains any scan data to convert...'.format(raw))
-            else:
-                print('{} is not PvDataset.'.format(raw))
-
-    elif args.function == 'bids_helper':
-        import pandas as pd
-        path = os.path.abspath(args.input)
-        ds_output = os.path.abspath(args.output)
-        make_json = args.json
-
-        # [220202] for back compatibility
-        ds_fname, ds_output_ext = os.path.splitext(ds_output)
-        if ds_output_ext in ['.xlsx', '.csv', '.tsv']:
-            ds_format = ds_output_ext[1:]
-        else:
-            ds_format = args.format
-
-        # [220202] make compatible with csv, tsv and xlsx
-        output = '{}.{}'.format(ds_output, ds_format)
-
-        Headers = ['RawData', 'SubjID', 'SessID', 'ScanID', 'RecoID', 'DataType',
-                   'task', 'acq', 'ce', 'rec', 'dir', 'run', 'modality', 'Start', 'End']
-        df = pd.DataFrame(columns=Headers)
-
-        # if the path directly contains scan files for one participant
-        if 'subject' in os.listdir(path):
-            dNames = ['']
-        else:  # old way, when you run against the parent folder (which contains one or more scan folder).
-            dNames = sorted(os.listdir(path))
-
-        for dname in dNames:
-            dpath = os.path.join(path, dname)
-
-            try:
-                dset = BrukerLoader(dpath)
-            except:
-                dset = None
-
-            if dset != None:
-                if dset.is_pvdataset:
-                    pvobj = dset.pvobj
-
-                    rawdata = pvobj.path
-                    subj_id = pvobj.subj_id
-
-                    # make subj_id bids appropriate
-                    subj_id = cleanSubjectID(subj_id)
-
-                    sess_id = pvobj.session_id
-
-                    # make sess_id bids appropriate
-                    sess_id = cleanSessionID(sess_id)
-
-                    for scan_id, recos in pvobj.avail_reco_id.items():
-                        for reco_id in recos:
-                            visu_pars = dset.get_visu_pars(scan_id, reco_id)
-                            if dset._get_dim_info(visu_pars)[1] == 'spatial_only':
-
-                                if not is_localizer(dset, scan_id, reco_id):
-                                    method = dset.get_method(scan_id).parameters['Method']
-
-                                    datatype = assignDataType(method)
-
-                                    item = dict(zip(Headers, [rawdata, subj_id, sess_id, scan_id, reco_id, datatype]))
-                                    if datatype == 'fmap':
-                                        for m, s, e in [['fieldmap', 0, 1], ['magnitude', 1, 2]]:
-                                            item['modality'] = m
-                                            item['Start'] = s
-                                            item['End'] = e
-                                            df = pd.concat([df, pd.DataFrame([item])], ignore_index=True)
-                                    elif datatype == 'dwi':
-                                        item['modality'] = 'dwi'
-                                        df = pd.concat([df, pd.DataFrame([item])], ignore_index=True)
-                                    elif datatype == 'anat' and re.search('MSME', method, re.IGNORECASE):
-                                        item['modality'] = 'MESE'
-                                        df = pd.concat([df, pd.DataFrame([item])], ignore_index=True)
-                                    else:
-                                        df = pd.concat([df, pd.DataFrame([item])], ignore_index=True)
-        if 'xlsx' in ds_format:
-            df.to_excel(output, index=None)
-        elif 'csv' in ds_format:
-            df.to_csv(output, index=None, sep=',')
-        elif 'tsv' in ds_format:
-            df.to_csv(output, index=None, sep='\t')
-        else:
-            print('[{}] is not supported.'.format(ds_format))
-            raise InvalidApproach('Invalid input for datasheet!')
-
-        if make_json:
-            json_fname = '{}.json'.format(ds_fname)
-            print('Creating JSON syntax template for parsing the BIDS required metadata '
-                  '(BIDS v{}): {}'.format(_supporting_bids_ver, json_fname))
-            with open(json_fname, 'w') as f:
-                import json
-                from ..lib.reference import COMMON_META_REF, FMRI_META_REF, FIELDMAP_META_REF
-                ref_dict = dict(common=COMMON_META_REF,
-                                func=FMRI_META_REF,
-                                fmap=FIELDMAP_META_REF)
-                json.dump(ref_dict, f, indent=4)
-
-        print('[Important notice] The function helps to minimize the BIDS organization but does not guarantee that '
-              'the dataset always meets the BIDS requirements. '
-              'Therefore, after converting your data, we recommend validating '
-              'your dataset using an official BIDS validator.')
-
-    elif args.function == 'bids_convert':
-        import pandas as pd
-        import numpy as np
-        from ..lib.utils import build_bids_json, bids_validation
-
-        pd.options.mode.chained_assignment = None
-        path = args.input
-        datasheet = args.datasheet
-        output = args.output
-        datasheet_ext = os.path.splitext(datasheet)[-1]
-
-        # [220202] make compatible with csv, tsv and xlsx
-        if 'xlsx' in datasheet_ext:
-            df = pd.read_excel(datasheet, dtype={'SubjID': str, 'SessID': str, 'run': str})
-        elif 'csv' in datasheet_ext:
-            df = pd.read_csv(datasheet, dtype={'SubjID': str, 'SessID': str, 'run': str}, index_col=None, header=0, sep=',')
-        elif 'tsv' in datasheet_ext:
-            df = pd.read_csv(datasheet, dtype={'SubjID': str, 'SessID': str, 'run': str}, index_col=None, header=0, sep='\t')
-        else:
-            print(f'{datasheet_ext} if not supported format.')
-            raise InvalidApproach('Invalid input for datasheet!')
-
-        json_fname = args.json
-        slope, offset = set_rescale(args)
-
-        # check if the project is session included
-        if all(pd.isnull(df['SessID'])):
-            # SessID was removed (not column, but value), this need to go to documentation
-            include_session = False
-        else:
-            # if SessionID appears in datasheet, then by default session appears.
-            include_session = True
-
-        if not output:
-            root_path = os.path.abspath(os.path.join(os.path.curdir, 'Data'))
-        else:
-            root_path = output
-
-        mkdir(root_path)
-
-        # prepare the required file for converted BIDS dataset
-        generateModalityAgnosticFiles(root_path, json_fname)
-
-        print('Inspect input BIDS datasheet...')
-
-        # if the path directly contains scan files for one participant
-        if 'subject' in os.listdir(path):
-            dNames = ['']
-        else:  # old way, when you run against the parent folder (which contains one or more scan folder).
-            dNames = sorted(os.listdir(path))
-
-        for dname in dNames:
-            dpath = os.path.join(path, dname)
-            try:
-                dset = BrukerLoader(dpath)
-                dset = override_header(dset, args.subjecttype, args.position)
-                if dset.is_pvdataset:
-                    pvobj = dset.pvobj
-                    rawdata = pvobj.path
-                    filtered_dset = df[df['RawData'].isin([rawdata])].reset_index()
-
-                    # add Filename and Dir colomn
-                    filtered_dset.loc[:, 'FileName'] = [np.nan] * len(filtered_dset)
-                    filtered_dset.loc[:, 'Dir'] = [np.nan] * len(filtered_dset)
-
-                    if len(filtered_dset):
-                        subj_id = list(set(filtered_dset['SubjID']))[0]
-                        subj_code = 'sub-{}'.format(subj_id)
-                        # append to participants.tsv one record
-                        with open(os.path.join(root_path, 'participants.tsv'), 'a+') as f:
-                            f.write(subj_code + '\n')
-
-                        filtered_dset = completeFieldsCreateFolders(df, filtered_dset, dset, include_session, root_path, subj_code)
-
-                        list_tested_fn = []
-                        # Converting data according to the updated sheet
-                        print('Converting {}...'.format(dname))
-
-                        for i, row in filtered_dset.iterrows():
-                            temp_fname = '{}_{}'.format(row.FileName, row.modality)
-                            if temp_fname not in list_tested_fn:
-                                # filter the DataFrame that has same filename (updated without run)
-                                fn_filter = filtered_dset.loc[:, 'FileName'].isin([row.FileName])
-                                fn_df = filtered_dset[fn_filter].reset_index(drop=True)
-
-                                # filter specific modality from above DataFrame
-                                md_filter = fn_df.loc[:, 'modality'].isin([row.modality])
-                                md_df = fn_df[md_filter].reset_index(drop=True)
-
-                                if len(md_df) > 1:
-                                    conflict_tested = []
-                                    for j, sub_row in md_df.iterrows():
-                                        if pd.isnull(sub_row.run):
-                                            fname = '{}_run-{}'.format(sub_row.FileName, str(j+1).zfill(2))
-                                        else:
-                                            _ = bids_validation(df, i, 'run', sub_row.run, 3, dtype=int)
-                                            fname = '{}_run-{}'.format(sub_row.FileName, str(sub_row.run).zfill(2))  # [20210822] format error
-                                        if fname in conflict_tested:
-                                            raise ValueConflictInField('ScanID:[{}] Conflict error. '
-                                                                       'The [run] index value must be unique '
-                                                                       'among the scans with the same modality.'
-                                                                       ''.format(sub_row.ScanID))
-                                        else:
-                                            conflict_tested.append(fname)
-                                        build_bids_json(dset, sub_row, fname, json_fname, slope=slope, offset=offset)
-                                else:
-                                    fname = '{}'.format(row.FileName)
-                                    build_bids_json(dset, row, fname, json_fname, slope=slope, offset=offset)
-                                list_tested_fn.append(temp_fname)
-                        print('...Done.')
-            except FileNotValidError:
-                pass
-    else:
-        parser.print_help()
-
-
-def cleanSubjectID(subj_id):
-    """To replace the underscore in subject id.
-    Args:
-        subj_id (str): the orignal subject id.
-    Returns:
-        str: the replaced subject id.
-    """
-
-    import warnings
-
-    subj_id = str(subj_id)
-
-    # underscore will mess up bids output
-    if '_' in subj_id:
-        subj_id = subj_id.replace('_', 'Underscore')
-        # warn user that the subject/participantID has a '_' and is replaced with 'Underscore'
-        warnings.warn('Participant or subject ID has "_"s, replaced with "Underscore" to make it bids compatiable. You should avoid use "_" in participant/subject ID for BIDS purpose')
-
-    # Hyphen will mess up bids output
-    if '-' in subj_id:
-        subj_id = subj_id.replace('-', 'Hyphen')
-        # warn user that the subject/participantID has a '-' and is replaced with 'Hyphen'
-        warnings.warn('Participant or subject ID has "-"s, replaced with "Hyphen" to make it bids compatiable. You should avoid use "-" in participant/subject ID for BIDS purpose')
-    return subj_id
-
-
-# This could integrate with cleanSubjectID, but mind the different warning messages
-def cleanSessionID(sess_id):
-    """To replace the underscore in session id.
-    Args:
-        sess_id (str): the orignal session id.
-    Returns:
-        str: the replaced session id.
-    """
-
-    import warnings
-
-    sess_id = str(sess_id)
-
-    # underscore will mess up bids output
-    if '_' in sess_id:
-        sess_id = sess_id.replace('_', 'Underscore')
-        # warn user that the subject/participantID has a '_' and is replaced with 'Underscore'
-        warnings.warn('Session ID has "_"s, replaced with "Underscore" to make it bids compatiable. You should avoid use "_" in session ID for BIDS purpose')
-
-    # Hyphen will mess up bids output
-    if '-' in sess_id:
-        sess_id = sess_id.replace('-', 'Hyphen')
-        # warn user that the subject/participantID has a '-' and is replaced with 'Hyphen'
-        warnings.warn('Session ID has "-"s, replaced with "Hyphen" to make it bids compatiable. You should avoid use "-" in session ID for BIDS purpose')
-
-    return sess_id
-
-
-def assignDataType (method):
-    """To assign the dataType based on method.
-    Args:
-        method (str): the method from BrukerLoader.get_method.parameters['Method'].
-    Returns:
-        str: the datatype.
-    """
-    if re.search('epi', method, re.IGNORECASE) and not re.search('dti', method, re.IGNORECASE):
-        #Why epi is function here? there should at lease a comment.
-        datatype = 'func'
-    elif re.search('dti', method, re.IGNORECASE):
-        datatype = 'dwi'
-    elif re.search('flash', method, re.IGNORECASE) or re.search('rare', method, re.IGNORECASE):
-        datatype = 'anat'
-    elif re.search('fieldmap', method, re.IGNORECASE):
-        datatype = 'fmap'
-    elif re.search('MSME', method, re.IGNORECASE):
-        datatype = 'anat'
-
-        # warn user for MSME default to anat and MESE
-        import warnings
-        msg = "MSME found in your scan, default to anat DataType and MESE modality, " + \
-              "please update the datasheet to indicate the proper DataType if different than default."
-        warnings.warn(msg)
-
-    else:
-        # what is this? seems like holding files not able to identify
-        datatype = 'etc'
-
-        # warn user to manually update the DataType in datasheet
-        import warnings
-
-        msg = "\n \n ----- Important ----- \
-        \n We do not know how to classify some of your scan and marked them as etc.\
-        \n To produce valid BIDS outputs, please update the datasheet to indicate the proper DataType for them \n"
-        warnings.warn(msg)
-
-    return datatype
-
-
-def generateModalityAgnosticFiles(root_path, json_fname):
-    """To create ModalityAgnosticFiles in output folder.
-    Args:
-        root_path (str): the root output folder
-        json_fname (str): I do not under why this variable is needed.
-    Returns:
-        nothing: just generate files.
-    """
-    data_des = 'dataset_description.json'
-    readme = 'README'
-    # why open use only the current folder and os.path not?
-    if not os.path.exists(data_des):
-        with open(os.path.join(root_path, 'dataset_description.json'), 'w') as f:
-            import json
-            import datetime
-            from ..lib.reference import DATASET_DESC_REF
-            json.dump(DATASET_DESC_REF, f, indent=4)
-    if not os.path.exists(readme):
-        with open(os.path.join(root_path, readme), 'w') as f:
-            # I do not know why json_fname here.
-            f.write('This dataset has been converted using BrkRaw (v{})'
-                    'at {}.\n'.format(json_fname, datetime.datetime.now()))
-            f.write('## How to cite?\n - https://doi.org/10.5281/zenodo.3818615\n')
-
-    # https://bids-specification.readthedocs.io/en/stable/03-modality-agnostic-files.html
-    # participant.tsv file. if not exist, create it, and append. if need tab use \t
-    participantsTsvPath = os.path.join(root_path, 'participants.tsv')
-    if not os.path.exists(participantsTsvPath):
-        with open(participantsTsvPath, 'a+') as f:
-            f.write('participant_id\n')
-    else:
-        print('Exiting before convert..., participants.tsv already exist in output folder: ', participantsTsvPath)
-        sys.exit()
-
-    # participant.json file. if not exist, create it, and append. if need tab use \t
-    participantsJsonPath = os.path.join(root_path, 'participants.json')
-    if not os.path.exists(participantsJsonPath):
-        with open(participantsJsonPath, 'a+') as f:
-            sideCar = {
-                "participant_id": {
-                    "Description": "Participant identifier"
-                }
-            }
-            json.dump(sideCar, f, indent=4)
-    else:
-        print('Exiting...before convert, participants.json already exist in output folder: ', participantsJsonPath)
-        sys.exit()
-
-
-
-def createFolderTree(include_session, row, root_path, subj_code):
-    """To create participant (and session if include_session) folder.
-    Args:
-        include_session (bool): include_session.
-        row (obj): a (panadas) row of data containing SessID and DataType.
-        root_path (str): the root path of output folder
-        subj_code (str): subject or participant folder name
-    Returns:
-        list: first 0 element is dtype_path, second 1 is fname.
-    """
-    if include_session:
-        # If session included, make session dir
-        sess_code = 'ses-{}'.format(row.SessID)
-        subj_path = os.path.join(root_path, subj_code)
-        mkdir(subj_path)
-        subj_path = os.path.join(subj_path, sess_code)
-        mkdir(subj_path)
-        # add session info to filename as well
-        fname = '{}_{}'.format(subj_code, sess_code)
-    else:
-        subj_path = os.path.join(root_path, subj_code)
-        mkdir(subj_path)
-        fname = '{}'.format(subj_code)
-
-    datatype = row.DataType
-    dtype_path = os.path.join(subj_path, datatype)
-    mkdir(dtype_path)
-
-    return [dtype_path, fname]
-
-
-def completeFieldsCreateFolders (df, filtered_dset, dset, multi_session, root_path, subj_code):
-    """To complete the dataframe fields and create output folders. [too many parameters]
-    Args:
-        df (dataframe): original pandas dataframe, not sure whether it can replaced by filtered_dset (someone has to figure it out)
-        filtered_dset (dataframe): filtered pandas dataframe
-        dset (object): BrukerLoader(dpath) object
-        multi_session (bool): multi_session.
-        root_path (str): the root path of output folder
-        subj_code (str): subject or participant folder name
-    Returns:
-        dataframe: the completed filtered_dset.
-    """
-    import pandas as pd
-    from ..lib.utils import bids_validation
-
-    # iterrows to create folder tree, add to filtered_dset fname, dtype_path, and modality
-    for i, row in filtered_dset.iterrows():
-        dtype_path, fname = createFolderTree(multi_session, row, root_path, subj_code)
-        if pd.notnull(row.task):
-            if bids_validation(df, i, 'task', row.task, 10):
-                fname = '{}_task-{}'.format(fname, row.task)
-        if pd.notnull(row.acq):
-            if bids_validation(df, i, 'acq', row.acq, 10):
-                fname = '{}_acq-{}'.format(fname, row.acq)
-        if pd.notnull(row.ce):
-            if bids_validation(df, i, 'ce', row.ce, 5):
-                fname = '{}_ce-{}'.format(fname, row.ce)
-        if pd.notnull(row.dir):
-            if bids_validation(df, i, 'dir', row.dir, 2):
-                fname = '{}_dir-{}'.format(fname, row.dir)
-        if pd.notnull(row.rec):
-            if bids_validation(df, i, 'rec', row.rec, 2):
-                fname = '{}_rec-{}'.format(fname, row.rec)
-        filtered_dset.loc[i, 'FileName'] = fname
-        filtered_dset.loc[i, 'Dir'] = dtype_path
-        if pd.isnull(row.modality):
-            method = dset.get_method(row.ScanID).parameters['Method']
-            if row.DataType == 'anat':
-                if re.search('flash', method, re.IGNORECASE):
-                    modality = 'FLASH'
-                elif re.search('rare', method, re.IGNORECASE):
-                    modality = 'T2w'
-                else:
-                    modality = '{}'.format(method.split(':')[-1])
-            else:
-                modality = '{}'.format(method.split(':')[-1])
-            filtered_dset.loc[i, 'modality'] = modality
-        else:
-            bids_validation(df, i, 'modality', row.modality, 10, dtype=str)
-
-    return filtered_dset
-
-
-def is_localizer(pvobj, scan_id, reco_id):
-    visu_pars = pvobj.get_visu_pars(scan_id, reco_id)
-    if 'VisuAcquisitionProtocol' in visu_pars.parameters:
-        ac_proc = visu_pars.parameters['VisuAcquisitionProtocol']
-        if re.search('tripilot', ac_proc, re.IGNORECASE) or re.search('localizer', ac_proc, re.IGNORECASE):
-            return True
-        else:
-            return False
-    else:
-        return False
-
-
-def override_header(pvobj, subjtype, position):
-    """override subject position and subject type"""
-    import warnings
-    if position != None:
-        try:
-            pvobj.override_position(position)
-        except:
-            msg = "Unknown position string [{}]. Please check your input option.".format(position) + \
-                  "The position variable can be defiend as <BodyPart>_<Side>," + \
-                  "available BodyParts are (Head, Foot, Tail) and sides are (Supine, Prone, Left, Right). (e.g. Head_Supine)"
-            raise InvalidApproach(msg)
-    if subjtype != None:
-        try:
-            pvobj.override_subjtype(subjtype)
-        except:
-            msg = "Unknown subject type [{}]. Please check your input option.".format(subjtype) + \
-                  "available options are (Biped, Quadruped, Phantom, Other, OtherAnimal)"
-            raise InvalidApproach(msg)
-    return pvobj
-
-
-if __name__ == '__main__':
-    main()