xnatqa 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- xnatqa-0.0.1/LICENSE +0 -0
- xnatqa-0.0.1/PKG-INFO +28 -0
- xnatqa-0.0.1/README.md +9 -0
- xnatqa-0.0.1/pyproject.toml +29 -0
- xnatqa-0.0.1/setup.cfg +4 -0
- xnatqa-0.0.1/xnatqa/__init__.py +21 -0
- xnatqa-0.0.1/xnatqa/launch/__init__.py +36 -0
- xnatqa-0.0.1/xnatqa/tag/__init__.py +198 -0
- xnatqa-0.0.1/xnatqa.egg-info/PKG-INFO +28 -0
- xnatqa-0.0.1/xnatqa.egg-info/SOURCES.txt +11 -0
- xnatqa-0.0.1/xnatqa.egg-info/dependency_links.txt +1 -0
- xnatqa-0.0.1/xnatqa.egg-info/requires.txt +4 -0
- xnatqa-0.0.1/xnatqa.egg-info/top_level.txt +1 -0
xnatqa-0.0.1/LICENSE
ADDED
|
File without changes
|
xnatqa-0.0.1/PKG-INFO
ADDED
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: xnatqa
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: A workflow for automatically labeling and running QA on MRI scans within XNAT
|
|
5
|
+
Author-email: Kyle Kurkela <kkurkela@bu.edu>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/kakurk/auto_labeler
|
|
8
|
+
Project-URL: Issues, https://github.com/kakurk/auto_labeler/issues
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Operating System :: OS Independent
|
|
11
|
+
Requires-Python: >=3.9
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
License-File: LICENSE
|
|
14
|
+
Requires-Dist: xnattager
|
|
15
|
+
Requires-Dist: yaxil
|
|
16
|
+
Requires-Dist: yaml
|
|
17
|
+
Requires-Dist: glob
|
|
18
|
+
Dynamic: license-file
|
|
19
|
+
|
|
20
|
+
# xnatqa
|
|
21
|
+
|
|
22
|
+
A workflow built on top of the BOLDQC, ANATQC, yaxil, and xnattager packages from Harvard.
|
|
23
|
+
|
|
24
|
+
The goal of this workflow is to automatically tag all scans in a newly created scanning session within an XNAT instance as BOLD or ANAT and then automatically launching the BOLDQC and ANATQC routines for the respective scan types.
|
|
25
|
+
|
|
26
|
+
Please see BOLDQC, ANATQC, yaxil, and xnattager for more information.
|
|
27
|
+
|
|
28
|
+
BOLDQC and ANATQC are implemented as singularity containers that are housed as modules on the SCC. Recipes for building those containers can be found here and here.
|
xnatqa-0.0.1/README.md
ADDED
|
@@ -0,0 +1,9 @@
|
|
|
1
|
+
# xnatqa
|
|
2
|
+
|
|
3
|
+
A workflow built on top of the BOLDQC, ANATQC, yaxil, and xnattager packages from Harvard.
|
|
4
|
+
|
|
5
|
+
The goal of this workflow is to automatically tag all scans in a newly created scanning session within an XNAT instance as BOLD or ANAT and then automatically launching the BOLDQC and ANATQC routines for the respective scan types.
|
|
6
|
+
|
|
7
|
+
Please see BOLDQC, ANATQC, yaxil, and xnattager for more information.
|
|
8
|
+
|
|
9
|
+
BOLDQC and ANATQC are implemented as singularity containers that are housed as modules on the SCC. Recipes for building those containers can be found here and here.
|
|
@@ -0,0 +1,29 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "xnatqa"
|
|
3
|
+
version = "0.0.1"
|
|
4
|
+
authors = [
|
|
5
|
+
{ name="Kyle Kurkela", email="kkurkela@bu.edu" },
|
|
6
|
+
]
|
|
7
|
+
description = "A workflow for automatically labeling and running QA on MRI scans within XNAT"
|
|
8
|
+
readme = "README.md"
|
|
9
|
+
requires-python = ">=3.9"
|
|
10
|
+
classifiers = [
|
|
11
|
+
"Programming Language :: Python :: 3",
|
|
12
|
+
"Operating System :: OS Independent",
|
|
13
|
+
]
|
|
14
|
+
license = "MIT"
|
|
15
|
+
license-files = ["LICEN[CS]E*"]
|
|
16
|
+
dependencies = [
|
|
17
|
+
"xnattager",
|
|
18
|
+
"yaxil",
|
|
19
|
+
"PyYAML",
|
|
20
|
+
# "glob" removed: glob is part of the Python standard library, not a PyPI dependency
|
|
21
|
+
]
|
|
22
|
+
|
|
23
|
+
[project.urls]
|
|
24
|
+
Homepage = "https://github.com/kakurk/auto_labeler"
|
|
25
|
+
Issues = "https://github.com/kakurk/auto_labeler/issues"
|
|
26
|
+
|
|
27
|
+
[build-system]
|
|
28
|
+
requires = ["setuptools >= 77.0.3"]
|
|
29
|
+
build-backend = "setuptools.build_meta"
|
xnatqa-0.0.1/setup.cfg
ADDED
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import json
|
|
3
|
+
|
|
4
|
+
def get_files_with_extension(directory, extension):
    """Return the names of regular files in *directory* that end with *extension*.

    Only the top level of *directory* is scanned; subdirectories are not
    recursed into, and directory entries whose names happen to match are
    excluded. *extension* is matched with ``str.endswith``, so include the
    leading dot (e.g. ``".json"``).
    """
    return [
        entry
        for entry in os.listdir(directory)
        if os.path.isfile(os.path.join(directory, entry)) and entry.endswith(extension)
    ]
|
|
10
|
+
|
|
11
|
+
def read_json_file(file_path):
    """Parse the JSON file at *file_path* and return the decoded object.

    On a missing file or malformed JSON, prints a diagnostic message and
    returns ``None`` instead of raising, so callers can treat both failure
    modes uniformly.
    """
    try:
        with open(file_path, 'r') as file:
            return json.load(file)
    except FileNotFoundError:
        print(f"Error: File not found: {file_path}")
    except json.JSONDecodeError:
        print(f"Error: Invalid JSON format in file: {file_path}")
    return None
|
|
@@ -0,0 +1,36 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import yaxil
|
|
3
|
+
|
|
4
|
+
def launch(MRsession):
    """Launch the appropriate QA routine for every tagged scan in *MRsession*.

    At this point everything has been labeled for this session, so we:
    identify all of the tagged scans, and for each tagged scan submit the
    matching QA routine via ``qsub``. The integer passed to each qsub script
    is a 0-based counter over scans of that type, in the order yaxil returns
    them.
    """
    # authenticate with xnat using the ~/.xnat_auth file created earlier in the workflow
    auth = yaxil.auth(alias = 'xnat')

    # open and automatically close a connection to XNAT using the auth
    with yaxil.session(auth) as sess:
        # number of BOLD (b) and ANAT (a) scans identified so far; doubles as
        # the 0-based index handed to the qsub scripts
        b = 0
        a = 0

        # for each scan in this session...
        for scan in sess.scans(label=MRsession):

            # this scan's note. Untagged scans can have an empty/None note,
            # and `'#BOLD' in None` raises TypeError — fall back to ''.
            note = scan['note'] or ''

            # if that note has a "#BOLD" tag...
            if '#BOLD' in note:
                print('Run BOLDQC')
                # NOTE(review): MRsession is interpolated into a shell command;
                # confirm it can never contain shell metacharacters, or switch
                # to subprocess.run([...]) with a list argument.
                os.system(f'qsub -P drkrcs boldqc.qsub {MRsession} {b}')
                b += 1

            # if that note has a "#T1w" tag...
            if '#T1w' in note:
                print('Run ANATQC')
                os.system(f'qsub -P drkrcs anatqc.qsub {MRsession} {a}')
                a += 1
|
|
@@ -0,0 +1,198 @@
|
|
|
1
|
+
import os
|
|
2
|
+
import yaml
|
|
3
|
+
from glob import glob
|
|
4
|
+
import re
|
|
5
|
+
import xnatqa
|
|
6
|
+
|
|
7
|
+
def extract_bids_suffix(bids_string):
    """Return the BIDS suffix of *bids_string* (the text after the last underscore).

    For example, ``"sub-01_task-rest_bold"`` yields ``"bold"``. A string with
    no underscore is returned unchanged. Unlike the previous regex-based
    implementation ("([^_]+)$"), this never raises: an empty string or one
    ending in ``_`` yields ``""`` rather than AttributeError on a None match.
    """
    # rpartition is total: it returns ('', '', s) when '_' is absent, so the
    # last element is always the text after the final underscore.
    return bids_string.rpartition('_')[-1]
|
|
13
|
+
|
|
14
|
+
def generate_tagger_config(dicom_dir):
    """Build a tagger.yaml config describing how to tag each scan under *dicom_dir*.

    Runs dcm2niix in "BIDS sidecar only" mode (no *.nii.gz output) over the
    DICOM directory, then inspects every generated *.json sidecar and sorts
    each scan into one of five buckets (t1, t1_move, t2, t2_move, bold) based
    on dcm2niix's "BidsGuess" field plus manual keyword checks that catch the
    guesser's known mistakes. The result is dumped to ./tagger.yaml for the
    xnattagger package.

    Side effects: writes output_*.json, log.txt and tagger.yaml into the
    current working directory.
    """

    # This calls the latest version of dcm2niix WITHOUT creating the *.nii.gz
    # files, only the BIDS metadata *.json sidecars.
    # dcm2niix automatically skips scans that it has identified as being
    # non-BIDS (i.e., localizers, scouts).
    # dcm2niix will also include a field called "BidsGuess" containing an
    # "educated guess" as to what the BIDS label of this scan should be.
    # This seems to work well most of the time, with the odd hiccup. Manual
    # code below catches the "hiccups".

    # call to dcm2niix. generates a bunch of *.json text files in the current working directory.
    os.system(f"dcm2niix -s y -a y -b o -o $PWD -f 'output_%s_%d' -w 0 -m 1 -i y {dicom_dir} &>>log.txt")

    # identify all of these text files
    jsonFiles = glob('output*.json')

    # sort the found files by series number (this is probably unnecessary).
    # Raw string avoids the invalid '\d' escape-sequence warning.
    jsonFiles.sort(key=lambda f: int(re.search(r'(?<=output_)\d+(?=_)', f).group()))

    # initialize a dictionary to hold xnattagger data
    tagger_data = dict(t1 = [], t1_move = [], t2 = [], t2_move = [], bold = [])

    # looping over the json sidecar files...
    for f in jsonFiles:

        # read in the json sidecar
        json_data = xnatqa.read_json_file(f)

        # pull out some useful meta-data that should be contained within every file
        series_description = json_data['SeriesDescription']
        series_number = json_data['SeriesNumber']
        image_type = json_data['ImageType']
        # remove the last element of the image_type list
        del image_type[-1]

        # skip sidecars that carry no BidsGuess at all.
        # BUG FIX: the original read json_data['BidsGuess'] BEFORE this check,
        # so a missing key raised KeyError and the guard was unreachable.
        if 'BidsGuess' not in json_data:
            continue
        bids_guess = json_data['BidsGuess']

        # if that guess was "func"
        if bids_guess[0] == 'func':

            bids_string = bids_guess[1]
            bids_suffix = extract_bids_suffix(bids_string)

            # if the suffix is "bold"...
            if bids_suffix == 'bold':

                # BidsGuess does NOT properly identify "SBREF" scans; catch
                # them by searching the series description for the keyword.
                sbref_keywords = ['sbref']

                if any(kw in series_description.lower() for kw in sbref_keywords):
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Is an SBREF scan. Ignoring...')
                    print()
                    continue

                # double check that it is, in fact, a BOLD image: a rudimentary
                # check for glaring errors via keywords in the series description
                exclude_keywords = ['t1', 'anat', 'mprage', 'memprage']
                if any(kw in series_description.lower() for kw in exclude_keywords):
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Relableing to T1...')
                    print()

                    tagger_data['t1'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T1w'})
                    continue

                exclude_keywords = ['t2']
                if any(kw in series_description.lower() for kw in exclude_keywords):
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f'Bids Guess: {bids_guess}')
                    print('Relableing to T2...')
                    print()

                    tagger_data['t2'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T2w'})
                    continue

                print()
                print(f'Series Number: {series_number}')
                print(f'Series Description: {series_description}')
                print(f'Bids Guess: {bids_guess}')
                print('Labeling as BOLD...')
                print()

                tagger_data['bold'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#BOLD'})
                continue

        # if the BidsGuess was "anat"...
        if bids_guess[0] == 'anat':

            bids_string = bids_guess[1]
            bids_suffix = extract_bids_suffix(bids_string)

            # if the BIDS suffix was T1w...
            if bids_suffix == 'T1w':

                slice_thickness = json_data['SliceThickness']
                NonlinearGradientCorrection = json_data['NonlinearGradientCorrection']

                if slice_thickness == 8:
                    # 8 mm slices mark this as a T1w VNAV setter scan
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Labeling as T1w_move...')
                    print()

                    tagger_data['t1_move'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T1w_MOVE'})
                    continue

                elif not NonlinearGradientCorrection and 'NumberOfAverages' in json_data:
                    # this is a T1w scan
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Labeling as T1w...')
                    print()

                    tagger_data['t1'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T1w'})
                    continue

            elif bids_suffix == 'T2w':

                if json_data['SliceThickness'] == 8:
                    # this is a T2w VNAV setter scan
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Labeling as T2w_move...')
                    print()

                    tagger_data['t2_move'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T2w_move'})
                    continue

                else:
                    # this is a T2w scan
                    print()
                    print(f'Series Number: {series_number}')
                    print(f'Series Description: {series_description}')
                    print(f"Bids Guess: {bids_guess}")
                    print('Labeling as T2w...')
                    print()

                    tagger_data['t2'].append({'series_description': series_description, 'image_type': image_type, 'tag': '#T2w'})
                    continue

    # write tagger data to a yaml file. used by the xnattagger package for
    # uploading tags to XNAT. See github.com/harvard-nrg/xnattager
    # NOTE(review): opened in append mode, so repeated runs in the same working
    # directory accumulate multiple YAML documents in one file — confirm this
    # is intended; 'w' may be safer.
    with open('tagger.yaml', 'a') as file:
        yaml.dump(tagger_data, file)
|
|
182
|
+
|
|
183
|
+
def update_xnat_tags(MRsession):
    """Push the tags described by ./tagger.yaml to the XNAT session *MRsession*.

    Requires an xnat authentication file (~/.xnat_auth) created beforehand;
    see the YAXIL documentation. Raises RuntimeError when it is missing.
    """
    # make sure an xnat authentication file has already been created.
    # Explicit raise instead of `assert`, which is stripped under `python -O`
    # and therefore must not be used for runtime validation.
    if not os.path.exists(os.path.expanduser('~/.xnat_auth')):
        raise RuntimeError('xnat authentication needs to be run')

    # run the command
    # NOTE(review): MRsession is interpolated into a shell command line;
    # confirm it cannot contain shell metacharacters, or switch to
    # subprocess.run([...]) with a list argument.
    os.system(f'xnat_tagger.py --label {MRsession} --target-modality all --xnat-alias xnat --config tagger.yaml')
|
|
190
|
+
|
|
191
|
+
def tag_scans(dicom_dir, MRsession):
    """Tag every scan in *MRsession* based on the DICOM files under *dicom_dir*.

    Convenience wrapper around the two-step tagging workflow: first generate
    the xnattagger config file from the DICOMs, then upload the resulting
    tags to the XNAT session.
    """
    # step 1: build tagger.yaml from the DICOM sidecar metadata
    generate_tagger_config(dicom_dir)
    # step 2: apply the tags to the XNAT session
    update_xnat_tags(MRsession)
|
|
198
|
+
|
|
@@ -0,0 +1,28 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: xnatqa
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: A workflow for automatically labeling and running QA on MRI scans within XNAT
|
|
5
|
+
Author-email: Kyle Kurkela <kkurkela@bu.edu>
|
|
6
|
+
License-Expression: MIT
|
|
7
|
+
Project-URL: Homepage, https://github.com/kakurk/auto_labeler
|
|
8
|
+
Project-URL: Issues, https://github.com/kakurk/auto_labeler/issues
|
|
9
|
+
Classifier: Programming Language :: Python :: 3
|
|
10
|
+
Classifier: Operating System :: OS Independent
|
|
11
|
+
Requires-Python: >=3.9
|
|
12
|
+
Description-Content-Type: text/markdown
|
|
13
|
+
License-File: LICENSE
|
|
14
|
+
Requires-Dist: xnattager
|
|
15
|
+
Requires-Dist: yaxil
|
|
16
|
+
Requires-Dist: yaml
|
|
17
|
+
Requires-Dist: glob
|
|
18
|
+
Dynamic: license-file
|
|
19
|
+
|
|
20
|
+
# xnatqa
|
|
21
|
+
|
|
22
|
+
A workflow built on top of the BOLDQC, ANATQC, yaxil, and xnattager packages from Harvard.
|
|
23
|
+
|
|
24
|
+
The goal of this workflow is to automatically tag all scans in a newly created scanning session within an XNAT instance as BOLD or ANAT and then automatically launching the BOLDQC and ANATQC routines for the respective scan types.
|
|
25
|
+
|
|
26
|
+
Please see BOLDQC, ANATQC, yaxil, and xnattager for more information.
|
|
27
|
+
|
|
28
|
+
BOLDQC and ANATQC are implemented as singularity containers that are housed as modules on the SCC. Recipes for building those containers can be found here and here.
|
|
@@ -0,0 +1,11 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
README.md
|
|
3
|
+
pyproject.toml
|
|
4
|
+
xnatqa/__init__.py
|
|
5
|
+
xnatqa.egg-info/PKG-INFO
|
|
6
|
+
xnatqa.egg-info/SOURCES.txt
|
|
7
|
+
xnatqa.egg-info/dependency_links.txt
|
|
8
|
+
xnatqa.egg-info/requires.txt
|
|
9
|
+
xnatqa.egg-info/top_level.txt
|
|
10
|
+
xnatqa/launch/__init__.py
|
|
11
|
+
xnatqa/tag/__init__.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
xnatqa
|