dbdicom 0.2.6__py3-none-any.whl → 0.3.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbdicom/__init__.py +1 -28
- dbdicom/api.py +287 -0
- dbdicom/const.py +144 -0
- dbdicom/dataset.py +721 -0
- dbdicom/dbd.py +736 -0
- dbdicom/external/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/external/dcm4che/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/register.py +527 -0
- dbdicom/{ds/types → sop_classes}/ct_image.py +2 -16
- dbdicom/{ds/types → sop_classes}/enhanced_mr_image.py +153 -26
- dbdicom/{ds/types → sop_classes}/mr_image.py +185 -140
- dbdicom/sop_classes/parametric_map.py +310 -0
- dbdicom/sop_classes/secondary_capture.py +140 -0
- dbdicom/sop_classes/segmentation.py +311 -0
- dbdicom/{ds/types → sop_classes}/ultrasound_multiframe_image.py +1 -15
- dbdicom/{ds/types → sop_classes}/xray_angiographic_image.py +2 -17
- dbdicom/utils/arrays.py +36 -0
- dbdicom/utils/files.py +0 -20
- dbdicom/utils/image.py +10 -629
- dbdicom-0.3.1.dist-info/METADATA +28 -0
- dbdicom-0.3.1.dist-info/RECORD +53 -0
- dbdicom/create.py +0 -457
- dbdicom/dro.py +0 -174
- dbdicom/ds/__init__.py +0 -10
- dbdicom/ds/create.py +0 -63
- dbdicom/ds/dataset.py +0 -869
- dbdicom/ds/dictionaries.py +0 -620
- dbdicom/ds/types/parametric_map.py +0 -226
- dbdicom/extensions/__init__.py +0 -9
- dbdicom/extensions/dipy.py +0 -448
- dbdicom/extensions/elastix.py +0 -503
- dbdicom/extensions/matplotlib.py +0 -107
- dbdicom/extensions/numpy.py +0 -271
- dbdicom/extensions/scipy.py +0 -1512
- dbdicom/extensions/skimage.py +0 -1030
- dbdicom/extensions/sklearn.py +0 -243
- dbdicom/extensions/vreg.py +0 -1390
- dbdicom/manager.py +0 -2132
- dbdicom/message.py +0 -119
- dbdicom/pipelines.py +0 -66
- dbdicom/record.py +0 -1893
- dbdicom/types/database.py +0 -107
- dbdicom/types/instance.py +0 -231
- dbdicom/types/patient.py +0 -40
- dbdicom/types/series.py +0 -2874
- dbdicom/types/study.py +0 -58
- dbdicom-0.2.6.dist-info/METADATA +0 -72
- dbdicom-0.2.6.dist-info/RECORD +0 -66
- {dbdicom-0.2.6.dist-info → dbdicom-0.3.1.dist-info}/WHEEL +0 -0
- {dbdicom-0.2.6.dist-info → dbdicom-0.3.1.dist-info}/licenses/LICENSE +0 -0
- {dbdicom-0.2.6.dist-info → dbdicom-0.3.1.dist-info}/top_level.txt +0 -0
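Most of the renames above move the SOP class modules from dbdicom/ds/types to a new dbdicom/sop_classes package, so code that imported them directly must update its import paths. A minimal sketch of the migration, using module names taken from the file list above (whether each module's public names survived the move unchanged is an assumption):

# 0.2.6 layout (removed): SOP class modules lived under dbdicom.ds.types
#   from dbdicom.ds.types import mr_image
# 0.3.1 layout: the same module now lives under dbdicom.sop_classes
# (module path from the file list; unchanged contents are an assumption)
from dbdicom.sop_classes import mr_image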
dbdicom/manager.py
DELETED
|
@@ -1,2132 +0,0 @@
|
|
|
1
|
-
"""
|
|
2
|
-
Maintains an index of all files on disk.
|
|
3
|
-
"""
|
|
4
|
-
|
|
5
|
-
import os
|
|
6
|
-
import copy
|
|
7
|
-
import timeit
|
|
8
|
-
#from tkinter import N
|
|
9
|
-
import pandas as pd
|
|
10
|
-
import numpy as np
|
|
11
|
-
import nibabel as nib
|
|
12
|
-
|
|
13
|
-
from dbdicom.message import StatusBar, Dialog
|
|
14
|
-
import dbdicom.utils.files as filetools
|
|
15
|
-
import dbdicom.utils.dcm4che as dcm4che
|
|
16
|
-
import dbdicom.utils.image as dbimage
|
|
17
|
-
import dbdicom.ds.dataset as dbdataset
|
|
18
|
-
from dbdicom.ds.create import read_dataset, SOPClass, new_dataset
|
|
19
|
-
from dbdicom.ds.dataset import DbDataset
|
|
20
|
-
|
|
21
|
-
class DatabaseCorrupted(Exception):
|
|
22
|
-
pass
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
|
|
26
|
-
class Manager():
|
|
27
|
-
"""Programming interface for reading and writing a DICOM folder."""
|
|
28
|
-
|
|
29
|
-
# TODO: Add AccessionNumber so studies can be sorted correctly without reading the files
|
|
30
|
-
# Note this makes all existing pkl files unusable - ensure backwards compatibility.
|
|
31
|
-
|
|
32
|
-
# The column labels of the register
|
|
33
|
-
columns = [
|
|
34
|
-
'PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID', 'SOPClassUID',
|
|
35
|
-
'PatientName', 'StudyDescription', 'StudyDate', 'SeriesDescription', 'SeriesNumber', 'InstanceNumber',
|
|
36
|
-
'ImageOrientationPatient', 'ImagePositionPatient', 'PixelSpacing', 'SliceThickness', 'SliceLocation', 'AcquisitionTime',
|
|
37
|
-
]
|
|
38
|
-
|
|
39
|
-
# Non-UID subset of column labels with their respective indices
|
|
40
|
-
# These are non-critical and can be set manually by users
|
|
41
|
-
_descriptives = {
|
|
42
|
-
'PatientName': 5,
|
|
43
|
-
'StudyDescription': 6,
|
|
44
|
-
'StudyDate': 7,
|
|
45
|
-
'SeriesDescription': 8,
|
|
46
|
-
'ImageOrientationPatient':11,
|
|
47
|
-
'ImagePositionPatient':12,
|
|
48
|
-
'PixelSpacing':13,
|
|
49
|
-
'SliceThickness':14,
|
|
50
|
-
'SliceLocation':15,
|
|
51
|
-
'AcquisitionTime':16,
|
|
52
|
-
}
|
|
53
|
-
|
|
54
|
-
def default(self):
|
|
55
|
-
return [None, None, None, None, None,
|
|
56
|
-
None, None, None, None, int(-1), int(-1),
|
|
57
|
-
None, None, None, float(-1.0), float(-1.0), None,
|
|
58
|
-
]
|
|
59
|
-
|
|
60
|
-
|
|
61
|
-
def __init__(self, path=None, dataframe=None, status=StatusBar(), dialog=Dialog()):
|
|
62
|
-
"""Initialise the folder with a path and objects to message to the user.
|
|
63
|
-
|
|
64
|
-
When used inside a GUI, status and dialog should be instances of the status bar and
|
|
65
|
-
dialog class defined in `wezel`.
|
|
66
|
-
|
|
67
|
-
path = None: The index manages data in memory
|
|
68
|
-
dataframe = None: no database open
|
|
69
|
-
"""
|
|
70
|
-
if dataframe is None:
|
|
71
|
-
#dataframe = pd.DataFrame(index=[], columns=self.columns)
|
|
72
|
-
dataframe = pd.DataFrame(index=[], columns=self.columns+['removed','created']) # Added 28/05/2023
|
|
73
|
-
# THIS NEEDS A MECHANISM TO PREVENT ANOTHER Manager to open the same database.
|
|
74
|
-
self.status = status
|
|
75
|
-
self.dialog = dialog
|
|
76
|
-
self.path = path
|
|
77
|
-
self.register = dataframe
|
|
78
|
-
self.dataset = {}
|
|
79
|
-
|
|
80
|
-
def scan(self, unzip=False):
|
|
81
|
-
"""
|
|
82
|
-
Reads all files in the folder and summarises key attributes in a table for faster access.
|
|
83
|
-
"""
|
|
84
|
-
# Take unzip out until test is developed - less essential feature
|
|
85
|
-
# if unzip:
|
|
86
|
-
# filetools._unzip_files(self.path, self.status)
|
|
87
|
-
|
|
88
|
-
#self.read_dataframe()
|
|
89
|
-
|
|
90
|
-
if self.path is None:
|
|
91
|
-
self.register = pd.DataFrame(index=[], columns=self.columns)
|
|
92
|
-
self.dataset = {}
|
|
93
|
-
return
|
|
94
|
-
files = filetools.all_files(self.path)
|
|
95
|
-
self.register = dbdataset.read_dataframe(
|
|
96
|
-
files,
|
|
97
|
-
self.columns+['NumberOfFrames'],
|
|
98
|
-
self.status,
|
|
99
|
-
path=self.path,
|
|
100
|
-
message='Reading database..',
|
|
101
|
-
images_only = True)
|
|
102
|
-
self.register['removed'] = False
|
|
103
|
-
self.register['created'] = False
|
|
104
|
-
# No support for multiframe data at the moment
|
|
105
|
-
self._multiframe_to_singleframe()
|
|
106
|
-
self.register.drop('NumberOfFrames', axis=1, inplace=True)
|
|
107
|
-
# For now ensure all series have just a single CIOD
|
|
108
|
-
self._split_series()
|
|
109
|
-
#self.save()
|
|
110
|
-
return self
|
|
111
|
-
|
|
112
|
-
|
|
113
|
-
def _split_series(self):
|
|
114
|
-
"""
|
|
115
|
-
Split series with multiple SOP Classes.
|
|
116
|
-
|
|
117
|
-
If a series contain instances from different SOP Classes,
|
|
118
|
-
these are separated out into multiple series with identical SOP Classes.
|
|
119
|
-
"""
|
|
120
|
-
df = self.register
|
|
121
|
-
df = df[df.removed == False]
|
|
122
|
-
|
|
123
|
-
# For each series, check if there are multiple
|
|
124
|
-
# SOP Classes in the series and split them if yes.
|
|
125
|
-
all_series = df.SeriesInstanceUID.unique()
|
|
126
|
-
for s, series in enumerate(all_series):
|
|
127
|
-
msg = 'Splitting series with multiple data types'
|
|
128
|
-
self.status.progress(s+1, len(all_series), message=msg)
|
|
129
|
-
df_series = df[df.SeriesInstanceUID == series]
|
|
130
|
-
sop_classes = df_series.SOPClassUID.unique()
|
|
131
|
-
if len(sop_classes) > 1:
|
|
132
|
-
# For each sop_class, create a new series and move all
|
|
133
|
-
# instances of that sop_class to the new series
|
|
134
|
-
study = self.parent(series)
|
|
135
|
-
series_desc = df_series.SeriesDescription.values[0]
|
|
136
|
-
for i, sop_class in enumerate(sop_classes[1:]):
|
|
137
|
-
desc = series_desc + ' [' + str(i+1) + ']'
|
|
138
|
-
new_series, _ = self.new_series(parent=study, SeriesDescription=desc)
|
|
139
|
-
df_sop_class = df_series[df_series.SOPClassUID == sop_class]
|
|
140
|
-
instances = df_sop_class.SOPInstanceUID.values.tolist()
|
|
141
|
-
moved = self.move_to_series(instances, new_series)
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
def _multiframe_to_singleframe(self):
|
|
145
|
-
"""Converts all multiframe files in the folder into single-frame files.
|
|
146
|
-
|
|
147
|
-
Reads all the multi-frame files in the folder,
|
|
148
|
-
converts them to singleframe files, and delete the original multiframe file.
|
|
149
|
-
"""
|
|
150
|
-
if self.path is None:
|
|
151
|
-
# Low priority - we are not creating multiframe data from scratch yet
|
|
152
|
-
# So will always be loaded from disk initially where the solution exists.
|
|
153
|
-
# Solution: save data in a temporary file, use the filebased conversion,
|
|
154
|
-
# the upload the solution and delete the temporary file.
|
|
155
|
-
raise ValueError('Multi-frame to single-frame conversion does not yet exist from data in memory')
|
|
156
|
-
singleframe = self.register.NumberOfFrames.isnull()
|
|
157
|
-
multiframe = singleframe == False
|
|
158
|
-
nr_multiframe = multiframe.sum()
|
|
159
|
-
if nr_multiframe != 0:
|
|
160
|
-
cnt=0
|
|
161
|
-
for relpath in self.register[multiframe].index.values:
|
|
162
|
-
cnt+=1
|
|
163
|
-
msg = "Converting multiframe file " + relpath
|
|
164
|
-
self.status.progress(cnt, nr_multiframe, message=msg)
|
|
165
|
-
#
|
|
166
|
-
# Create these in the dbdicom folder, not in the original folder.
|
|
167
|
-
#
|
|
168
|
-
filepath = os.path.join(self.path, relpath)
|
|
169
|
-
singleframe_files = dcm4che.split_multiframe(filepath)
|
|
170
|
-
if singleframe_files != []:
|
|
171
|
-
# add the single frame files to the dataframe
|
|
172
|
-
df = dbdataset.read_dataframe(singleframe_files, self.columns, path=self.path)
|
|
173
|
-
df['removed'] = False
|
|
174
|
-
df['created'] = False
|
|
175
|
-
self.register = pd.concat([self.register, df])
|
|
176
|
-
# delete the original multiframe
|
|
177
|
-
os.remove(filepath)
|
|
178
|
-
# drop the file also if the conversion has failed
|
|
179
|
-
self.register.drop(index=relpath, inplace=True)
|
|
180
|
-
|
|
181
|
-
def _pkl(self):
|
|
182
|
-
""" Returns the file path of the .pkl file"""
|
|
183
|
-
if self.path is None:
|
|
184
|
-
return None
|
|
185
|
-
filename = os.path.basename(os.path.normpath(self.path)) + ".pkl"
|
|
186
|
-
return os.path.join(self.path, filename)
|
|
187
|
-
|
|
188
|
-
def npy(self, uid):
|
|
189
|
-
# Not in use - default path for temporary storage in numoy format
|
|
190
|
-
path = os.path.join(self.path, "dbdicom_npy")
|
|
191
|
-
if not os.path.isdir(path):
|
|
192
|
-
os.mkdir(path)
|
|
193
|
-
file = os.path.join(path, uid + '.npy')
|
|
194
|
-
return file
|
|
195
|
-
|
|
196
|
-
def _write_df(self):
|
|
197
|
-
""" Writes the dataFrame as a .pkl file"""
|
|
198
|
-
if self.path is None:
|
|
199
|
-
return
|
|
200
|
-
file = self._pkl()
|
|
201
|
-
self.register.to_pickle(file)
|
|
202
|
-
|
|
203
|
-
def _read_df(self):
|
|
204
|
-
"""Reads the dataFrame from a .pkl file """
|
|
205
|
-
if self.path is None:
|
|
206
|
-
return
|
|
207
|
-
file = self._pkl()
|
|
208
|
-
self.register = pd.read_pickle(file)
|
|
209
|
-
|
|
210
|
-
def write_csv(self, file):
|
|
211
|
-
""" Writes the dataFrame as a .csv file for visual inspection"""
|
|
212
|
-
self.register.to_csv(file)
|
|
213
|
-
|
|
214
|
-
def filepath(self, key):
|
|
215
|
-
"""Return the full filepath for a given relative path.
|
|
216
|
-
|
|
217
|
-
Returns None for data that live in memory only."""
|
|
218
|
-
# Needs a formal test for completeness
|
|
219
|
-
if self.path is None:
|
|
220
|
-
return None
|
|
221
|
-
return os.path.join(self.path, key)
|
|
222
|
-
|
|
223
|
-
def filepaths(self, *args, **kwargs):
|
|
224
|
-
"""Return a list of full filepaths for all dicom files in the folder"""
|
|
225
|
-
# Needs a formal test for completeness
|
|
226
|
-
return [self.filepath(key) for key in self.keys(*args, **kwargs)]
|
|
227
|
-
|
|
228
|
-
def open(self, path=None, unzip=False):
|
|
229
|
-
"""Opens a DICOM folder for read and write.
|
|
230
|
-
|
|
231
|
-
Reads the contents of the folder and summarises all DICOM files
|
|
232
|
-
in a dataframe for faster access next time. The dataframe is saved
|
|
233
|
-
as a pkl file when the folder is closed with `.close()`.
|
|
234
|
-
All non-DICOM files in the folder are ignored.
|
|
235
|
-
|
|
236
|
-
Args:
|
|
237
|
-
path: The full path to the directory that is to be opened.
|
|
238
|
-
|
|
239
|
-
"""
|
|
240
|
-
if path is not None:
|
|
241
|
-
self.path = path
|
|
242
|
-
if self.path is None:
|
|
243
|
-
raise ValueError('Cannot open database - no path is specified')
|
|
244
|
-
if os.path.exists(self._pkl()):
|
|
245
|
-
try:
|
|
246
|
-
self._read_df()
|
|
247
|
-
except:
|
|
248
|
-
# If the file is corrupted, delete it and load again
|
|
249
|
-
os.remove(self._pkl())
|
|
250
|
-
self.scan(unzip=unzip)
|
|
251
|
-
else:
|
|
252
|
-
self.scan(unzip=unzip)
|
|
253
|
-
return self
|
|
254
|
-
|
|
255
|
-
|
|
256
|
-
def type(self, uid=None, key=None):
|
|
257
|
-
"""Is the UID a patient, study, series or dataset"""
|
|
258
|
-
|
|
259
|
-
if uid is None:
|
|
260
|
-
return None
|
|
261
|
-
if uid == 'Database':
|
|
262
|
-
return uid
|
|
263
|
-
|
|
264
|
-
if key is None:
|
|
265
|
-
df = self.register
|
|
266
|
-
type = df.columns[df.isin([uid]).any()].values
|
|
267
|
-
if type.size == 0: # uid does not exists in the database
|
|
268
|
-
return None
|
|
269
|
-
else:
|
|
270
|
-
type = type[0]
|
|
271
|
-
else:
|
|
272
|
-
df = self.register.loc[key,:]
|
|
273
|
-
type = df[df.isin([uid])].index[0]
|
|
274
|
-
|
|
275
|
-
if type == 'PatientID':
|
|
276
|
-
return 'Patient'
|
|
277
|
-
if type == 'StudyInstanceUID':
|
|
278
|
-
return 'Study'
|
|
279
|
-
if type == 'SeriesInstanceUID':
|
|
280
|
-
return 'Series'
|
|
281
|
-
if type == 'SOPInstanceUID':
|
|
282
|
-
return 'Instance'
|
|
283
|
-
|
|
284
|
-
|
|
285
|
-
def tree(self, depth=3):
|
|
286
|
-
|
|
287
|
-
df = self.register
|
|
288
|
-
if df is None:
|
|
289
|
-
raise ValueError('Cannot build tree - no database open')
|
|
290
|
-
df = df[df.removed == False]
|
|
291
|
-
df.sort_values(['PatientName','StudyDate','SeriesNumber','InstanceNumber'], inplace=True)
|
|
292
|
-
|
|
293
|
-
database = {'uid': self.path}
|
|
294
|
-
database['patients'] = []
|
|
295
|
-
for uid_patient in df.PatientID.dropna().unique():
|
|
296
|
-
patient = {'uid': uid_patient}
|
|
297
|
-
database['patients'].append(patient)
|
|
298
|
-
if depth >= 1:
|
|
299
|
-
df_patient = df[df.PatientID == uid_patient]
|
|
300
|
-
patient['key'] = df_patient.index[0]
|
|
301
|
-
patient['studies'] = []
|
|
302
|
-
for uid_study in df_patient.StudyInstanceUID.dropna().unique():
|
|
303
|
-
study = {'uid': uid_study}
|
|
304
|
-
patient['studies'].append(study)
|
|
305
|
-
if depth >= 2:
|
|
306
|
-
df_study = df_patient[df_patient.StudyInstanceUID == uid_study]
|
|
307
|
-
study['key'] = df_study.index[0]
|
|
308
|
-
study['series'] = []
|
|
309
|
-
for uid_sery in df_study.SeriesInstanceUID.dropna().unique():
|
|
310
|
-
series = {'uid': uid_sery}
|
|
311
|
-
study['series'].append(series)
|
|
312
|
-
if depth == 3:
|
|
313
|
-
df_series = df_study[df_study.SeriesInstanceUID == uid_sery]
|
|
314
|
-
series['key'] = df_series.index[0]
|
|
315
|
-
return database
|
|
316
|
-
|
|
317
|
-
|
|
318
|
-
def keys(self,
|
|
319
|
-
uid = None,
|
|
320
|
-
patient = None,
|
|
321
|
-
study = None,
|
|
322
|
-
series = None,
|
|
323
|
-
instance = None,
|
|
324
|
-
dropna = False):
|
|
325
|
-
"""Return a list of indices for all dicom datasets managed by the index.
|
|
326
|
-
|
|
327
|
-
These indices are strings with unique relative paths
|
|
328
|
-
that either link to an existing file in the database or can be used for
|
|
329
|
-
writing a database that is in memory.
|
|
330
|
-
"""
|
|
331
|
-
|
|
332
|
-
df = self.register
|
|
333
|
-
if df is None:
|
|
334
|
-
raise ValueError('Cant return dicom files - no database open')
|
|
335
|
-
|
|
336
|
-
# If no arguments are provided
|
|
337
|
-
if (uid is None) & (patient is None) & (study is None) & (series is None) & (instance is None):
|
|
338
|
-
return []
|
|
339
|
-
|
|
340
|
-
if isinstance(uid, list):
|
|
341
|
-
if 'Database' in uid:
|
|
342
|
-
return self.keys('Database', dropna=dropna)
|
|
343
|
-
|
|
344
|
-
not_deleted = df.removed == False
|
|
345
|
-
|
|
346
|
-
if uid == 'Database':
|
|
347
|
-
keys = not_deleted[not_deleted].index.tolist()
|
|
348
|
-
if dropna:
|
|
349
|
-
keys = [key for key in keys if self.register.at[key,'SOPInstanceUID'] is not None]
|
|
350
|
-
return keys
|
|
351
|
-
|
|
352
|
-
# If arguments are provided, create a list of unique datasets
|
|
353
|
-
# keys = []
|
|
354
|
-
if uid is not None:
|
|
355
|
-
if not isinstance(uid, list):
|
|
356
|
-
uid = [uid]
|
|
357
|
-
uid = [i for i in uid if i is not None]
|
|
358
|
-
rows = np.isin(df, uid).any(axis=1) & not_deleted
|
|
359
|
-
if patient is not None:
|
|
360
|
-
if not isinstance(patient, list):
|
|
361
|
-
rows = (df.PatientID==patient) & not_deleted
|
|
362
|
-
else:
|
|
363
|
-
patient = [i for i in patient if i is not None]
|
|
364
|
-
rows = df.PatientID.isin(patient) & not_deleted
|
|
365
|
-
if study is not None:
|
|
366
|
-
if not isinstance(study, list):
|
|
367
|
-
rows = (df.StudyInstanceUID==study) & not_deleted
|
|
368
|
-
else:
|
|
369
|
-
study = [i for i in study if i is not None]
|
|
370
|
-
rows = df.StudyInstanceUID.isin(study) & not_deleted
|
|
371
|
-
if series is not None:
|
|
372
|
-
if not isinstance(series, list):
|
|
373
|
-
rows = (df.SeriesInstanceUID==series) & not_deleted
|
|
374
|
-
else:
|
|
375
|
-
series = [i for i in series if i is not None]
|
|
376
|
-
rows = df.SeriesInstanceUID.isin(series) & not_deleted
|
|
377
|
-
if instance is not None:
|
|
378
|
-
if not isinstance(instance, list):
|
|
379
|
-
rows = (df.SOPInstanceUID==instance) & not_deleted
|
|
380
|
-
else:
|
|
381
|
-
instance = [i for i in instance if i is not None]
|
|
382
|
-
rows = df.SOPInstanceUID.isin(instance) & not_deleted
|
|
383
|
-
|
|
384
|
-
keys = df.index[rows].tolist()
|
|
385
|
-
if dropna:
|
|
386
|
-
keys = [key for key in keys if self.register.at[key,'SOPInstanceUID'] is not None]
|
|
387
|
-
return keys
|
|
388
|
-
|
|
389
|
-
def value(self, key, column):
|
|
390
|
-
try:
|
|
391
|
-
if isinstance(key, pd.Index):
|
|
392
|
-
return self.register.loc[key, column].values
|
|
393
|
-
if not isinstance(key, list) and not isinstance(column, list):
|
|
394
|
-
return self.register.at[key, column]
|
|
395
|
-
else:
|
|
396
|
-
return self.register.loc[key, column].values
|
|
397
|
-
except:
|
|
398
|
-
return None
|
|
399
|
-
|
|
400
|
-
def parent(self, uid=None):
|
|
401
|
-
# For consistency with other definitions
|
|
402
|
-
# Allow uid to be list and return list if multiple parents are found
|
|
403
|
-
"""Returns the UID of the parent object"""
|
|
404
|
-
|
|
405
|
-
keys = self.keys(uid)
|
|
406
|
-
if keys == []:
|
|
407
|
-
return None
|
|
408
|
-
row = self.register.loc[keys[0]].values.tolist()
|
|
409
|
-
i = row.index(uid)
|
|
410
|
-
if self.columns[i] == 'PatientID':
|
|
411
|
-
return 'Database'
|
|
412
|
-
else:
|
|
413
|
-
return row[i-1]
|
|
414
|
-
|
|
415
|
-
|
|
416
|
-
def filter(self, uids=None, **kwargs):
|
|
417
|
-
uids = [id for id in uids if id is not None]
|
|
418
|
-
if not kwargs:
|
|
419
|
-
return uids
|
|
420
|
-
vals = list(kwargs.values())
|
|
421
|
-
attr = list(kwargs.keys())
|
|
422
|
-
return [id for id in uids if self.get_values(attr, uid=id) == vals]
|
|
423
|
-
#return [id for id in uids if function(self.get_values(attr, uid=id), vals)]
|
|
424
|
-
|
|
425
|
-
|
|
426
|
-
def filter_instances(self, df, select={}, **filters):
|
|
427
|
-
df.dropna(inplace=True)
|
|
428
|
-
filters = {**select, **filters}
|
|
429
|
-
if filters == {}:
|
|
430
|
-
return df
|
|
431
|
-
vals = list(filters.values())
|
|
432
|
-
attr = list(filters.keys())
|
|
433
|
-
# keys = [key for key in df.index if self.get_values(attr, [key]) == vals]
|
|
434
|
-
keys = []
|
|
435
|
-
for key in df.index:
|
|
436
|
-
v = self.get_values(attr, [key])
|
|
437
|
-
append = True
|
|
438
|
-
for i, vi in enumerate(v):
|
|
439
|
-
if isinstance(vals[i], np.ndarray):
|
|
440
|
-
if vi not in vals[i]:
|
|
441
|
-
append = False
|
|
442
|
-
break
|
|
443
|
-
else:
|
|
444
|
-
if vi != vals[i]:
|
|
445
|
-
append = False
|
|
446
|
-
break
|
|
447
|
-
if append:
|
|
448
|
-
keys.append(key)
|
|
449
|
-
return df[keys]
|
|
450
|
-
|
|
451
|
-
|
|
452
|
-
def instances(self, uid=None, keys=None, sort=True, sortby=None, images=False, select={}, **filters):
|
|
453
|
-
if keys is None:
|
|
454
|
-
keys = self.keys(uid)
|
|
455
|
-
if sort:
|
|
456
|
-
if sortby is None:
|
|
457
|
-
sortby = ['PatientName', 'StudyDescription', 'SeriesNumber', 'InstanceNumber']
|
|
458
|
-
df = self.register.loc[keys, sortby + ['SOPInstanceUID']]
|
|
459
|
-
df.sort_values(sortby, inplace=True)
|
|
460
|
-
df = df.SOPInstanceUID
|
|
461
|
-
else:
|
|
462
|
-
df = self.register.loc[keys,'SOPInstanceUID']
|
|
463
|
-
df = self.filter_instances(df, select=select, **filters)
|
|
464
|
-
if images == True:
|
|
465
|
-
keys = [key for key in df.index if self.get_values('Rows', [key]) is not None]
|
|
466
|
-
df = df[keys]
|
|
467
|
-
return df
|
|
468
|
-
|
|
469
|
-
|
|
470
|
-
def series(self, uid=None, keys=None, sort=True, sortby=['PatientName', 'StudyDescription', 'SeriesNumber'], **kwargs):
|
|
471
|
-
if keys is None:
|
|
472
|
-
keys = self.keys(uid)
|
|
473
|
-
if sort:
|
|
474
|
-
if not isinstance(sortby, list):
|
|
475
|
-
sortby = [sortby]
|
|
476
|
-
df = self.register.loc[keys, sortby + ['SeriesInstanceUID']]
|
|
477
|
-
df.sort_values(sortby, inplace=True)
|
|
478
|
-
df = df.SeriesInstanceUID
|
|
479
|
-
else:
|
|
480
|
-
df = self.register.loc[keys,'SeriesInstanceUID']
|
|
481
|
-
uids = df.unique().tolist()
|
|
482
|
-
return self.filter(uids, **kwargs)
|
|
483
|
-
|
|
484
|
-
|
|
485
|
-
def studies(self, uid=None, keys=None, sort=True, sortby=['PatientName', 'StudyDescription'], **kwargs):
|
|
486
|
-
if keys is None:
|
|
487
|
-
keys = self.keys(uid)
|
|
488
|
-
if sort:
|
|
489
|
-
df = self.register.loc[keys, sortby + ['StudyInstanceUID']]
|
|
490
|
-
df.sort_values(sortby, inplace=True)
|
|
491
|
-
df = df.StudyInstanceUID
|
|
492
|
-
else:
|
|
493
|
-
df = self.register.loc[keys,'StudyInstanceUID']
|
|
494
|
-
uids = df.unique().tolist()
|
|
495
|
-
return self.filter(uids, **kwargs)
|
|
496
|
-
|
|
497
|
-
|
|
498
|
-
def patients(self, uid=None, keys=None, sort=True, sortby=['PatientName'], **kwargs):
|
|
499
|
-
if keys is None:
|
|
500
|
-
keys = self.keys(uid)
|
|
501
|
-
if sort:
|
|
502
|
-
df = self.register.loc[keys, sortby + ['PatientID']]
|
|
503
|
-
df.sort_values(sortby, inplace=True)
|
|
504
|
-
df = df.PatientID
|
|
505
|
-
else:
|
|
506
|
-
df = self.register.loc[keys,'PatientID']
|
|
507
|
-
uids = df.unique().tolist()
|
|
508
|
-
return self.filter(uids, **kwargs)
|
|
509
|
-
|
|
510
|
-
|
|
511
|
-
def get_instance_dataset(self, key):
|
|
512
|
-
|
|
513
|
-
"""Gets a datasets for a single instance
|
|
514
|
-
|
|
515
|
-
Datasets in memory will be returned.
|
|
516
|
-
If they are not in memory, and the database exists on disk, they will be read from disk.
|
|
517
|
-
If they are not in memory, and the database does not exist on disk, an exception is raised.
|
|
518
|
-
"""
|
|
519
|
-
if key in self.dataset:
|
|
520
|
-
# If in memory, get from memory
|
|
521
|
-
return self.dataset[key]
|
|
522
|
-
# If not in memory, read from disk
|
|
523
|
-
file = self.filepath(key)
|
|
524
|
-
if file is None: # No dataset assigned yet
|
|
525
|
-
return
|
|
526
|
-
if not os.path.exists(file): # New instance, series, study or patient
|
|
527
|
-
return
|
|
528
|
-
return read_dataset(file, self.dialog)
|
|
529
|
-
|
|
530
|
-
|
|
531
|
-
def get_dataset(self, uid, keys=None, message=None):
|
|
532
|
-
"""Gets a list of datasets for a single record
|
|
533
|
-
|
|
534
|
-
Datasets in memory will be returned.
|
|
535
|
-
If they are not in memory, and the database exists on disk, they will be read from disk.
|
|
536
|
-
If they are not in memory, and the database does not exist on disk, an exception is raised.
|
|
537
|
-
"""
|
|
538
|
-
if uid is None: # empty record
|
|
539
|
-
return
|
|
540
|
-
if keys is None:
|
|
541
|
-
keys = self.keys(uid)
|
|
542
|
-
dataset = []
|
|
543
|
-
for key in keys:
|
|
544
|
-
ds = self.get_instance_dataset(key)
|
|
545
|
-
dataset.append(ds)
|
|
546
|
-
if self.type(uid, keys[0]) == 'Instance':
|
|
547
|
-
if dataset == []:
|
|
548
|
-
return
|
|
549
|
-
else:
|
|
550
|
-
return dataset[0]
|
|
551
|
-
else:
|
|
552
|
-
return dataset
|
|
553
|
-
|
|
554
|
-
|
|
555
|
-
def _get_values(self, keys, attr):
|
|
556
|
-
"""Helper function"""
|
|
557
|
-
|
|
558
|
-
#ds = self._get_dataset(instances)
|
|
559
|
-
ds = None
|
|
560
|
-
for key in keys:
|
|
561
|
-
ds = self.get_instance_dataset(key)
|
|
562
|
-
if ds is not None:
|
|
563
|
-
break
|
|
564
|
-
if ds is None:
|
|
565
|
-
return [None] * len(attr)
|
|
566
|
-
else:
|
|
567
|
-
return ds.get_values(attr)
|
|
568
|
-
|
|
569
|
-
|
|
570
|
-
def series_header(self, key):
|
|
571
|
-
"""Attributes and values inherited from series, study and patient"""
|
|
572
|
-
|
|
573
|
-
attr_patient = ['PatientID', 'PatientName']
|
|
574
|
-
attr_study = ['StudyInstanceUID', 'StudyDescription', 'StudyDate']
|
|
575
|
-
attr_series = ['SeriesInstanceUID', 'SeriesDescription', 'SeriesNumber']
|
|
576
|
-
|
|
577
|
-
parent = self.register.at[key, 'SeriesInstanceUID']
|
|
578
|
-
keys = self.keys(series=parent, dropna=True)
|
|
579
|
-
if keys != []:
|
|
580
|
-
attr = list(set(dbdataset.module_patient() + dbdataset.module_study() + dbdataset.module_series()))
|
|
581
|
-
vals = self._get_values(keys, attr)
|
|
582
|
-
else:
|
|
583
|
-
parent = self.register.at[key, 'StudyInstanceUID']
|
|
584
|
-
keys = self.keys(study=parent, dropna=True)
|
|
585
|
-
if keys != []:
|
|
586
|
-
attr = list(set(dbdataset.module_patient() + dbdataset.module_study()))
|
|
587
|
-
vals = self._get_values(keys, attr)
|
|
588
|
-
attr += attr_series
|
|
589
|
-
vals += self.value(key, attr_series).tolist()
|
|
590
|
-
else:
|
|
591
|
-
parent = self.register.at[key, 'PatientID']
|
|
592
|
-
keys = self.keys(patient=parent, dropna=True)
|
|
593
|
-
if keys != []:
|
|
594
|
-
attr = dbdataset.module_patient()
|
|
595
|
-
vals = self._get_values(keys, attr)
|
|
596
|
-
attr += attr_study + attr_series
|
|
597
|
-
vals += self.value(key, attr_study + attr_series).tolist()
|
|
598
|
-
else:
|
|
599
|
-
attr = attr_patient + attr_study + attr_series
|
|
600
|
-
vals = self.value(key, attr).tolist()
|
|
601
|
-
return attr, vals
|
|
602
|
-
|
|
603
|
-
|
|
604
|
-
def study_header(self, key):
|
|
605
|
-
"""Attributes and values inherited from series, study and patient"""
|
|
606
|
-
|
|
607
|
-
attr_patient = ['PatientID', 'PatientName']
|
|
608
|
-
attr_study = ['StudyInstanceUID', 'StudyDescription', 'StudyDate']
|
|
609
|
-
|
|
610
|
-
parent = self.register.at[key, 'StudyInstanceUID']
|
|
611
|
-
keys = self.keys(study=parent, dropna=True)
|
|
612
|
-
if keys != []:
|
|
613
|
-
attr = list(set(dbdataset.module_patient() + dbdataset.module_study()))
|
|
614
|
-
vals = self._get_values(keys, attr)
|
|
615
|
-
else:
|
|
616
|
-
parent = self.register.at[key, 'PatientID']
|
|
617
|
-
keys = self.keys(patient=parent, dropna=True)
|
|
618
|
-
if keys != []:
|
|
619
|
-
attr = dbdataset.module_patient()
|
|
620
|
-
vals = self._get_values(keys, attr)
|
|
621
|
-
attr += attr_study
|
|
622
|
-
vals += self.value(key, attr_study).tolist()
|
|
623
|
-
else:
|
|
624
|
-
attr = attr_patient + attr_study
|
|
625
|
-
vals = self.value(key, attr).tolist()
|
|
626
|
-
return attr, vals
|
|
627
|
-
|
|
628
|
-
|
|
629
|
-
def patient_header(self, key):
|
|
630
|
-
"""Attributes and values inherited from series, study and patient"""
|
|
631
|
-
|
|
632
|
-
attr_patient = ['PatientID', 'PatientName']
|
|
633
|
-
|
|
634
|
-
parent = self.register.at[key, 'PatientID']
|
|
635
|
-
keys = self.keys(patient=parent, dropna=True)
|
|
636
|
-
if keys != []:
|
|
637
|
-
attr = dbdataset.module_patient()
|
|
638
|
-
vals = self._get_values(keys, attr)
|
|
639
|
-
else:
|
|
640
|
-
attr = attr_patient
|
|
641
|
-
vals = self.value(key, attr).tolist()
|
|
642
|
-
return attr, vals
|
|
643
|
-
|
|
644
|
-
|
|
645
|
-
def label(self, uid=None, key=None, type=None):
|
|
646
|
-
"""Return a label to describe a row as Patient, Study, Series or Instance"""
|
|
647
|
-
|
|
648
|
-
if self.register is None:
|
|
649
|
-
raise ValueError('Cant provide labels - no database open')
|
|
650
|
-
|
|
651
|
-
if uid is None:
|
|
652
|
-
if key is None:
|
|
653
|
-
return ''
|
|
654
|
-
|
|
655
|
-
if uid == 'Database':
|
|
656
|
-
if self.path is None:
|
|
657
|
-
return 'Database [in memory]'
|
|
658
|
-
else:
|
|
659
|
-
return 'Database [' + self.path + ']'
|
|
660
|
-
|
|
661
|
-
if type is None:
|
|
662
|
-
type = self.type(uid)
|
|
663
|
-
|
|
664
|
-
if type == 'Patient':
|
|
665
|
-
if key is None:
|
|
666
|
-
key = self.keys(patient=uid)[0]
|
|
667
|
-
row = self.register.loc[key]
|
|
668
|
-
name = row.PatientName
|
|
669
|
-
#id = row.PatientID
|
|
670
|
-
label = str(name)
|
|
671
|
-
#label += ' [' + str(id) + ']'
|
|
672
|
-
return type + " {}".format(label)
|
|
673
|
-
if type == 'Study':
|
|
674
|
-
if key is None:
|
|
675
|
-
key = self.keys(study=uid)[0]
|
|
676
|
-
row = self.register.loc[key]
|
|
677
|
-
descr = row.StudyDescription
|
|
678
|
-
date = row.StudyDate
|
|
679
|
-
label = str(descr)
|
|
680
|
-
label += ' [' + str(date) + ']'
|
|
681
|
-
return type + " {}".format(label)
|
|
682
|
-
if type == 'Series':
|
|
683
|
-
if key is None:
|
|
684
|
-
key = self.keys(series=uid)[0]
|
|
685
|
-
row = self.register.loc[key]
|
|
686
|
-
descr = row.SeriesDescription
|
|
687
|
-
nr = row.SeriesNumber
|
|
688
|
-
label = str(nr).zfill(3)
|
|
689
|
-
label += ' [' + str(descr) + ']'
|
|
690
|
-
return type + " {}".format(label)
|
|
691
|
-
if type == 'Instance':
|
|
692
|
-
if key is None:
|
|
693
|
-
key = self.keys(instance=uid)[0]
|
|
694
|
-
row = self.register.loc[key]
|
|
695
|
-
nr = row.InstanceNumber
|
|
696
|
-
label = str(nr).zfill(6)
|
|
697
|
-
return SOPClass(row.SOPClassUID) + " {}".format(label)
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
def print_database(self):
|
|
701
|
-
print('---------- DATABASE --------------')
|
|
702
|
-
if self.path is None:
|
|
703
|
-
print('Location: ', 'In memory')
|
|
704
|
-
else:
|
|
705
|
-
print('Location: ', self.path)
|
|
706
|
-
for patient in self.patients('Database'):
|
|
707
|
-
print(' ' + self.label(patient, type='Patient'))
|
|
708
|
-
for study in self.studies(patient):
|
|
709
|
-
print(' ' + self.label(study, type='Study'))
|
|
710
|
-
for series in self.series(study):
|
|
711
|
-
print(' ' + self.label(series, type='Series'))
|
|
712
|
-
print(' Nr of instances: ' + str(len(self.instances(series))))
|
|
713
|
-
print('----------------------------------')
|
|
714
|
-
|
|
715
|
-
|
|
716
|
-
def print_patient(self, patient):
|
|
717
|
-
print('---------- PATIENT -------------')
|
|
718
|
-
print('' + self.label(patient, type='Patient'))
|
|
719
|
-
for study in self.studies(patient):
|
|
720
|
-
print(' ' + self.label(study, type='Study'))
|
|
721
|
-
for series in self.series(study):
|
|
722
|
-
print(' ' + self.label(series, type='Series'))
|
|
723
|
-
print(' Nr of instances: ' + str(len(self.instances(series))))
|
|
724
|
-
print('--------------------------------')
|
|
725
|
-
|
|
726
|
-
|
|
727
|
-
def print_study(self, study):
|
|
728
|
-
print('---------- STUDY ---------------')
|
|
729
|
-
print('' + self.label(study, type='Study'))
|
|
730
|
-
for series in self.series(study):
|
|
731
|
-
print(' ' + self.label(series, type='Series'))
|
|
732
|
-
print(' Nr of instances: ' + str(len(self.instances(series))))
|
|
733
|
-
print('--------------------------------')
|
|
734
|
-
|
|
735
|
-
|
|
736
|
-
def print_series(self, series):
|
|
737
|
-
print('---------- SERIES --------------')
|
|
738
|
-
instances = self.instances(series)
|
|
739
|
-
print('' + self.label(series, type='Series'))
|
|
740
|
-
print(' Nr of instances: ' + str(len(instances)))
|
|
741
|
-
for instance in self.instances(series):
|
|
742
|
-
print(' ' + self.label(instance, type='Instance'))
|
|
743
|
-
print('--------------------------------')
|
|
744
|
-
|
|
745
|
-
|
|
746
|
-
def print_instance(self, instance):
|
|
747
|
-
print('---------- INSTANCE -------------')
|
|
748
|
-
print('' + self.label(instance, type='Instance'))
|
|
749
|
-
print('--------------------------------')
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
def print(self, uid='Database', name='Database'):
|
|
753
|
-
if name=='Database':
|
|
754
|
-
self.print_database()
|
|
755
|
-
elif name=='PatientID':
|
|
756
|
-
self.print_patient(uid)
|
|
757
|
-
elif name=='StudyInstanceUID':
|
|
758
|
-
self.print_study(uid)
|
|
759
|
-
elif name=='SeriesInstanceUID':
|
|
760
|
-
self.print_series(uid)
|
|
761
|
-
elif name=='SOPInstanceUID':
|
|
762
|
-
self.print_instance(uid)
|
|
763
|
-
|
|
764
|
-
|
|
765
|
-
def read(self, *args, keys=None, message=None, **kwargs):
|
|
766
|
-
"""Read the dataset from disk.
|
|
767
|
-
"""
|
|
768
|
-
if keys is None:
|
|
769
|
-
keys = self.keys(*args, **kwargs)
|
|
770
|
-
for i, key in enumerate(keys):
|
|
771
|
-
#if message is not None:
|
|
772
|
-
# self.status.progress(i, len(keys), message)
|
|
773
|
-
# do not read if they are already in memory
|
|
774
|
-
# this could overwrite changes made in memory only
|
|
775
|
-
if not key in self.dataset:
|
|
776
|
-
instance_uid = self.value(key, 'SOPInstanceUID')
|
|
777
|
-
ds = self.get_dataset(instance_uid, [key])
|
|
778
|
-
if ds is not None:
|
|
779
|
-
self.dataset[key] = ds
|
|
780
|
-
|
|
781
|
-
def write(self, *args, keys=None, message=None, **kwargs):
|
|
782
|
-
"""Writing data from memory to disk.
|
|
783
|
-
|
|
784
|
-
This does nothing if the data are not in memory, or if the database does not exist on disk.
|
|
785
|
-
"""
|
|
786
|
-
if keys is None:
|
|
787
|
-
keys = self.keys(*args, **kwargs)
|
|
788
|
-
for i, key in enumerate(keys):
|
|
789
|
-
if key in self.dataset:
|
|
790
|
-
file = self.filepath(key)
|
|
791
|
-
if file is not None:
|
|
792
|
-
self.dataset[key].write(file, self.status)
|
|
793
|
-
|
|
794
|
-
def clear(self, *args, keys=None, **kwargs):
|
|
795
|
-
"""Clear all data from memory"""
|
|
796
|
-
|
|
797
|
-
# Instances are only cleared from memory if the database exists on disk.
|
|
798
|
-
if self.path is None:
|
|
799
|
-
return
|
|
800
|
-
|
|
801
|
-
if keys is None:
|
|
802
|
-
keys = self.keys(*args, **kwargs)
|
|
803
|
-
# write to disk first so that any changes made in memory are not lost
|
|
804
|
-
self.write(*args, keys=keys, **kwargs)
|
|
805
|
-
# then delete the instances from memory
|
|
806
|
-
for key in keys:
|
|
807
|
-
self.dataset.pop(key, None)
|
|
808
|
-
|
|
809
|
-
def close(self):
|
|
810
|
-
"""Close an open database.
|
|
811
|
-
"""
|
|
812
|
-
|
|
813
|
-
#if not self.is_open():
|
|
814
|
-
if self.register is None:
|
|
815
|
-
return True
|
|
816
|
-
# This is the case where the database exists in memory only
|
|
817
|
-
# Needs testing..
|
|
818
|
-
if self.path is None:
|
|
819
|
-
reply = self.dialog.question(
|
|
820
|
-
title = 'Closing DICOM folder',
|
|
821
|
-
message = 'Save changes before closing?',
|
|
822
|
-
cancel = True,
|
|
823
|
-
)
|
|
824
|
-
if reply == "Cancel":
|
|
825
|
-
return False
|
|
826
|
-
elif reply == "Yes":
|
|
827
|
-
path = self.dialog.directory('Please enter the full path to an existing folder')
|
|
828
|
-
if path is None:
|
|
829
|
-
return False
|
|
830
|
-
self.path = path
|
|
831
|
-
self.save()
|
|
832
|
-
#self.save('Database')
|
|
833
|
-
return self.close()
|
|
834
|
-
elif reply == "No":
|
|
835
|
-
return True
|
|
836
|
-
|
|
837
|
-
if not self.is_saved():
|
|
838
|
-
reply = self.dialog.question(
|
|
839
|
-
title = 'Closing DICOM folder',
|
|
840
|
-
message = 'Save changes before closing?',
|
|
841
|
-
cancel = True,
|
|
842
|
-
)
|
|
843
|
-
if reply == "Cancel":
|
|
844
|
-
return False
|
|
845
|
-
if reply == "Yes":
|
|
846
|
-
self.save()
|
|
847
|
-
#self.save('Database')
|
|
848
|
-
elif reply == "No":
|
|
849
|
-
self.restore()
|
|
850
|
-
|
|
851
|
-
self._write_df()
|
|
852
|
-
self.write()
|
|
853
|
-
self.register = None
|
|
854
|
-
self.path = None
|
|
855
|
-
return True
|
|
856
|
-
|
|
857
|
-
|
|
858
|
-
def is_saved(self):
|
|
859
|
-
"""Check if the folder is saved.
|
|
860
|
-
|
|
861
|
-
Returns:
|
|
862
|
-
True if the folder is saved and False otherwise.
|
|
863
|
-
"""
|
|
864
|
-
# Needs a formal test for completeness
|
|
865
|
-
if (self.register.removed==True).any():
|
|
866
|
-
return False
|
|
867
|
-
if (self.register.created==True).any():
|
|
868
|
-
return False
|
|
869
|
-
return True
|
|
870
|
-
|
|
871
|
-
def is_open(self):
|
|
872
|
-
"""Check if a database is currently open, either in memory or on disk
|
|
873
|
-
|
|
874
|
-
Returns:
|
|
875
|
-
True if a database is open and False otherwise.
|
|
876
|
-
"""
|
|
877
|
-
# Needs a formal test for completeness
|
|
878
|
-
return self.register is not None
|
|
879
|
-
|
|
880
|
-
|
|
881
|
-
def delete(self, *args, keys=None, **kwargs):
|
|
882
|
-
"""Deletes some datasets
|
|
883
|
-
|
|
884
|
-
Deleted datasets are stashed and can be recovered with restore()
|
|
885
|
-
Using save() will delete them permanently
|
|
886
|
-
"""
|
|
887
|
-
if keys is None:
|
|
888
|
-
keys = self.keys(*args, **kwargs)
|
|
889
|
-
self.register.loc[keys,'removed'] = True
|
|
890
|
-
|
|
891
|
-
|
|
892
|
-
def save(self, rows=None):
|
|
893
|
-
|
|
894
|
-
self.status.message('Saving changes..')
|
|
895
|
-
|
|
896
|
-
created = self.register.created & (self.register.removed==False)
|
|
897
|
-
removed = self.register.removed
|
|
898
|
-
if rows is not None:
|
|
899
|
-
created = created & rows
|
|
900
|
-
removed = removed & rows
|
|
901
|
-
created = created[created].index
|
|
902
|
-
removed = removed[removed].index
|
|
903
|
-
|
|
904
|
-
# delete datasets marked for removal
|
|
905
|
-
for key in removed.tolist():
|
|
906
|
-
# delete in memory
|
|
907
|
-
if key in self.dataset:
|
|
908
|
-
del self.dataset[key]
|
|
909
|
-
# delete on disk
|
|
910
|
-
file = self.filepath(key)
|
|
911
|
-
if file is not None:
|
|
912
|
-
if os.path.exists(file):
|
|
913
|
-
os.remove(file)
|
|
914
|
-
# and drop then from the dataframe
|
|
915
|
-
self.register.drop(index=removed, inplace=True)
|
|
916
|
-
|
|
917
|
-
# for new or edited data, mark as saved.
|
|
918
|
-
self.register.loc[created, 'created'] = False
|
|
919
|
-
|
|
920
|
-
self._write_df()
|
|
921
|
-
self.write()
|
|
922
|
-
|
|
923
|
-
|
|
924
|
-
def restore(self, rows=None):
|
|
925
|
-
|
|
926
|
-
created = self.register.created
|
|
927
|
-
removed = self.register.removed & (self.register.created==False)
|
|
928
|
-
if rows is not None:
|
|
929
|
-
created = created & rows
|
|
930
|
-
removed = removed & rows
|
|
931
|
-
created = created[created].index
|
|
932
|
-
removed = removed[removed].index
|
|
933
|
-
|
|
934
|
-
# permanently delete newly created datasets
|
|
935
|
-
for key in created.tolist():
|
|
936
|
-
# delete in memory
|
|
937
|
-
if key in self.dataset:
|
|
938
|
-
del self.dataset[key]
|
|
939
|
-
# if on disk, delete files
|
|
940
|
-
file = self.filepath(key)
|
|
941
|
-
if file is not None:
|
|
942
|
-
if os.path.exists(file):
|
|
943
|
-
os.remove(file)
|
|
944
|
-
self.register.drop(index=created, inplace=True)
|
|
945
|
-
|
|
946
|
-
# Restore those that were marked for removal
|
|
947
|
-
self.register.loc[removed, 'removed'] = False
|
|
948
|
-
|
|
949
|
-
self._write_df()
|
|
950
|
-
# self.write()
|
|
951
|
-
|
|
952
|
-
|
|
953
|
-
def new_row(self, data, key=None):
|
|
954
|
-
if key is None:
|
|
955
|
-
key = self.new_key()
|
|
956
|
-
if key in self.register.index:
|
|
957
|
-
self.register.loc[key,self.columns] = data
|
|
958
|
-
else:
|
|
959
|
-
df = pd.DataFrame([data], [key], columns=self.columns)
|
|
960
|
-
df['removed'] = False
|
|
961
|
-
df['created'] = True
|
|
962
|
-
try:
|
|
963
|
-
self.register = pd.concat([self.register, df])
|
|
964
|
-
except:
|
|
965
|
-
msg = 'Cannot update the header \n'
|
|
966
|
-
msg += 'Some of the new values are of the incorrect type.\n'
|
|
967
|
-
raise TypeError(msg)
|
|
968
|
-
return key
|
|
969
|
-
|
|
970
|
-
def delete_row(self, key):
|
|
971
|
-
if self.register.at[key, 'created']:
|
|
972
|
-
# If the row was newly created, it can be dropped
|
|
973
|
-
self.register.drop(index=key, inplace=True)
|
|
974
|
-
else:
|
|
975
|
-
# If this is the first modification, mark for removal
|
|
976
|
-
self.register.at[key, 'removed'] == True
|
|
977
|
-
|
|
978
|
-
|
|
979
|
-
def drop_placeholder_row(self, parent_key, missing='SOPInstanceUID'):
|
|
980
|
-
# If a parent has more than one children, and one of them is None, then delete that row.
|
|
981
|
-
if missing == 'SOPInstanceUID':
|
|
982
|
-
parent_uid = self.value(parent_key, 'SeriesInstanceUID')
|
|
983
|
-
parent_keys = self.keys(series=parent_uid)
|
|
984
|
-
elif missing == 'SeriesInstanceUID':
|
|
985
|
-
parent_uid = self.value(parent_key, 'StudyInstanceUID')
|
|
986
|
-
parent_keys = self.keys(study=parent_uid)
|
|
987
|
-
elif missing == 'StudyInstanceUID':
|
|
988
|
-
parent_uid = self.value(parent_key, 'PatientID')
|
|
989
|
-
parent_keys = self.keys(patient=parent_uid)
|
|
990
|
-
elif missing == 'PatientID':
|
|
991
|
-
parent_keys = self.register.index
|
|
992
|
-
if len(parent_keys) > 1:
|
|
993
|
-
df = self.register.loc[parent_keys, missing]
|
|
994
|
-
empty = df[df.values == None].index
|
|
995
|
-
if len(empty) == 1:
|
|
996
|
-
self.delete_row(empty[0])
|
|
997
|
-
# Return new parent key
|
|
998
|
-
if missing == 'SOPInstanceUID':
|
|
999
|
-
return self.keys(series=parent_uid)[0]
|
|
1000
|
-
if missing == 'SeriesInstanceUID':
|
|
1001
|
-
return self.keys(study=parent_uid)[0]
|
|
1002
|
-
if missing == 'StudyInstanceUID':
|
|
1003
|
-
return self.keys(patient=parent_uid)[0]
|
|
1004
|
-
if missing == 'PatientID':
|
|
1005
|
-
return self.register.index[0]
|
|
1006
|
-
return parent_key
|
|
1007
|
-
|
|
1008
|
-
|
|
1009
|
-
def update_row_data(self, key, data):
|
|
1010
|
-
|
|
1011
|
-
# If the row has been created or modified, use existing row
|
|
1012
|
-
if self.register.at[key, 'created'] == True:
|
|
1013
|
-
for i, c in enumerate(self.columns): # Same as above but faster
|
|
1014
|
-
try:
|
|
1015
|
-
self.register.at[key, c] = data[i]
|
|
1016
|
-
except:
|
|
1017
|
-
msg = 'Cannot write header value in register. \n'
|
|
1018
|
-
msg += 'The value of ' + c +' is of incorrect type.\n'
|
|
1019
|
-
msg += 'Value: ' + str(data[i])
|
|
1020
|
-
raise TypeError(msg)
|
|
1021
|
-
|
|
1022
|
-
# If the row has never been modified, save in new row and remove current
|
|
1023
|
-
else:
|
|
1024
|
-
self.register.at[key, 'removed'] = True
|
|
1025
|
-
key = self.new_row(data)
|
|
1026
|
-
|
|
1027
|
-
return key
|
|
1028
|
-
|
|
1029
|
-
|
|
1030
|
-
def clone_study_data(self, key, **kwargs):
|
|
1031
|
-
data = self.default()
|
|
1032
|
-
data[0] = self.value(key, 'PatientID')
|
|
1033
|
-
data[1] = dbdataset.new_uid()
|
|
1034
|
-
data[5] = self.value(key, 'PatientName')
|
|
1035
|
-
data[6] = kwargs['StudyDescription'] if 'StudyDescription' in kwargs else 'New Study'
|
|
1036
|
-
for val in kwargs:
|
|
1037
|
-
if val in self._descriptives:
|
|
1038
|
-
data[self._descriptives[val]] = kwargs[val]
|
|
1039
|
-
return data
|
|
1040
|
-
|
|
1041
|
-
def clone_series_data(self, key, study, **kwargs):
|
|
1042
|
-
data = self.register.loc[key, self.columns].values.tolist()
|
|
1043
|
-
data[2] = dbdataset.new_uid()
|
|
1044
|
-
data[3] = self.default()[3]
|
|
1045
|
-
data[4] = self.default()[4]
|
|
1046
|
-
data[8] = kwargs['SeriesDescription'] if 'SeriesDescription' in kwargs else 'New Series'
|
|
1047
|
-
data[9] = self.new_series_number(study)
|
|
1048
|
-
data[10] = self.default()[10]
|
|
1049
|
-
for val in kwargs:
|
|
1050
|
-
if val in self._descriptives:
|
|
1051
|
-
data[self._descriptives[val]] = kwargs[val]
|
|
1052
|
-
return data
|
|
1053
|
-
|
|
1054
|
-
|
|
1055
|
-
def new_patient(self, parent='Database', **kwargs):
|
|
1056
|
-
data = self.default()
|
|
1057
|
-
data[0] = dbdataset.new_uid()
|
|
1058
|
-
data[5] = kwargs['PatientName'] if 'PatientName' in kwargs else 'New Patient'
|
|
1059
|
-
for val in kwargs:
|
|
1060
|
-
if val in self._descriptives:
|
|
1061
|
-
data[self._descriptives[val]] = kwargs[val]
|
|
1062
|
-
key = self.new_row(data)
|
|
1063
|
-
return data[0], key
|
|
1064
|
-
|
|
1065
|
-
|
|
1066
|
-
def new_study(self, parent=None, key=None, **kwargs):
|
|
1067
|
-
if key is None:
|
|
1068
|
-
if parent is None:
|
|
1069
|
-
parent, key = self.new_patient()
|
|
1070
|
-
elif self.type(parent) != 'Patient':
|
|
1071
|
-
parent, key = self.new_patient(parent)
|
|
1072
|
-
else:
|
|
1073
|
-
key = self.keys(patient=parent)[0]
|
|
1074
|
-
data = self.clone_study_data(key, **kwargs)
|
|
1075
|
-
if self.value(key, 'StudyInstanceUID') is None:
|
|
1076
|
-
key = self.update_row_data(key, data)
|
|
1077
|
-
else:
|
|
1078
|
-
key = self.new_row(data)
|
|
1079
|
-
return data[1], key
|
|
1080
|
-
|
|
1081
|
-
|
|
1082
|
-
def new_series(self, parent=None, key=None, **kwargs):
|
|
1083
|
-
if key is None:
|
|
1084
|
-
if parent is None:
|
|
1085
|
-
parent, key = self.new_study()
|
|
1086
|
-
elif self.type(parent) != 'Study':
|
|
1087
|
-
#parent = self.studies(parent)[0]
|
|
1088
|
-
parent, key = self.new_study(parent)
|
|
1089
|
-
else:
|
|
1090
|
-
key = self.keys(study=parent)[0]
|
|
1091
|
-
data = self.clone_series_data(key, parent, **kwargs)
|
|
1092
|
-
if self.value(key, 'SeriesInstanceUID') is None:
|
|
1093
|
-
key = self.update_row_data(key, data) # Empty study
|
|
1094
|
-
else:
|
|
1095
|
-
key = self.new_row(data) # Study with existing series
|
|
1096
|
-
return data[2], key
|
|
1097
|
-
|
|
1098
|
-
|
|
1099
|
-
|
|
1100
|
-
def new_instance(self, parent=None, dataset=None, key=None, **kwargs):
|
|
1101
|
-
|
|
1102
|
-
if key is None:
|
|
1103
|
-
if parent is None:
|
|
1104
|
-
parent, key = self.new_series()
|
|
1105
|
-
keys = self.keys(series=parent)
|
|
1106
|
-
elif self.type(parent) != 'Series':
|
|
1107
|
-
# parent = self.series(parent)[0]
|
|
1108
|
-
parent, key = self.new_series(parent)
|
|
1109
|
-
keys = self.keys(series=parent)
|
|
1110
|
-
else:
|
|
1111
|
-
keys = self.keys(series=parent)
|
|
1112
|
-
key = keys[0]
|
|
1113
|
-
else:
|
|
1114
|
-
if parent is None:
|
|
1115
|
-
parent = self.register.at[key, 'SeriesInstanceUID']
|
|
1116
|
-
keys = self.keys(series=parent)
|
|
1117
|
-
|
|
1118
|
-
# Find largest instance number
|
|
1119
|
-
n = self.register.loc[keys,'InstanceNumber'].values
|
|
1120
|
-
n = n[n != -1]
|
|
1121
|
-
max_number=0 if n.size==0 else np.amax(n)
|
|
1122
|
-
|
|
1123
|
-
# Populate attributes in index file
|
|
1124
|
-
data = self.value(key, self.columns)
|
|
1125
|
-
data[3] = dbdataset.new_uid()
|
|
1126
|
-
data[4] = self.default()[4]
|
|
1127
|
-
#data[10] = 1 + len(self.instances(parent))
|
|
1128
|
-
#data[10] = 1 + len(self.instances(keys=self.keys(series=parent)))
|
|
1129
|
-
data[10] = 1 + max_number
|
|
1130
|
-
for val in kwargs:
|
|
1131
|
-
if val in self._descriptives:
|
|
1132
|
-
data[self._descriptives[val]] = kwargs[val]
|
|
1133
|
-
|
|
1134
|
-
if self.value(key, 'SOPInstanceUID') is None:
|
|
1135
|
-
# Empty series
|
|
1136
|
-
key = self.update_row_data(key, data)
|
|
1137
|
-
else:
|
|
1138
|
-
# Series with existing instances
|
|
1139
|
-
key = self.new_row(data)
|
|
1140
|
-
|
|
1141
|
-
if dataset is not None:
|
|
1142
|
-
self.set_instance_dataset(data[3], dataset, key)
|
|
1143
|
-
|
|
1144
|
-
return data[3], key
|
|
1145
|
-
|
|
1146
|
-
|
|
1147
|
-
def set_instance_dataset(self, instance, ds, key=None):
|
|
1148
|
-
|
|
1149
|
-
if isinstance(ds, list):
|
|
1150
|
-
if len(ds) > 1:
|
|
1151
|
-
raise ValueError('Cannot set multiple datasets to a single instance')
|
|
1152
|
-
else:
|
|
1153
|
-
ds = ds[0]
|
|
1154
|
-
if key is None:
|
|
1155
|
-
keys = self.keys(instance)
|
|
1156
|
-
if keys == []: # instance does not exist
|
|
1157
|
-
return
|
|
1158
|
-
key = keys[0]
|
|
1159
|
-
|
|
1160
|
-
data = self.register.loc[key, self.columns]
|
|
1161
|
-
data[4] = ds.SOPClassUID
|
|
1162
|
-
data[11:] = ds.get_values(self.columns[11:])
|
|
1163
|
-
key = self.update_row_data(key, data)
|
|
1164
|
-
ds.set_values(self.columns[:11], data[:11])
|
|
1165
|
-
self.dataset[key] = ds
|
|
1166
|
-
return key
|
|
1167
|
-
|
|
1168
|
-
def set_dataset(self, uid, dataset, keys=None):
|
|
1169
|
-
|
|
1170
|
-
if keys is None:
|
|
1171
|
-
parent_keys = self.keys(uid)
|
|
1172
|
-
else:
|
|
1173
|
-
parent_keys = keys
|
|
1174
|
-
|
|
1175
|
-
parent_key = parent_keys[0]
|
|
1176
|
-
if self.type(uid, parent_key) == 'Instance':
|
|
1177
|
-
self.set_instance_dataset(uid, dataset, parent_key)
|
|
1178
|
-
return
|
|
1179
|
-
|
|
1180
|
-
if not isinstance(dataset, list):
|
|
1181
|
-
dataset = [dataset]
|
|
1182
|
-
|
|
1183
|
-
attr, vals = self.series_header(parent_keys[0])
|
|
1184
|
-
instances = self.value(parent_keys, 'SOPInstanceUID').tolist()
|
|
1185
|
-
|
|
1186
|
-
for ds in dataset:
|
|
1187
|
-
try:
|
|
1188
|
-
ind = instances.index(ds.SOPInstanceUID)
|
|
1189
|
-
except:
|
|
1190
|
-
#If there is no corresponding instance, save dataset in new instance
|
|
1191
|
-
|
|
1192
|
-
# Set parent modules
|
|
1193
|
-
ds.set_values(attr, vals)
|
|
1194
|
-
|
|
1195
|
-
# Create updated row data
|
|
1196
|
-
key = parent_keys[0]
|
|
1197
|
-
data = self.value(key, self.columns)
|
|
1198
|
-
data[3] = dbdataset.new_uid()
|
|
1199
|
-
data[4] = ds.SOPClassUID
|
|
1200
|
-
nrs = self.value(parent_keys, 'InstanceNumber')
|
|
1201
|
-
nrs = [n for n in nrs if n != -1]
|
|
1202
|
-
if nrs == []:
|
|
1203
|
-
data[10] = 1
|
|
1204
|
-
else:
|
|
1205
|
-
data[10] = 1 + max(nrs)
|
|
1206
|
-
data[11:] = ds.get_values(self.columns[11:]) # added 27/07/23
|
|
1207
|
-
|
|
1208
|
-
# Add to database in memory as a new row
|
|
1209
|
-
key = self.new_row(data)
|
|
1210
|
-
ds.set_values(self.columns[:11], data[:11]) # modified 27/07/23
|
|
1211
|
-
self.dataset[key] = ds
|
|
1212
|
-
|
|
1213
|
-
else: # If the instance is already in the object
|
|
1214
|
-
|
|
1215
|
-
key = parent_keys[ind]
|
|
1216
|
-
data = self.value(key, self.columns)
|
|
1217
|
-
data[4] = ds.SOPClassUID
|
|
1218
|
-
data[11:] = ds.get_values(self.columns[11:]) # added 27/07/23
|
|
1219
|
-
key = self.update_row_data(key, data)
|
|
1220
|
-
ds.set_values(self.columns[:11], data[:11]) # added 27/07/23
|
|
1221
|
-
self.dataset[key] = ds
|
|
1222
|
-
|
|
1223
|
-
# If the series is empty and new instances have been added then delete the row
|
|
1224
|
-
parent_key = self.drop_placeholder_row(parent_key, missing='SOPInstanceUID')
|
|
1225
|
-
|
|
1226
|
-
|
|
1227
|
-
|
|
1228
|
-
def delete_studies(self, studies: list):
|
|
1229
|
-
"""Delete a list of studies"""
|
|
1230
|
-
|
|
1231
|
-
for study in studies:
|
|
1232
|
-
keys = self.keys(study=study)
|
|
1233
|
-
self.register.loc[keys,'removed'] = True
|
|
1234
|
-
# If this was the last study in the patient
|
|
1235
|
-
# keep the patient as an empty patient
|
|
1236
|
-
patient = self.register.at[keys[0], 'PatientID']
|
|
1237
|
-
patient = (self.register.removed == False) & (self.register.PatientID == patient)
|
|
1238
|
-
patient_studies = self.register.StudyInstanceUID[patient]
|
|
1239
|
-
patient_studies_cnt = len(patient_studies.unique())
|
|
1240
|
-
if patient_studies_cnt == 0:
|
|
1241
|
-
row = self.default()
|
|
1242
|
-
row[0] = self.register.at[keys[0], 'PatientID']
|
|
1243
|
-
row[5] = self.register.at[keys[0], 'PatientName']
|
|
1244
|
-
self.new_row(row)
|
|
1245
|
-
|
|
1246
|
-
|
|
1247
|
-
def delete_series(self, series: list):
|
|
1248
|
-
"""Delete a list of series"""
|
|
1249
|
-
|
|
1250
|
-
for sery in series:
|
|
1251
|
-
keys = self.keys(series=sery)
|
|
1252
|
-
self.register.loc[keys,'removed'] = True
|
|
1253
|
-
# If this was the last series in the study
|
|
1254
|
-
# keep the study as an empty study
|
|
1255
|
-
study = self.register.at[keys[0], 'StudyInstanceUID']
|
|
1256
|
-
study = (self.register.removed == False) & (self.register.StudyInstanceUID == study)
|
|
1257
|
-
study_series = self.register.SeriesInstanceUID[study]
|
|
1258
|
-
study_series_cnt = len(study_series.unique())
|
|
1259
|
-
if study_series_cnt == 0:
|
|
1260
|
-
row = self.default()
|
|
1261
|
-
row[0] = self.register.at[keys[0], 'PatientID']
|
|
1262
|
-
row[1] = self.register.at[keys[0], 'StudyInstanceUID']
|
|
1263
|
-
row[5] = self.register.at[keys[0], 'PatientName']
|
|
1264
|
-
row[6] = self.register.at[keys[0], 'StudyDescription']
|
|
1265
|
-
row[7] = self.register.at[keys[0], 'StudyDate']
|
|
1266
|
-
self.new_row(row)
|
|
1267
|
-
|
|
1268
|
-
|
|
1269
|
-
def new_key(self):
|
|
1270
|
-
# Generate a new key
|
|
1271
|
-
return os.path.join('dbdicom', dbdataset.new_uid() + '.dcm')
|
|
1272
|
-
|
|
1273
|
-
|
|
1274
|
-
def copy_instance_to_series(self, instance_key, target_keys, tmp, **kwargs):
|
|
1275
|
-
"""Copy instances to another series"""
|
|
1276
|
-
|
|
1277
|
-
new_parent_key = target_keys[0]
|
|
1278
|
-
attributes, values = self.series_header(new_parent_key)
|
|
1279
|
-
self.append_kwargs(kwargs, attributes, values)
|
|
1280
|
-
|
|
1281
|
-
n = self.register.loc[target_keys,'InstanceNumber'].values
|
|
1282
|
-
n = n[n != -1]
|
|
1283
|
-
max_number=0 if n.size==0 else np.amax(n)
|
|
1284
|
-
|
|
1285
|
-
new_instance = dbdataset.new_uid()
|
|
1286
|
-
new_key = self.new_key()
|
|
1287
|
-
ds = self.get_instance_dataset(instance_key)
|
|
1288
|
-
|
|
1289
|
-
if ds is None:
|
|
1290
|
-
row = self.value(instance_key, self.columns).tolist()
|
|
1291
|
-
row = self.copy_series_data(new_parent_key, row)
|
|
1292
|
-
row[3] = new_instance
|
|
1293
|
-
row[10] = 1 + max_number
|
|
1294
|
-
for val in kwargs:
|
|
1295
|
-
if val in self._descriptives:
|
|
1296
|
-
row[self._descriptives[val]] = kwargs[val]
|
|
1297
|
-
else:
|
|
1298
|
-
if instance_key in self.dataset:
|
|
1299
|
-
ds = copy.deepcopy(ds)
|
|
1300
|
-
self.dataset[new_key] = ds
|
|
1301
|
-
ds.set_values(
|
|
1302
|
-
attributes + ['SOPInstanceUID', 'InstanceNumber'],
|
|
1303
|
-
values + [new_instance, 1+max_number])
|
|
1304
|
-
if not instance_key in self.dataset:
|
|
1305
|
-
ds.write(self.filepath(new_key), self.status)
|
|
1306
|
-
row = ds.get_values(self.columns)
|
|
1307
|
-
|
|
1308
|
-
new_parent_key = self.drop_placeholder_row(new_parent_key, missing='SOPInstanceUID')
|
|
1309
|
-
self.new_row(row, new_key)
|
|
1310
|
-
|
|
1311
|
-
return new_instance
|
|
1312
|
-
|
|
1313
|
-
def new_instance_number(self, series):
|
|
1314
|
-
series_keys = self.keys(series=series)
|
|
1315
|
-
n = self.register.loc[series_keys,'InstanceNumber'].values
|
|
1316
|
-
n = n[n != -1]
|
|
1317
|
-
max_number=0 if n.size==0 else np.amax(n)
|
|
1318
|
-
return max_number + 1
|
|
1319
|
-
|
|
1320
|
-
    def copy_to_series(self, uids, target, **kwargs):
        """Copy instances to another series"""

        target_keys = self.keys(series=target)
        new_parent_key = target_keys[0]
        attributes, values = self.series_header(new_parent_key)
        self.append_kwargs(kwargs, attributes, values)

        max_number = self.new_instance_number(target)
        keys = self.keys(uids)
        new_instances = dbdataset.new_uid(len(keys))

        for i, key in enumerate(keys):

            if len(keys) > 1:
                self.status.progress(i+1, len(keys), message='Copying to series..')

            new_key = self.new_key()
            instance_uid = self.value(key, 'SOPInstanceUID')
            ds = self.get_dataset(instance_uid, [key])
            if ds is None:
                row = self.value(key, self.columns).tolist()
                row = self.copy_series_data(new_parent_key, row)
                row[3] = new_instances[i]
                row[10] = i + max_number
                for val in kwargs:
                    if val in self._descriptives:
                        row[self._descriptives[val]] = kwargs[val]
            else:
                if key in self.dataset:
                    ds = copy.deepcopy(ds)
                    self.dataset[new_key] = ds
                ds.set_values(
                    attributes + ['SOPInstanceUID', 'InstanceNumber'],
                    values + [new_instances[i], i + max_number])
                if key not in self.dataset:
                    ds.write(self.filepath(new_key), self.status)
                row = ds.get_values(self.columns)

            # Add new data for the dataframe
            self.new_row(row, new_key)

        # If the series is empty and new instances have been added, then delete the row
        new_parent_key = self.drop_placeholder_row(new_parent_key, missing='SOPInstanceUID')

        if len(keys) > 1:
            self.status.hide()

        if len(new_instances) == 1:
            return new_instances[0]
        else:
            return new_instances

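    # A minimal usage sketch of copy_to_series(), with hypothetical names;
    # `mgr` is assumed to be an open Manager whose register contains the
    # source instances and the target series:
    #
    #     copied = mgr.copy_to_series(source_instance_uids, target_series_uid,
    #                                 SeriesDescription='Copied instances')
    #     # Returns the new SOPInstanceUID, or a list when more than one
    #     # instance was copied.
    #
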
    def new_series_number(self, study):
        study_keys = self.keys(study=study)
        n = self.value(study_keys, 'SeriesNumber')
        n = n[n != -1]
        max_number = 0 if n.size == 0 else np.amax(n)
        return max_number + 1

    def copy_to_study(self, uid, target, **kwargs):
        """Copy series to another study"""

        target_keys = self.keys(study=target)
        target_key = target_keys[0]
        attributes, values = self.study_header(target_key)
        self.append_kwargs(kwargs, attributes, values)

        max_number = self.new_series_number(target)
        all_series = self.series(uid)
        new_series = dbdataset.new_uid(len(all_series))

        for s, series in enumerate(all_series):

            new_number = s + max_number
            series_keys = self.keys(series=series)
            for k, key in enumerate(series_keys):

                desc = self.value(key, 'SeriesDescription')
                if desc is None:
                    desc = 'Unknown'
                msg = 'Copying series ' + desc
                msg += ' (' + str(s+1) + '/' + str(len(all_series)) + ')'
                self.status.progress(k+1, len(series_keys), msg)

                new_key = self.new_key()
                instance_uid = self.value(key, 'SOPInstanceUID')
                ds = self.get_dataset(instance_uid, [key])
                if ds is None:
                    # Fill in any register data provided
                    row = self.value(key, self.columns).tolist()
                    row = self.copy_study_data(target_key, row)
                    row[2] = new_series[s]
                    row[9] = new_number
                    for val in kwargs:
                        if val in self._descriptives:
                            row[self._descriptives[val]] = kwargs[val]
                else:
                    # If the series exists in memory, create a copy in memory
                    if key in self.dataset:
                        ds = copy.deepcopy(ds)
                        self.dataset[new_key] = ds

                    # Generate new UIDs
                    ds.set_values(
                        attributes + ['SeriesInstanceUID', 'SeriesNumber', 'SOPInstanceUID'],
                        values + [new_series[s], new_number, dbdataset.new_uid()])

                    # If the series is not in memory, create a copy on disk
                    if key not in self.dataset:
                        ds.write(self.filepath(new_key), self.status)

                    # Get row values to add to the dataframe
                    row = ds.get_values(self.columns)

                # Add the new row to the dataframe
                self.new_row(row, new_key)

        # If the study is empty and new series have been added,
        # then delete the placeholder row
        target_key = self.drop_placeholder_row(target_key, missing='SeriesInstanceUID')
        self.status.hide()

        if len(new_series) == 1:
            return new_series[0]
        else:
            return new_series

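    # A minimal usage sketch of copy_to_study(), with hypothetical names.
    # Every series under `source_uid` is duplicated into the target study
    # with fresh UIDs and consecutive series numbers:
    #
    #     new_series_uids = mgr.copy_to_study(source_uid, target_study_uid,
    #                                         StudyDescription='Follow-up')
    #
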
    def copy_to_patient(self, uid, target_key, **kwargs):
        """Copy studies to another patient"""

        attributes, values = self.patient_header(target_key)
        self.append_kwargs(kwargs, attributes, values)

        all_studies = self.studies(uid)
        new_studies = dbdataset.new_uid(len(all_studies))

        for s, study in enumerate(all_studies):
            all_series = self.series(study)
            if all_series == []:
                # Create an empty study
                new_key = self.new_key()
                key = self.keys(study=study)[0]
                row = self.value(key, self.columns).tolist()
                row[0] = self.value(target_key, 'PatientID')
                row[1] = new_studies[s]
                row[5] = self.value(target_key, 'PatientName')
                row[6] = self.value(target_key, 'StudyDescription')
                row[7] = self.value(target_key, 'StudyDate')
                for val in kwargs:
                    if val in self._descriptives:
                        row[self._descriptives[val]] = kwargs[val]
                # Get new data for the dataframe
                self.new_row(row, new_key)
            for series in all_series:
                new_series_uid = dbdataset.new_uid()
                for key in self.keys(series=series):
                    new_key = self.new_key()
                    instance_uid = self.value(key, 'SOPInstanceUID')
                    ds = self.get_dataset(instance_uid, [key])
                    if ds is None:
                        row = self.value(key, self.columns).tolist()
                        row[0] = self.value(target_key, 'PatientID')
                        row[1] = new_studies[s]
                        row[2] = new_series_uid
                        row[3] = dbdataset.new_uid()
                        row[5] = self.value(target_key, 'PatientName')
                        for val in kwargs:
                            if val in self._descriptives:
                                row[self._descriptives[val]] = kwargs[val]
                    else:
                        if key in self.dataset:
                            ds = copy.deepcopy(ds)
                            self.dataset[new_key] = ds
                        ds.set_values(
                            attributes + ['StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID'],
                            values + [new_studies[s], new_series_uid, dbdataset.new_uid()])
                        if key not in self.dataset:
                            ds.write(self.filepath(new_key), self.status)
                        row = ds.get_values(self.columns)

                    # Get new data for the dataframe
                    self.new_row(row, new_key)

        # If the patient is empty and new studies have been added, then delete the row
        target_key = self.drop_placeholder_row(target_key, missing='StudyInstanceUID')

        if len(new_studies) == 1:
            return new_studies[0]
        else:
            return new_studies

    def copy_to_database(self, uid, **kwargs):
        """Copy patients to the database"""

        all_patients = self.patients(uid)
        new_patients = dbdataset.new_uid(len(all_patients))

        for i, patient in enumerate(all_patients):
            keys = self.keys(patient=patient)
            new_patient_uid = new_patients[i]
            new_patient_name = 'Copy of ' + self.value(keys[0], 'PatientName')
            for study in self.studies(patient):
                new_study_uid = dbdataset.new_uid()
                for sery in self.series(study):
                    new_series_uid = dbdataset.new_uid()
                    for key in self.keys(series=sery):
                        new_instance_uid = dbdataset.new_uid()
                        new_key = self.new_key()
                        instance_uid = self.value(key, 'SOPInstanceUID')
                        ds = self.get_dataset(instance_uid, [key])
                        if ds is None:
                            row = self.value(key, self.columns).tolist()
                            row[0] = new_patient_uid
                            row[1] = new_study_uid
                            row[2] = new_series_uid
                            row[3] = new_instance_uid
                            row[5] = new_patient_name
                            for val in kwargs:
                                if val in self._descriptives:
                                    row[self._descriptives[val]] = kwargs[val]
                        else:
                            #TODO: Simplify with set_dataset_values()
                            if key in self.dataset:
                                ds = copy.deepcopy(ds)
                                self.dataset[new_key] = ds
                            ds.set_values(
                                list(kwargs.keys()) + ['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID', 'PatientName'],
                                list(kwargs.values()) + [new_patient_uid, new_study_uid, new_series_uid, new_instance_uid, new_patient_name])
                            if key not in self.dataset:
                                ds.write(self.filepath(new_key), self.status)
                            row = ds.get_values(self.columns)

                        # Get new data for the dataframe
                        self.new_row(row, new_key)

        if len(new_patients) == 1:
            return new_patients[0]
        else:
            return new_patients

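    # A minimal usage sketch of copy_to_database(), with hypothetical names.
    # The whole patient hierarchy is duplicated under fresh UIDs, and the
    # copy is renamed 'Copy of <original PatientName>':
    #
    #     new_patient_id = mgr.copy_to_database(patient_id)
    #
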
    def copy_series_data(self, key, row):
        row[0] = self.register.at[key, 'PatientID']
        row[1] = self.register.at[key, 'StudyInstanceUID']
        row[2] = self.register.at[key, 'SeriesInstanceUID']
        row[5] = self.register.at[key, 'PatientName']
        row[6] = self.register.at[key, 'StudyDescription']
        row[7] = self.register.at[key, 'StudyDate']
        row[8] = self.register.at[key, 'SeriesDescription']
        row[9] = self.register.at[key, 'SeriesNumber']
        return row

    def preserve_series_record(self, key):
        # If this is the last instance in the series,
        # keep the series as an empty series.
        source_series = self.register.at[key, 'SeriesInstanceUID']
        source_series = (self.register.removed == False) & (self.register.SeriesInstanceUID == source_series)
        source_series_instances = self.register.SOPInstanceUID[source_series]
        source_series_instances_cnt = source_series_instances.shape[0]
        if source_series_instances_cnt == 1:
            row = self.default()
            row = self.copy_series_data(key, row)
            self.new_row(row)

    def append_kwargs(self, kwargs, attributes, values):
        # Merge keyword arguments into the attribute/value lists,
        # overriding any attribute that is already listed.
        for key in kwargs:
            try:
                ind = attributes.index(key)
            except ValueError:
                attributes.append(key)
                values.append(kwargs[key])
            else:
                values[ind] = kwargs[key]

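    # A minimal sketch of how append_kwargs() merges keyword arguments into
    # a header (hypothetical values):
    #
    #     attributes, values = ['PatientName'], ['Anonymous']
    #     mgr.append_kwargs({'PatientName': 'Doe^John', 'StudyDate': '20230101'},
    #                       attributes, values)
    #     # attributes -> ['PatientName', 'StudyDate']
    #     # values     -> ['Doe^John', '20230101']
    #
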
    def move_to_series(self, uid, target, **kwargs):
        """Move datasets to another series"""

        target_keys = self.keys(series=target)
        if target_keys == []:
            msg = 'Moving data to a series that does not exist in the database'
            raise ValueError(msg)
        new_parent_key = target_keys[0]
        attributes, values = self.series_header(new_parent_key)
        self.append_kwargs(kwargs, attributes, values)

        n = self.value(target_keys, 'InstanceNumber')
        n = n[n != -1]
        max_number = 0 if n.size == 0 else np.amax(n)

        keys = self.keys(uid)

        for i, key in enumerate(keys):

            self.status.progress(i+1, len(keys), message='Moving dataset..')
            self.preserve_series_record(key)
            instance_uid = self.value(key, 'SOPInstanceUID')
            ds = self.get_dataset(instance_uid, [key])

            # If the instance is empty, just update the register.
            if ds is None:
                row = self.value(key, self.columns).tolist()
                row = self.copy_series_data(new_parent_key, row)
                row[10] = i + 1 + max_number
                for val in kwargs:
                    if val in self._descriptives:
                        row[self._descriptives[val]] = kwargs[val]
                self.update_row_data(key, row)
            # Else set the values in the dataset and register.
            else:
                self.set_dataset_values(ds, key, attributes + ['InstanceNumber'], values + [i + 1 + max_number])

        # If the target series was an empty placeholder, delete its row
        new_parent_key = self.drop_placeholder_row(new_parent_key, 'SOPInstanceUID')

        if len(keys) == 1:
            return self.value(keys, 'SOPInstanceUID')
        else:
            return list(self.value(keys, 'SOPInstanceUID'))

    def copy_study_data(self, key, row):
        row[0] = self.register.at[key, 'PatientID']
        row[1] = self.register.at[key, 'StudyInstanceUID']
        row[5] = self.register.at[key, 'PatientName']
        row[6] = self.register.at[key, 'StudyDescription']
        row[7] = self.register.at[key, 'StudyDate']
        return row

    def preserve_study_record(self, key):
        # If this is the last series in the study,
        # then create a new row for the empty study.
        source_study = self.register.at[key, 'StudyInstanceUID']
        source_study_series = (self.register.removed == False) & (self.register.StudyInstanceUID == source_study)
        source_study_series = self.register.SeriesInstanceUID[source_study_series]
        source_study_series_cnt = len(source_study_series.unique())
        if source_study_series_cnt == 1:
            row = self.default()
            row = self.copy_study_data(key, row)
            self.new_row(row)

    def move_to_study(self, uid, target, **kwargs):
        """Move series to another study"""

        target_keys = self.keys(study=target)
        new_parent_key = target_keys[0]
        attributes, values = self.study_header(new_parent_key)
        self.append_kwargs(kwargs, attributes, values)

        n = self.value(target_keys, 'SeriesNumber')
        n = n[n != -1]
        max_number = 0 if n.size == 0 else np.amax(n)

        all_series = self.series(uid)

        for s, series in enumerate(all_series):

            self.status.progress(s+1, len(all_series), message='Moving series..')
            new_number = s + 1 + max_number
            keys = self.keys(series=series)
            self.preserve_study_record(keys[0])

            for key in keys:

                instance_uid = self.value(key, 'SOPInstanceUID')
                ds = self.get_dataset(instance_uid, [key])

                # If the instance is empty, just replace study data in the register.
                if ds is None:
                    row = self.value(key, self.columns).tolist()
                    row = self.copy_study_data(new_parent_key, row)
                    row[9] = new_number
                    for val in kwargs:
                        if val in self._descriptives:
                            row[self._descriptives[val]] = kwargs[val]
                    self.update_row_data(key, row)

                # Else set the values in the dataset and register.
                else:
                    self.set_dataset_values(ds, key, attributes + ['SeriesNumber'], values + [new_number])

        new_parent_key = self.drop_placeholder_row(new_parent_key, 'SeriesInstanceUID')

        if len(all_series) == 1:
            return all_series[0]
        else:
            return all_series

    def copy_patient_data(self, key, row):
        row[0] = self.register.at[key, 'PatientID']
        row[5] = self.register.at[key, 'PatientName']
        return row

    def preserve_patient_record(self, key):
        # If this is the last study in the patient, create a new row for the empty patient record.
        source_patient = self.register.at[key, 'PatientID']
        source_patient = (self.register.removed == False) & (self.register.PatientID == source_patient)
        source_patient_studies = self.register.StudyInstanceUID[source_patient]
        source_patient_studies_cnt = len(source_patient_studies.unique())
        if source_patient_studies_cnt == 1:
            row = self.default()
            row = self.copy_patient_data(key, row)
            self.new_row(row)

    def move_to_patient(self, uid, target, **kwargs):
        """Move studies to another patient"""

        target_keys = self.keys(patient=target)
        new_parent_key = target_keys[0]
        attributes, values = self.patient_header(new_parent_key)
        self.append_kwargs(kwargs, attributes, values)
        all_studies = self.studies(uid)

        for s, study in enumerate(all_studies):

            self.status.progress(s+1, len(all_studies), message='Moving study..')
            keys = self.keys(study=study)
            self.preserve_patient_record(keys[0])

            for series in self.series(keys=keys):

                # Move all instances one-by-one to the new patient
                for key in self.keys(series=series):

                    instance_uid = self.value(key, 'SOPInstanceUID')
                    ds = self.get_dataset(instance_uid, [key])

                    # If the instance is empty, just update the register.
                    if ds is None:
                        row = self.value(key, self.columns).tolist()
                        row = self.copy_patient_data(new_parent_key, row)
                        for val in kwargs:
                            if val in self._descriptives:
                                row[self._descriptives[val]] = kwargs[val]
                        self.update_row_data(key, row)

                    # Else set the values in the dataset and register.
                    else:
                        self.set_dataset_values(ds, key, attributes, values)

        new_parent_key = self.drop_placeholder_row(new_parent_key, 'StudyInstanceUID')

        if len(all_studies) == 1:
            return all_studies[0]
        else:
            return all_studies

    def move_to(self, source, target, **kwargs):
        # Dispatch to the move method that matches the type of the target record.
        target_type = self.type(target)
        if target_type == 'Patient':
            return self.move_to_patient(source, target, **kwargs)
        if target_type == 'Study':
            return self.move_to_study(source, target, **kwargs)
        if target_type == 'Series':
            return self.move_to_series(source, target, **kwargs)
        if target_type == 'Instance':
            raise ValueError('Cannot move to an instance. Please move to series, study or patient.')

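    # A minimal sketch of the move_to() dispatcher (hypothetical names).
    # The type of the target record decides which move method is called:
    #
    #     mgr.move_to(series_uid, study_uid)      # -> move_to_study()
    #     mgr.move_to(study_uid, patient_id)      # -> move_to_patient()
    #     mgr.move_to(instance_uids, series_uid)  # -> move_to_series()
    #
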
    def create_new_instance(self, key, ds):
        # Attach the new instance to the closest existing parent record.
        series_uid = self.value(key, 'SeriesInstanceUID')
        if series_uid is None:
            study_uid = self.value(key, 'StudyInstanceUID')
            if study_uid is None:
                patient_uid = self.value(key, 'PatientID')
                if patient_uid is None:
                    _, new_key = self.new_instance('Database', ds)
                else:
                    _, new_key = self.new_instance(patient_uid, ds)
            else:
                _, new_key = self.new_instance(study_uid, ds)
        else:
            _, new_key = self.new_instance(series_uid, ds)
        return new_key

    def save_dataset(self, key, ds):
        if key in self.dataset:
            self.dataset[key] = ds
        else:
            path = self.filepath(key)
            ds.write(path, self.status)

    def set_dataset_values(self, ds, key, attributes, values):

        # If the dataset is in memory and has not yet been modified, then edit a copy.
        if key in self.dataset:
            if not self.value(key, 'created'):
                ds = copy.deepcopy(ds)

        # Change the values and get the register row data
        ds.set_values(attributes, values)
        row = ds.get_values(self.columns)

        # Update the register and save the modified dataset
        key = self.update_row_data(key, row)
        self.save_dataset(key, ds)
        return key # added

    # def force_get_dataset(self, key):

    #     # Get a dataset for the instance, and create one in memory if needed.
    #     instance_uid = self.value(key, 'SOPInstanceUID')

    #     # If the record is empty, create a new instance and a dataset in memory
    #     if instance_uid is None:
    #         ds = new_dataset('MRImage')
    #         new_key = self.create_new_instance(key, ds)
    #         return ds, new_key

    #     # If a dataset exists, return it.
    #     ds = self.get_dataset(instance_uid, [key])
    #     if ds is not None:
    #         return ds, key

    #     # If the instance has no data yet, create a dataset in memory.
    #     ds = new_dataset('MRImage')
    #     new_key = self.set_instance_dataset(instance_uid, ds, key)
    #     return ds, key

    # def _set_values(self, attributes, values, keys=None, uid=None):
    #     """Set values in a dataset"""
    #     # PASSES ALL TESTS but creates datasets when attributes of empty records are set

    #     uids = ['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID']
    #     uids = [i for i in uids if i in attributes]
    #     if uids != []:
    #         raise ValueError('UIDs cannot be set using set_value(). Use copy_to() or move_to() instead.')

    #     if keys is None:
    #         keys = self.keys(uid)

    #     for key in keys:

    #         # Get the dataset, and create one if needed
    #         ds, new_key = self.force_get_dataset(key)

    #         # Set the new values
    #         self.set_dataset_values(ds, new_key, attributes, values)

    #     return new_key

    def set_row_values(self, key, attributes, values):
        if not isinstance(values, list):
            values = [values]
            attributes = [attributes]
        row = self.value(key, self.columns).tolist()
        for i, attr in enumerate(attributes):
            if attr in self._descriptives:
                row[self._descriptives[attr]] = values[i]
        self.update_row_data(key, row)

    def set_values(self, attributes, values, keys=None, uid=None):
        """Set values in the corresponding datasets"""

        uids = ['PatientID', 'StudyInstanceUID', 'SeriesInstanceUID', 'SOPInstanceUID']
        uids = [i for i in uids if i in attributes]
        if uids != []:
            raise ValueError('UIDs cannot be set using set_values(). Use copy_to() or move_to() instead.')

        if keys is None:
            keys = self.keys(uid)

        for key in keys:

            # Get the dataset
            instance_uid = self.value(key, 'SOPInstanceUID')
            if instance_uid is None:
                ds = None
            else:
                ds = self.get_dataset(instance_uid, [key])

            if ds is None:
                # Update register entries only
                self.set_row_values(key, attributes, values)
            else:
                # Set the new values in the dataset and the register
                self.set_dataset_values(ds, key, attributes, values)

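    # A minimal usage sketch of set_values(), with hypothetical names.
    # Values are written to the datasets where they exist, and to the
    # register otherwise; UIDs are rejected by design:
    #
    #     mgr.set_values(['SeriesDescription', 'FlipAngle'],
    #                    ['T1 map', 15.0], uid=series_uid)
    #
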
    def get_values(self, attributes, keys=None, uid=None):

        if keys is None:
            keys = self.keys(uid)
        if keys == []:
            return

        # Single attribute
        if not isinstance(attributes, list):

            if attributes in self.columns:
                value = [self.register.at[key, attributes] for key in keys]
                # Get unique elements, preserving order of first occurrence
                value = [x for i, x in enumerate(value) if i == value.index(x)]
            else:
                value = []
                for i, key in enumerate(keys):
                    instance_uid = self.value(key, 'SOPInstanceUID')
                    ds = self.get_dataset(instance_uid, [key])
                    if ds is None:
                        v = None
                    else:
                        v = ds.get_values(attributes)
                    if v not in value:
                        value.append(v)
            if len(value) == 1:
                return value[0]
            try:
                value.sort() # added 30/12/22
            except TypeError:
                # Mixed or unorderable types: keep order of first occurrence.
                pass
            return value

        # Multiple attributes
        # Create a np array v with values for each instance and attribute
        if set(attributes) <= set(self.columns):
            v = self.value(keys, attributes)
        else:
            v = np.empty((len(keys), len(attributes)), dtype=object)
            for i, key in enumerate(keys):
                instance_uid = self.value(key, 'SOPInstanceUID')
                ds = self.get_dataset(instance_uid, [key])
                if isinstance(ds, list):
                    instances = self.register.SOPInstanceUID == instance_uid
                    msg = 'Multiple instances with the same SOPInstanceUID \n'
                    msg += instance_uid + '\n'
                    msg += str(self.register.loc[instances].transpose())
                    raise DatabaseCorrupted(msg)
                if ds is None:
                    v[i,:] = [None] * len(attributes)
                else:
                    v[i,:] = ds.get_values(attributes)

        # Return a list with unique values for each attribute
        values = []
        for a in range(v.shape[1]):
            va = v[:,a]
            va = va[va != np.array(None)]
            va = list(va)
            # Get unique values, preserving order of first occurrence
            va = [x for i, x in enumerate(va) if i == va.index(x)]
            if len(va) == 0:
                va = None
            elif len(va) == 1:
                va = va[0]
            else:
                try:
                    va.sort() # added 30/12/22
                except TypeError:
                    pass
            values.append(va)
        return values

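    # A minimal usage sketch of get_values(), with hypothetical names:
    #
    #     desc = mgr.get_values('SeriesDescription', uid=series_uid)
    #     # -> a single value, or a sorted list when instances disagree
    #     desc, te = mgr.get_values(['SeriesDescription', 'EchoTime'],
    #                               uid=series_uid)
    #
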
    def import_dataset(self, ds):

        # Do not import SOPInstances that are already in the database
        uid = ds.SOPInstanceUID
        keys = self.keys(instance=uid)
        if keys != []:
            msg = 'Cannot import a dataset that is already in the database.'
            raise ValueError(msg)

        # Add a row to the register
        row = ds.get_values(self.columns)
        new_key = self.new_key()
        self.new_row(row, new_key)

        # If the database exists on disk, write the file
        if self.path is not None:
            path = self.filepath(new_key)
            ds.write(path)

    # Misleading name because files are not datasets - e.g. does not work for datasets in memory.
    def import_datasets(self, files):

        # Read manager data
        df = dbdataset.read_dataframe(files, self.columns, self.status)
        df['removed'] = False
        df['created'] = True

        # Do not import SOPInstances that are already in the database
        uids = df.SOPInstanceUID.values.tolist()
        keys = self.keys(instance=uids)
        if keys != []:
            do_not_import = self.value(keys, 'SOPInstanceUID')
            rows = df.SOPInstanceUID.isin(do_not_import)
            df.drop(df[rows].index, inplace=True)
        if df.empty:
            return

        # Add those that are left to the database
        files = df.index.tolist()
        for i, file in enumerate(files):
            self.status.progress(i+1, len(files), 'Copying files..')
            new_key = self.new_key()
            ds = dbdataset.read(file)
            ds.write(self.filepath(new_key), self.status)
            df.rename(index={file: new_key}, inplace=True)
        self.register = pd.concat([self.register, df])

        # Return the UIDs of the new instances
        return df.SOPInstanceUID.values.tolist()

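    # A minimal usage sketch of import_datasets(), with hypothetical paths.
    # Files whose SOPInstanceUID is already in the register are skipped:
    #
    #     new_uids = mgr.import_datasets(['/data/case1/im001.dcm',
    #                                     '/data/case1/im002.dcm'])
    #
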
    def import_datasets_from_nifti(self, files, study=None):

        if study is None:
            study, _ = self.new_study()

        # Single-slice files are collected into one new series;
        # multi-slice files each get a series of their own.
        nifti_series = None
        for i, file in enumerate(files):

            # Read the nifti file
            nim = nib.load(file)
            sx, sy, sz = nim.header.get_zooms() # spacing

            # If a dicom header is stored in a nifti extension, use it.
            # Else create one from scratch.
            try:
                dcmext = nim.header.extensions
                dataset = DbDataset(dcmext[0].get_content())
            except Exception:
                dataset = new_dataset()

            # Read the array and reshape to 3D
            # (assigned back: ndarray.reshape returns a new array)
            array = np.squeeze(nim.get_fdata())
            array = array.reshape((array.shape[0], array.shape[1], -1))
            n_slices = array.shape[-1]

            # If there is only one slice,
            # load it into the shared nifti series.
            if n_slices == 1:
                if nifti_series is None:
                    desc = os.path.basename(file)
                    nifti_series, _ = self.new_series(study, SeriesDescription=desc)
                affine = dbimage.affine_to_RAH(nim.affine)
                dataset.set_pixel_array(array[:,:,0])
                dataset.set_values('affine_matrix', affine)
                #dataset.set_values('PixelSpacing', [sy, sx])
                self.new_instance(nifti_series, dataset)

            # If there are multiple slices in the file,
            # create a new series and save one instance per slice.
            else:
                desc = os.path.basename(file)
                series, _ = self.new_series(study, SeriesDescription=desc)
                affine = dbimage.affine_to_RAH(nim.affine)
                for z in range(n_slices):
                    ds = copy.deepcopy(dataset)
                    ds.set_pixel_array(array[:,:,z])
                    ds.set_values('affine_matrix', affine)
                    #ds.set_values('PixelSpacing', [sy, sx])
                    self.new_instance(series, ds)

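    # A minimal usage sketch of import_datasets_from_nifti(), with a
    # hypothetical file; each slice of the volume becomes one DICOM instance:
    #
    #     study_uid, _ = mgr.new_study()
    #     mgr.import_datasets_from_nifti(['/data/maps/T1map.nii.gz'],
    #                                    study=study_uid)
    #
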
    def export_datasets(self, uids, database):
        files = self.filepaths(uids)
        database.import_datasets(files)

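    # A minimal sketch of copying data between two databases (hypothetical
    # managers `mgr_a` and `mgr_b` opened on different folders):
    #
    #     mgr_a.export_datasets(series_uids, mgr_b)
    #     # mgr_b reads the files from mgr_a's folder and imports copies.
    #
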
    # Helper functions to hide the register from classes other than manager
    # Consider removing after eliminating dataframe

    def _empty(self):
        return self.register.empty

    def _dbloc(self):
        return self.register.removed == False

    def _keys(self, loc):
        return self.register.index[loc]

    def _at(self, row, col):
        return self.register.at[row, col]

    def _extract(self, rows):
        return self.register.loc[rows, :]

    def _loc(self, name, uid):
        df = self.register
        return (df.removed == False) & (df[name] == uid)

    def _extract_record(self, name, uid):
        return self.register[name] == uid