dbdicom 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbdicom might be problematic.

Files changed (52)
  1. dbdicom/__init__.py +5 -3
  2. dbdicom/create.py +77 -70
  3. dbdicom/dro.py +174 -0
  4. dbdicom/ds/dataset.py +30 -3
  5. dbdicom/ds/types/mr_image.py +18 -7
  6. dbdicom/extensions/__init__.py +10 -0
  7. dbdicom/{wrappers → extensions}/dipy.py +191 -205
  8. dbdicom/extensions/elastix.py +503 -0
  9. dbdicom/extensions/matplotlib.py +107 -0
  10. dbdicom/extensions/numpy.py +271 -0
  11. dbdicom/{wrappers → extensions}/scipy.py +131 -32
  12. dbdicom/{wrappers → extensions}/skimage.py +1 -1
  13. dbdicom/extensions/sklearn.py +243 -0
  14. dbdicom/extensions/vreg.py +1390 -0
  15. dbdicom/external/dcm4che/bin/emf2sf +57 -57
  16. dbdicom/manager.py +91 -36
  17. dbdicom/pipelines.py +66 -0
  18. dbdicom/record.py +447 -80
  19. dbdicom/types/instance.py +46 -20
  20. dbdicom/types/series.py +2182 -399
  21. dbdicom/utils/image.py +152 -21
  22. dbdicom/utils/variables.py +8 -2
  23. dbdicom/utils/vreg.py +327 -135
  24. dbdicom-0.2.3.dist-info/METADATA +88 -0
  25. dbdicom-0.2.3.dist-info/RECORD +67 -0
  26. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/WHEEL +1 -1
  27. dbdicom/external/__pycache__/__init__.cpython-310.pyc +0 -0
  28. dbdicom/external/__pycache__/__init__.cpython-37.pyc +0 -0
  29. dbdicom/external/dcm4che/__pycache__/__init__.cpython-310.pyc +0 -0
  30. dbdicom/external/dcm4che/__pycache__/__init__.cpython-37.pyc +0 -0
  31. dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-310.pyc +0 -0
  32. dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-37.pyc +0 -0
  33. dbdicom/external/dcm4che/lib/linux-x86/libclib_jiio.so +0 -0
  34. dbdicom/external/dcm4che/lib/linux-x86-64/libclib_jiio.so +0 -0
  35. dbdicom/external/dcm4che/lib/linux-x86-64/libopencv_java.so +0 -0
  36. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio.so +0 -0
  37. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis.so +0 -0
  38. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis2.so +0 -0
  39. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio.so +0 -0
  40. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis.so +0 -0
  41. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis2.so +0 -0
  42. dbdicom/external/dcm4che/lib/solaris-x86/libclib_jiio.so +0 -0
  43. dbdicom/external/dcm4che/lib/solaris-x86-64/libclib_jiio.so +0 -0
  44. dbdicom/wrappers/__init__.py +0 -7
  45. dbdicom/wrappers/elastix.py +0 -855
  46. dbdicom/wrappers/numpy.py +0 -119
  47. dbdicom/wrappers/sklearn.py +0 -151
  48. dbdicom/wrappers/vreg.py +0 -273
  49. dbdicom-0.2.0.dist-info/METADATA +0 -276
  50. dbdicom-0.2.0.dist-info/RECORD +0 -81
  51. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/LICENSE +0 -0
  52. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/top_level.txt +0 -0
dbdicom/record.py CHANGED
@@ -1,10 +1,14 @@
  # Importing annotations to handle or sign in import type hints
  from __future__ import annotations

+ import os
+ import datetime
+
  # Import packages
  import numpy as np
  import pandas as pd
  import dbdicom.ds.dataset as dbdataset
+ from dbdicom.ds import MRImage
  from dbdicom.utils.files import export_path


@@ -15,12 +19,14 @@ class Record():

  def __init__(self, create, manager, uid='Database', key=None, **kwargs):

+ self._logfile = None
  self._key = key
  self._mute = False
  self.uid = uid
  self.attributes = kwargs
  self.manager = manager
  self.new = create
+

  def __eq__(self, other):
  if other is None:
@@ -34,13 +40,13 @@ class Record():
  return self.get_values(attributes)

  def __setattr__(self, attribute, value):
- if attribute in ['_key','_mute', 'uid', 'manager', 'attributes', 'new']:
+ if attribute in ['_key','_mute', 'uid', 'manager', 'attributes', 'new', '_logfile']:
  self.__dict__[attribute] = value
  else:
- self.set_values([attribute], [value])
+ self._set_values([attribute], [value])

  def __setitem__(self, attributes, values):
- self.set_values(attributes, values)
+ self._set_values(attributes, values)

  def loc(self):
  return self.manager._loc(self.name, self.uid)
@@ -86,7 +92,80 @@ class Record():
  def dialog(self):
  return self.manager.dialog

+ def set_log(self, filepath:str=None):
+ """Set a new file for logging.
+
+ Args:
+ filepath: full path to a log file. If not provided the current log file is removed. Alternatively the value 'Default' can be assigned, in which case a standard file at the same location as the database is automatically opened. Defaults to None.
+
+ Raises:
+ FileNotFoundError: if the log file cannot be written to.
+
+ See also:
+ `log`
+
+ Examples:

+ Set a new log file:
+
+ >>> record.set_log('path/to/logfile')
+
+ and start logging:
+
+ >>> record.log('Starting new calculation...')
+
+ Alternatively, start a new log at the default location:
+
+ >>> record.set_log('Default')
+ """
+ if filepath is None:
+ self._logfile = None
+ return
+ if filepath == 'Default':
+ # Use default log name
+ self._logfile = os.path.join(self.manager.path, "activity_log.txt")
+ else:
+ self._logfile = filepath
+ try:
+ file = open(self._logfile, 'a')
+ file.write(str(datetime.datetime.now())[0:19] + "Starting a new log..")
+ file.close()
+ except:
+ msg = 'Cannot write to log ' + self._logfile
+ raise FileNotFoundError(msg)
+
+ def log(self, message:str):
+ """Write an entry in the log file.
+
+ If no logfile is set, this function only writes a message in the terminal.
+
+ Args:
+ message (str): text message to be written in the log file. The function automatically includes some timing information so this does not need to be included in the message.
+
+ Raises:
+ FileNotFoundError: if the log file cannot be written to.
+
+ See also:
+ `set_log`
+
+ Examples:
+ Set a default file for logging and write a first message:
+
+ >>> record.set_log('Default')
+ >>> record.log('Starting new calculation...')
+ """
+
+ self.message(message)
+ if self._logfile is None:
+ return
+ try:
+ file = open(self._logfile, 'a')
+ file.write("\n"+str(datetime.datetime.now())[0:19] + ": " + message)
+ file.close()
+ except:
+ msg = 'Cannot write to log ' + self._logfile
+ raise FileNotFoundError(msg)
+

  # Properties

@@ -100,7 +179,7 @@ class Record():
  Example:
  Print a summary of a database:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> database.print()
  ---------- DATABASE --------------
  Location: In memory
@@ -212,7 +291,7 @@ class Record():
  Populate the series with a numpy array and verify that it is now no longer empty:

  >>> zeros = np.zeros((3, 2, 128, 128))
- >>> series.set_ndarray(zeros)
+ >>> series.set_pixel_values(zeros)
  >>> print(series.empty())
  False
  """
@@ -327,7 +406,7 @@ class Record():
  Example:
  Find the patients of a given database:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> patients = database.children()
  >>> print([p.PatientName for p in patients])
  ['James Bond', 'Scarface']
@@ -368,7 +447,7 @@ class Record():
  Example:
  Retrieve a study from a database, and find all other studies performed on the same patient:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> study = database.studies()[0]
  >>> print([s.StudyDescription for s in study.siblings()])
  ['Xray']
@@ -402,7 +481,7 @@ class Record():
  Example:
  Find all series in a database, and print their labels:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> series_list = database.series()
  >>> print([s.label() for s in series_list])
  ['Series 001 [Localizer]', 'Series 002 [T2w]', 'Series 001 [Chest]', 'Series 002 [Head]', 'Series 001 [Localizer]', 'Series 002 [T2w]', 'Series 001 [Chest]', 'Series 002 [Head]']
@@ -447,7 +526,7 @@ class Record():
  Example:
  Find all studies in a database:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> studies_list = database.studies()
  >>> print([s.label() for s in studies_list])
  ['Study MRI [19821201]', 'Study Xray [19821205]', 'Study MRI [19850105]', 'Study Xray [19850106]']
@@ -486,7 +565,7 @@ class Record():
  Example:
  Find all patients in a database:

- >>> database = db.database_hollywood()
+ >>> database = db.dro.database_hollywood()
  >>> patients_list = database.patients()
  >>> print([s.label() for s in patients_list])
  ['Patient James Bond', 'Patient Scarface']
@@ -906,8 +985,6 @@ class Record():
  return self


-
-

  def copy_to(self, parent, **kwargs):
  """Return a copy of the record under another parent.
@@ -1117,6 +1194,187 @@ class Record():
  self.manager.clear(self.uid, keys=self.keys())


+ def export_as_dicom(self, path:str):
+ """Export record in DICOM format to an external directory.
+
+ Note: since this is exporting outside of the current database, this will assign new identifiers to the exported data.
+
+ Args:
+ path (str): path to export directory.
+
+ See Also:
+ :func:`~export_as_png`
+ :func:`~export_as_nifti`
+ :func:`~export_as_npy`
+ :func:`~export_as_csv`
+
+ Example:
+
+ Create a 4D series and export as DICOM:
+
+ >>> series = db.ones((128, 128, 10, 5))
+ >>> path = 'path\\to\\empty\\folder'
+ >>> series.export_as_dicom(path)
+
+ This should create a single folder in the directory, populated with 50 DICOM files.
+ """
+ if self.name == 'Database':
+ folder = 'Database'
+ else:
+ folder = self.label()
+ path = export_path(path, folder)
+ for child in self.children():
+ child.export_as_dicom(path)
+
+
+ def export_as_png(self, path:str, center:float=None, width:float=None, colormap:str=None):
+ """Export record in PNG format.
+
+ Args:
+ path (str): path to export directory.
+ center (float, optional): center of the color window. Defaults to None, in which case the center is taken from the DICOM header.
+ width (float, optional): width of the color window. Defaults to None, in which case the width is taken from the DICOM header.
+ colormap (str, optional): color map to use as lookup table. Any valid matplotlib colormap can be entered here. Please see the `matplotlib colormap reference <https://matplotlib.org/stable/gallery/color/colormap_reference.html>`_ for a complete list. Defaults to None, in which case the colormap is taken from the DICOM header.
+
+ See Also:
+ :func:`~export_as_dicom`
+ :func:`~export_as_nifti`
+ :func:`~export_as_npy`
+ :func:`~export_as_csv`
+
+ Example:
+
+ Create a 4D series and export as PNG, using the colormap plasma:
+
+ >>> series = db.ones((128, 128, 10, 5))
+ >>> path = 'path\\to\\empty\\folder'
+ >>> series.export_as_png(path, center=1, width=0.5, colormap='plasma')
+
+ This should create a single folder in the directory, populated with 50 PNG files.
+ """
+ if self.name == 'Database':
+ folder = 'Database'
+ else:
+ folder = self.label()
+ path = export_path(path, folder)
+ for child in self.children():
+ child.export_as_png(path, center=center, width=width, colormap=colormap)
+
+ def export_as_csv(self, path:str):
+ """Export record in CSV format to an external directory.
+
+ Args:
+ path (str): path to export directory.
+
+ See Also:
+ :func:`~export_as_png`
+ :func:`~export_as_nifti`
+ :func:`~export_as_npy`
+ :func:`~export_as_dicom`
+
+ Example:
+
+ Create a 4D series and export as CSV:
+
+ >>> series = db.ones((128, 128, 10, 5))
+ >>> path = 'path\\to\\empty\\folder'
+ >>> series.export_as_csv(path)
+
+ This should create a single folder in the directory, populated with 50 CSV files.
+ """
+ if self.name == 'Database':
+ folder = 'Database'
+ else:
+ folder = self.label()
+ path = export_path(path, folder)
+ for child in self.children():
+ child.export_as_csv(path)
+
+
+ def export_as_nifti(self, path:str, dims:tuple=None):
+ """Export record in NIFTI format to an external directory.
+
+ Args:
+ path (str): path to export directory.
+ dims (tuple, optional): when set, volumes are extracted along the given dimensions and exported in single files. If dims is not set, each image will be exported in its own file.
+
+ See Also:
+ :func:`~export_as_png`
+ :func:`~export_as_dicom`
+ :func:`~export_as_npy`
+ :func:`~export_as_csv`
+
+ Example:
+
+ Create a 4D series and export as NIFTI:
+
+ >>> series = db.ones((128, 128, 10, 5))
+ >>> path = 'path\\to\\empty\\folder'
+ >>> series.export_as_nifti(path)
+
+ This should create a single folder in the directory, populated with 50 NIFTI files.
+
+ In order to export the entire series in a single volume, provide the dimensions along which the volume is to be taken:
+
+ >>> dims = ('SliceLocation', 'AcquisitionTime')
+ >>> series.export_as_nifti(path, dims=dims)
+
+ This will now create a single NIFTI file.
+
+ Note: in this case the dimensions must be specified as slice location and acquisition time because these are the default dimensions used by series creation functions like :func:`~ones`.
+ """
+ if self.name == 'Database':
+ folder = 'Database'
+ else:
+ folder = self.label()
+ path = export_path(path, folder)
+ for child in self.children():
+ child.export_as_nifti(path, dims=dims)
+
+
+ def export_as_npy(self, path:str, dims:tuple=None):
+ """Export record in numpy's NPY format to an external directory.
+
+ Args:
+ path (str): path to export directory.
+ dims (tuple, optional): when set, volumes are extracted along the given dimensions and exported in single files. If dims is not set (None), each image will be exported in its own file. Defaults to None.
+
+ See Also:
+ :func:`~export_as_png`
+ :func:`~export_as_nifti`
+ :func:`~export_as_dicom`
+ :func:`~export_as_csv`
+
+ Example:
+
+ Create a 4D series:
+
+ >>> series = db.ones((128, 128, 10, 5))
+
+ Export the series as npy, with each slice in a separate file:
+
+ >>> path = 'path\\to\\empty\\folder'
+ >>> series.export_as_npy(path)
+
+ This will create 50 npy files in the folder, one for each image. To save the entire volume in a single file, specify the dimensions of the volume:
+
+ >>> dims = ('SliceLocation', 'AcquisitionTime')
+ >>> series.export_as_npy(path, dims)
+
+ This will create a single npy file.
+
+ Note: in this case the dimensions must be specified as slice location and acquisition time because these are the default dimensions used by series creation functions like :func:`~ones`.
+ """
+ if self.name == 'Database':
+ folder = 'Database'
+ else:
+ folder = self.label()
+ path = export_path(path, folder)
+ for child in self.children():
+ child.export_as_npy(path, dims=dims)
+
+
+
  def progress(self, value: float, maximum: float, message: str=None):
  """Print progress message to the terminal..

@@ -1204,6 +1462,7 @@ class Record():
  My message:
  """
  self._mute = True
+ self.status.muted = True

  def unmute(self):
  """Allow the object from sending status updates to the user
@@ -1234,6 +1493,7 @@ class Record():
  Hello World
  """
  self._mute = False
+ self.status.muted = False

  def type(self):
  return self.__class__.__name__
@@ -1255,8 +1515,8 @@ class Record():
  return self.manager._extract(self.keys())
  #return self.manager.register.loc[self.keys(),:]

- def instances(self, sort=True, sortby=None, **kwargs):
- inst = self.manager.instances(keys=self.keys(), sort=sort, sortby=sortby, **kwargs)
+ def instances(self, sort=True, sortby=None, select={}, **kwargs):
+ inst = self.manager.instances(keys=self.keys(), sort=sort, sortby=sortby, select=select, **kwargs)
  return [self.record('Instance', uid, key) for key, uid in inst.items()]

  def images(self, sort=True, sortby=None, **kwargs):
@@ -1348,21 +1608,26 @@ class Record():
  self.manager._write_df()


-
-
-
-
  def new_instance(self, dataset=None, **kwargs):
  attr = {**kwargs, **self.attributes}
  uid, key = self.manager.new_instance(parent=self.uid, dataset=dataset, **attr)
  return self.record('Instance', uid, key, **attr)

- def set_values(self, attributes, values):
+ def _set_values(self, attributes, values):
  keys = self.keys()
  self._key = self.manager.set_values(attributes, values, keys)

  def get_values(self, attributes):
  return self.manager.get_values(attributes, self.keys())
+
+ def init_dataset(self, dtype='mri'):
+ if dtype=='mri':
+ ds = MRImage()
+ else: # dummy option for now
+ ds = MRImage()
+ for a in self.attributes:
+ ds.set_values(a, self.attributes[a])
+ return ds

  def get_dataset(self):
  ds = self.manager.get_dataset(self.uid, self.keys())
@@ -1371,45 +1636,6 @@ class Record():
  def set_dataset(self, dataset):
  self.manager.set_dataset(self.uid, dataset, self.keys())

- def export_as_dicom(self, path):
- if self.name == 'Database':
- folder = 'Database'
- else:
- folder = self.label()
- path = export_path(path, folder)
- for child in self.children():
- child.export_as_dicom(path)
-
- def export_as_png(self, path):
- if self.name == 'Database':
- folder = 'Database'
- else:
- folder = self.label()
- path = export_path(path, folder)
- for child in self.children():
- child.export_as_png(path)
-
- def export_as_csv(self, path):
- if self.name == 'Database':
- folder = 'Database'
- else:
- folder = self.label()
- path = export_path(path, folder)
- for child in self.children():
- child.export_as_csv(path)
-
- def export_as_nifti(self, path):
- if self.name == 'Database':
- folder = 'Database'
- else:
- folder = self.label()
- path = export_path(path, folder)
- for child in self.children():
- child.export_as_nifti(path)
-
- # def sort(self, sortby=['StudyDate','SeriesNumber','InstanceNumber']):
- # self.manager.register.sort_values(sortby, inplace=True)
-
  def read_dataframe(*args, **kwargs):
  return read_dataframe(*args, **kwargs)

@@ -1438,14 +1664,55 @@ class Record():
  #


- def copy_to(records, target):
+ def copy_to(records:list, parent:Record):
+ """Copy a list of records to a new parent.
+
+ Args:
+ records (list): list of Records of the same type
+ parent (Record): location for the copies.
+
+ See also:
+ `copy`
+ `move_to`
+
+ Example:
+
+ Consider the hollywood demo database:
+
+ >>> database = db.dro.database_hollywood()
+
+ There are currently two MRI studies in the database:
+
+ >>> MRIs = database.studies(StudyDescription='MRI')
+ >>> len(MRIs)
+ 2
+
+ Create a new patient and copy the MRI studies there:
+
+ >>> tarantino = database.new_patient(PatientName='Tarantino')
+ >>> db.copy_to(MRIs, tarantino)
+ >>> tarantino_MRIs = tarantino.studies()
+ >>> len(tarantino_MRIs)
+ 2
+
+ Note that all header information is automatically updated:
+
+ >>> tarantino_MRIs[0].PatientName
+ Tarantino
+
+ Since the studies were copied, the originals remain and the total number of studies in the database has increased:
+
+ >>> MRIs = database.studies(StudyDescription='MRI')
+ >>> len(MRIs)
+ 4
+ """
  if not isinstance(records, list):
- return records.copy_to(target)
+ return records.copy_to(parent)
  copy = []
- desc = target.label()
+ desc = parent.label()
  for r, record in enumerate(records):
- record.status.progress(r+1, len(records), 'Copying ' + desc)
- copy_record = record.copy_to(target)
+ record.progress(r+1, len(records), 'Copying ' + desc)
+ copy_record = record.copy_to(parent)
  if isinstance(copy_record, list):
  copy += copy_record
  else:
@@ -1453,9 +1720,55 @@ def copy_to(records, target):
  record.status.hide()
  return copy

- def move_to(records, target):
- #if type(records) is np.ndarray:
- # records = records.tolist()
+ def move_to(records:list, target:Record):
+ """Move a list of records to a new parent.
+
+ Args:
+ records (list): list of Records of the same type
+ target (Record): new parent for the records.
+
+ See also:
+ `copy`
+ `copy_to`
+
+ Example:
+
+ Consider the hollywood demo database:
+
+ >>> database = db.dro.database_hollywood()
+
+ There are currently two MRI studies in the database:
+
+ >>> MRIs = database.studies(StudyDescription='MRI')
+ >>> len(MRIs)
+ 2
+
+ Create a new patient and move the MRI studies there:
+
+ >>> tarantino = database.new_patient(PatientName='Tarantino')
+ >>> db.move_to(MRIs, tarantino)
+ >>> tarantino_MRIs = tarantino.studies()
+ >>> len(tarantino_MRIs)
+ 2
+
+ Note that all header information is automatically updated:
+
+ >>> tarantino_MRIs[0].PatientName
+ Tarantino
+
+ Since the studies were moved, the total number of studies in the database has stayed the same:
+
+ >>> MRIs = database.studies(StudyDescription='MRI')
+ >>> len(MRIs)
+ 2
+
+ And the original patients do not have any MRI studies left:
+
+ >>> jb = database.patients(PatientName = 'James Bond')
+ >>> MRIs = jb[0].studies(StudyDescription='MRI')
+ >>> len(MRIs)
+ 0
+ """
  if not isinstance(records, list):
  records = [records]
  mgr = records[0].manager
@@ -1463,7 +1776,7 @@ def move_to(records, target):
  mgr.move_to(uids, target.uid, **target.attributes)
  return records

- def group(records, into=None, inplace=False):
+ def group(records:list, into:Record=None, inplace=False)->Record:
  if not isinstance(records, list):
  records = [records]
  if into is None:
@@ -1474,17 +1787,69 @@ def group(records, into=None, inplace=False):
  copy_to(records, into)
  return into

- def merge(records, into=None, inplace=False):
+ def merge(records:list, into:Record=None, inplace=False)->Record:
+ """Merge a list of records into a single new record.
+
+ Args:
+ records (list): list of Records of the same type
+ into (Record, optional): location for the merged series. If None is provided, the merged series is created in the parent of the first record in the list. Defaults to None.
+ inplace (bool, optional): If set to True, the original series will be removed and only the merged series retained. If set to False the original series will continue to exist. Default is False.
+
+ Returns:
+ new_record (Record): the merged record.
+
+ See also:
+ `copy`
+ `copy_to`
+
+ Example:
+
+ The first patient in the hollywood demo database currently has two studies:
+
+ >>> database = db.dro.database_hollywood()
+ >>> jb = database.patients(PatientName = 'James Bond')[0]
+ >>> len(jb.studies())
+ 2
+
+ If we merge them together, the patient now has three studies, the original MRI and Xray studies, and the new merged study:
+
+ >>> new_study = db.merge(jb.studies())
+ >>> len(jb.studies())
+ 3
+ >>> jb.StudyDescription
+ ['MRI', 'New Study', 'Xray']
+
+ Since the original MRI and Xray studies had two series each, the new study now has 2+2=4 series:
+
+ >>> len(new_study.series())
+ 4
+
+ We have used here the default setting of ``inplace=False``, so the original series are preserved. To see what happens with ``inplace=True``, let's merge all 3 studies of the patient:
+
+ >>> single_jb_study = db.merge(jb.studies(), inplace=True)
+
+ Since we have merged in place, the original 3 studies have been removed and there is now only one study left.
+
+ >>> len(jb.studies())
+ 1
+
+ The new study now groups the 8 series that were in the original 3 studies:
+
+ >>> len(single_jb_study.series())
+ 8
+ """
  if not isinstance(records, list):
  records = [records]
  children = []
  for record in records:
  children += record.children()
- new_series = group(children, into=into, inplace=inplace)
+ new_record = group(children, into=into, inplace=inplace)
  if inplace:
  for record in records:
  record.remove()
- return new_series
+ return new_record
+
+


  #
@@ -1493,11 +1858,18 @@ def merge(records, into=None, inplace=False):



-
- def read_dataframe(record, tags):
+ def read_dataframe(record, tags, select={}, **filters):
  if set(tags) <= set(record.manager.columns):
- return record.register()[tags]
- instances = record.instances()
+ df = record.register()[tags]
+ filters = {**select, **filters}
+ for f in filters:
+ if f in df:
+ if isinstance(filters[f], np.ndarray):
+ df = df[df[f].isin(filters[f])]
+ else:
+ df = df[df[f] == filters[f]]
+ return df
+ instances = record.instances(select=select, **filters)
  return _read_dataframe_from_instance_array_values(instances, tags)


@@ -1518,9 +1890,4 @@ def _read_dataframe_from_instance_array_values(instances, tags):
  indices.append(index)
  data.append(values)
  instance.progress(i+1, len(instances), 'Reading dataframe..')
- return pd.DataFrame(data, index=indices, columns=tags)
-
-
-
-
-
+ return pd.DataFrame(data, index=indices, columns=tags)
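
For orientation, the sketch below pieces together the new logging and export API from the docstring examples in this diff. It is an illustration only, not documentation of the release: the `import dbdicom as db` alias, the file paths and the series shape are assumptions.

>>> import dbdicom as db                                  # assumed import alias
>>> series = db.ones((128, 128, 10, 5))                   # demo 4D series, as in the docstrings above
>>> series.set_log('path/to/logfile.txt')                 # illustrative path; 'Default' would log next to the database
>>> series.log('Exporting series...')                     # timestamped entry, also printed to the terminal
>>> dims = ('SliceLocation', 'AcquisitionTime')           # default dimensions used by the series creation functions
>>> series.export_as_npy('path/to/empty/folder', dims)    # single .npy file for the whole volume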