dbdicom 0.2.0__py3-none-any.whl → 0.2.3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of dbdicom might be problematic. Click here for more details.

Files changed (52) hide show
  1. dbdicom/__init__.py +5 -3
  2. dbdicom/create.py +77 -70
  3. dbdicom/dro.py +174 -0
  4. dbdicom/ds/dataset.py +30 -3
  5. dbdicom/ds/types/mr_image.py +18 -7
  6. dbdicom/extensions/__init__.py +10 -0
  7. dbdicom/{wrappers → extensions}/dipy.py +191 -205
  8. dbdicom/extensions/elastix.py +503 -0
  9. dbdicom/extensions/matplotlib.py +107 -0
  10. dbdicom/extensions/numpy.py +271 -0
  11. dbdicom/{wrappers → extensions}/scipy.py +131 -32
  12. dbdicom/{wrappers → extensions}/skimage.py +1 -1
  13. dbdicom/extensions/sklearn.py +243 -0
  14. dbdicom/extensions/vreg.py +1390 -0
  15. dbdicom/external/dcm4che/bin/emf2sf +57 -57
  16. dbdicom/manager.py +91 -36
  17. dbdicom/pipelines.py +66 -0
  18. dbdicom/record.py +447 -80
  19. dbdicom/types/instance.py +46 -20
  20. dbdicom/types/series.py +2182 -399
  21. dbdicom/utils/image.py +152 -21
  22. dbdicom/utils/variables.py +8 -2
  23. dbdicom/utils/vreg.py +327 -135
  24. dbdicom-0.2.3.dist-info/METADATA +88 -0
  25. dbdicom-0.2.3.dist-info/RECORD +67 -0
  26. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/WHEEL +1 -1
  27. dbdicom/external/__pycache__/__init__.cpython-310.pyc +0 -0
  28. dbdicom/external/__pycache__/__init__.cpython-37.pyc +0 -0
  29. dbdicom/external/dcm4che/__pycache__/__init__.cpython-310.pyc +0 -0
  30. dbdicom/external/dcm4che/__pycache__/__init__.cpython-37.pyc +0 -0
  31. dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-310.pyc +0 -0
  32. dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-37.pyc +0 -0
  33. dbdicom/external/dcm4che/lib/linux-x86/libclib_jiio.so +0 -0
  34. dbdicom/external/dcm4che/lib/linux-x86-64/libclib_jiio.so +0 -0
  35. dbdicom/external/dcm4che/lib/linux-x86-64/libopencv_java.so +0 -0
  36. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio.so +0 -0
  37. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis.so +0 -0
  38. dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis2.so +0 -0
  39. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio.so +0 -0
  40. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis.so +0 -0
  41. dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis2.so +0 -0
  42. dbdicom/external/dcm4che/lib/solaris-x86/libclib_jiio.so +0 -0
  43. dbdicom/external/dcm4che/lib/solaris-x86-64/libclib_jiio.so +0 -0
  44. dbdicom/wrappers/__init__.py +0 -7
  45. dbdicom/wrappers/elastix.py +0 -855
  46. dbdicom/wrappers/numpy.py +0 -119
  47. dbdicom/wrappers/sklearn.py +0 -151
  48. dbdicom/wrappers/vreg.py +0 -273
  49. dbdicom-0.2.0.dist-info/METADATA +0 -276
  50. dbdicom-0.2.0.dist-info/RECORD +0 -81
  51. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/LICENSE +0 -0
  52. {dbdicom-0.2.0.dist-info → dbdicom-0.2.3.dist-info}/top_level.txt +0 -0
dbdicom/types/series.py CHANGED
@@ -3,14 +3,17 @@ from __future__ import annotations
3
3
 
4
4
  import os
5
5
  import math
6
+ from numbers import Number
6
7
 
7
8
  import numpy as np
9
+ import pandas as pd
10
+ import nibabel as nib
8
11
 
9
12
  from dbdicom.record import Record, read_dataframe_from_instance_array
10
13
  from dbdicom.ds import MRImage
11
14
  import dbdicom.utils.image as image_utils
12
15
  from dbdicom.manager import Manager
13
- # import dbdicom.wrappers.scipy as scipy_utils
16
+ # import dbdicom.extensions.scipy as scipy_utils
14
17
  from dbdicom.utils.files import export_path
15
18
 
16
19
 
@@ -49,6 +52,8 @@ class Series(Record):
49
52
 
50
53
  # replace by clone(). Adopt implies move rather than copy
51
54
  def adopt(self, instances):
55
+ if len(instances)==0:
56
+ return []
52
57
  uids = [i.uid for i in instances]
53
58
  uids = self.manager.copy_to_series(uids, self.uid, **self.attributes)
54
59
  if isinstance(uids, list):
@@ -64,84 +69,1337 @@ class Series(Record):
64
69
  else:
65
70
  return self.record('Instance', uids, **attr)
66
71
 
67
-
68
-
69
-
70
- def export_as_npy(self, directory=None, filename=None, sortby=None, pixels_first=False):
71
- """Export array in numpy format"""
72
-
73
- if directory is None:
74
- directory = self.dialog.directory(message='Please select a folder for the png data')
75
- if filename is None:
76
- filename = self.SeriesDescription
77
- array, _ = self.get_pixel_array(sortby=sortby, pixels_first=pixels_first)
78
- file = os.path.join(directory, filename + '.npy')
79
- with open(file, 'wb') as f:
80
- np.save(f, array)
81
-
82
-
83
72
  def export_as_dicom(self, path):
84
- # instance = self.instance()
85
- # patient = "".join([c if c.isalnum() else "_" for c in instance.PatientID])
86
- # study = "".join([c if c.isalnum() else "_" for c in instance.StudyDescription])
87
- # series = "".join([c if c.isalnum() else "_" for c in instance.SeriesDescription])
88
- # path = os.path.join(os.path.join(os.path.join(path, patient), study), series)
89
- # path = export_path(path)
90
-
91
73
  folder = self.label()
92
74
  path = export_path(path, folder)
93
-
75
+ # Create a copy so that exported datasets have different UIDs.
94
76
  copy = self.copy()
95
77
  mgr = Manager(path, status=self.status)
96
78
  mgr.open(path)
97
- mgr.import_datasets(copy.files())
79
+ for i in copy.instances():
80
+ ds = i.get_dataset()
81
+ mgr.import_dataset(ds)
98
82
  copy.remove()
99
83
 
100
-
101
- def export_as_png(self, path):
102
- """Export all images as png files"""
84
+ def export_as_png(self, path, **kwargs):
85
+ #Export all images as png files
103
86
  folder = self.label()
104
87
  path = export_path(path, folder)
105
88
  images = self.images()
106
89
  for i, img in enumerate(images):
107
- img.status.progress(i+1, len(images), 'Exporting png..')
108
- img.export_as_png(path)
109
-
90
+ img.progress(i+1, len(images), 'Exporting png..')
91
+ img.export_as_png(path, **kwargs)
110
92
 
111
93
  def export_as_csv(self, path):
112
- """Export all images as csv files"""
94
+ #Export all images as csv files
113
95
  folder = self.label()
114
96
  path = export_path(path, folder)
115
97
  images = self.images()
116
98
  for i, img in enumerate(images):
117
- img.status.progress(i+1, len(images), 'Exporting csv..')
99
+ img.progress(i+1, len(images), 'Exporting csv..')
118
100
  img.export_as_csv(path)
119
101
 
102
+ def export_as_npy(self, path, dims=None):
103
+ if dims is None:
104
+ folder = self.label()
105
+ path = export_path(path, folder)
106
+ images = self.images()
107
+ for i, img in enumerate(images):
108
+ img.progress(i+1, len(images), 'Exporting npy..')
109
+ img.export_as_npy(path)
110
+ else:
111
+ array = self.pixel_values(dims)
112
+ filepath = self.label()
113
+ filepath = os.path.join(path, filepath + '.npy')
114
+ with open(filepath, 'wb') as f:
115
+ np.save(f, array)
116
+
117
+ def export_as_nifti(self, path, dims=None):
118
+ if dims is None:
119
+ folder = self.label()
120
+ path = export_path(path, folder)
121
+ affine = self.affine_matrix()
122
+ if not isinstance(affine, list):
123
+ affine = [affine]
124
+ for a in affine:
125
+ matrix = a[0]
126
+ images = a[1]
127
+ for i, img in enumerate(images):
128
+ img.progress(i+1, len(images), 'Exporting nifti..')
129
+ img.export_as_nifti(path, matrix)
130
+ else:
131
+ ds = self.instance().get_dataset()
132
+ sgroups = self.slice_groups(dims=dims)
133
+ for i, sg in enumerate(sgroups):
134
+ self.progress(i+1, len(sgroups), 'Exporting nifti..')
135
+ dicom_header = nib.nifti1.Nifti1DicomExtension(2, ds)
136
+ nifti1_image = nib.Nifti1Image(sg['ndarray'], image_utils.affine_to_RAH(sg['affine']))
137
+ nifti1_image.header.extensions.append(dicom_header)
138
+ filepath = self.label()
139
+ filepath = os.path.join(path, filepath + '[' + str(i) + '].nii')
140
+ nib.save(nifti1_image, filepath)
120
141
 
121
- def export_as_nifti(self, path: str):
122
- """Export images in nifti format.
142
+ def import_dicom(self, files):
143
+ uids = self.manager.import_datasets(files)
144
+ self.manager.move_to(uids, self.uid)
145
+
146
+
147
+
148
+ def coords(self, dims=('InstanceNumber', ), mesh=False, slice={}, coords={}, exclude=False, **filters)->dict:
149
+ """return a dictionary of coordinates.
123
150
 
124
151
  Args:
125
- path (str): path where results are to be saved.
152
+ dims (tuple, optional): Dimensions along which the shape is to be determined. If dims is not provided, they default to InstanceNumber.
153
+
154
+ Raises:
155
+ ValueError: If the dimensions do not produce suitable coordinates.
156
+
157
+ Returns:
158
 + dict: dictionary of coordinates, one entry for each dimension. The values for each coordinate are returned as an array with one dimension.
159
+
160
+ See also:
161
+ `set_coords`
162
+
163
+ Example:
164
+
165
+ Create an empty series with 3 slice dimensions:
166
+
167
+ >>> coords = {
168
+ ... 'SliceLocation': np.array([0,1,2,0,1,2]),
169
+ ... 'FlipAngle': np.array([2,2,2,10,10,10]),
170
+ ... 'RepetitionTime': np.array([1,5,15,1,5,15]),
171
+ ... }
172
+ >>> series = db.empty_series(coords)
173
+
174
+ Retrieve the coordinates:
175
+
176
+ >>> coords = series.coords(tuple(coords))
177
+ >>> coords['FlipAngle']
178
+ [2,10,2,10,2,10]
179
+ >>> coords['RepetitionTime']
180
+ [1,1,5,5,15,15]
181
+
182
+ Check the result in default dimensions:
183
+
184
+ >>> coords = series.coords()
185
+ >>> coords['InstanceNumber']
186
+ [1,2,3,4,5,6]
187
+
188
+ In this case the slice location and flip angle along are sufficient to identify the frames, so these are valid coordinates:
189
+
190
+ >>> coords = series.coords(('SliceLocation', 'FlipAngle'))
191
+ >>> coords['SliceLocation']
192
+ [0,0,1,1,2,2]
193
+
194
+ # However slice location and acquisition time are not sufficient as coordinates because each combination appears twice. So this throws an error:
195
+
196
+ >>> series.coords(('SliceLocation','RepetitionTime'))
197
+ ValueError: These are not proper coordinates. Coordinate values must be unique.
126
198
  """
127
- folder = self.label()
128
- path = export_path(path, folder)
129
- affine = self.affine_matrix()
130
- if not isinstance(affine, list):
131
- affine = [affine]
132
- for a in affine:
133
- matrix = a[0]
134
- images = a[1]
135
- for i, img in enumerate(images):
136
- img.status.progress(i+1, len(images), 'Exporting nifti..')
137
- img.export_as_nifti(path, matrix)
138
199
 
200
+ if np.isscalar(dims):
201
+ dims = (dims,)
202
+
203
+ # Default empty coordinates
204
+ vcoords = {}
205
+ for i, tag in enumerate(dims):
206
+ vcoords[tag] = np.array([])
207
+
208
+ # Get all frames and return if empty
209
+ frames = self.instances()
210
+ if frames == []:
211
+ return vcoords
212
+
213
+ # Read values and sort
214
+ fltr = {**slice, **filters}
215
+ values = [f[list(dims)+list(fltr)+list(tuple(coords))] for f in frames]
216
+ values.sort()
217
+
218
+ # Check dimensions
219
+ cvalues = [v[:len(dims)] for v in values]
220
+ cvalues = np.array(cvalues).T
221
+ _check_if_ivals(cvalues)
222
+
223
+ # Filter values
224
+ values = _filter_values(values, fltr, coords, exclude=exclude)
225
+
226
+ # If requested, mesh values
227
+ if mesh:
228
+ values = _meshvals(values)
229
+ mshape = values.shape[1:]
230
+
231
+ # Build coordinates
232
+ if values.size > 0:
233
+ for i, tag in enumerate(dims):
234
+ vcoords[tag] = values[i,...]
235
+ if mesh: # Is this necessary? Is already in the right shape
236
+ vcoords[tag] = vcoords[tag].reshape(mshape)
237
+
238
+ return vcoords
239
+
240
+
241
+ def values(self, *tags, dims=('InstanceNumber', ), return_coords=False, mesh=True, slice={}, coords={}, exclude=False, **filters)->np.ndarray:
242
+ """Return the values of one or more attributes for each frame in the series.
243
+
244
+ Args:
245
+ tag (str or tuple): either a keyword string or a (group, element) tag of a DICOM data element.
246
+ dims (tuple, optional): Dimensions of the resulting array. If *dims* is not provided, values are ordered by InstanceNumber. Defaults to None.
247
+ inds (dict, optional): Dictionary with indices to retrieve a slice of the entire array. Defaults to None.
248
+ select (dict, optional): A dictionary of values for DICOM attributes to filter the result. By default the data are not filtered.
249
+ filters (dict, optional): keyword arguments to filter the data by value of DICOM attributes.
250
+
251
+ Returns:
252
+ An `numpy.ndarray` of values with dimensions as specified by *dims*. If the value is not defined in *one or more* of the slices, an empty array is returned.
253
+
254
+ See also:
255
+ `unique`
256
+ `coords`
257
+ `gridcoords`
258
+
259
+ Note:
260
+ In order to list the values in the case one or more are absent in the headers, use `Series.unique()` instead.
261
+
262
+ Example:
263
+
264
+ Create a zero-filled series with 3 slice dimensions:
265
+
266
+ >>> coords = {
267
+ ... 'SliceLocation': 10*np.arange(4),
268
+ ... 'FlipAngle': np.array([2, 15, 30]),
269
+ ... 'RepetitionTime': np.array([2.5, 5.0]), }
270
+ >>> zeros = db.zeros((128,128,4,3,2), coords)
271
+
272
+ # If values() is called without dimensions, a flat array is returned with one value per frame, ordered by instance number:
273
+
274
+ >>> zeros.values('InstanceNumber')
275
 + [1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24]
276
 + >>> zeros.values('FlipAngle')
277
+ [2,2,15,15,30,30,2,2,15,15,30,30,2,2,15,15,30,30,2,2,15,15,30,30]
278
+
279
+ if dimensions are provided, an array of the appropriate shape is returned:
280
+
281
+ >>> dims = tuple(coords)
282
+ >>> tacq = series.values('AcquisitionTime', dims)
283
+ >>> tacq.shape
284
+ (4,3,2)
285
+ >>> tacq[0,0,0]
286
+ 28609.057496
287
+
288
+ In this case all values are the same:
289
+
290
+ >>> np.unique(tacq)
291
+ [28609.057496]
292
+
293
+ If a value is not defined in the header, None is returned:
294
+ >>> series.values('Gobbledigook')[:2]
295
+ [None None]
296
+
297
+ Specify keywords to select a subset of values:
298
+
299
+ >>> tacq = zeros.values('AcquisitionTime', dims, FlipAngle=15)
300
+ >>> tacq.shape
301
+ (4, 1, 2)
302
+
303
 + If none exist, an empty array is returned:
304
+
305
+ >>> tacq = zeros.values('AcquisitionTime', dims, FlipAngle=0)
306
+ >>> tacq.size
307
+ 0
308
+
309
+ Multiple possible values can be selected with arrays:
310
+
311
+ >>> tacq = zeros.values('AcquisitionTime', dims, FlipAngle=np.array([15,30]))
312
+ >>> tacq.shape
313
+ (4, 2, 2)
314
+
315
+ Any number of keywords can be added as filters:
316
+
317
+ >>> tacq = zeros.values('AcquisitionTime', dims, FlipAngle=np.array([15,30]), SliceLocation=np.array([10,20]))
318
+ >>> tacq.shape
319
+ (2, 2, 2)
320
+
321
 + Filters can also be set using the *select* argument:
322
+
323
+ >>> tacq = zeros.values('AcquisitionTime', dims, select={'FlipAngle': 15})
324
+ >>> tacq.shape
325
+ (4, 1, 2)
326
+
327
+ This also allows (group, element) tags:
328
+
329
+ >>> tacq = zeros.values('AcquisitionTime', dims, select={(0x0018, 0x1314): 15})
330
+ >>> tacq.shape
331
+ (4, 1, 2)
332
+
333
+ Selections can also be made using indices rather than values:
334
+
335
+ >>> tacq = zeros.values('FlipAngle', dims, inds={'FlipAngle': 1})
336
+ >>> tacq.shape
337
+ (4, 1, 2)
338
+
339
+ >>> tacq = zeros.values('AcquisitionTime', dims, inds={'FlipAngle':np.arange(2)})
340
+ >>> tacq.shape
341
+ (4, 2, 2)
342
+ """
343
+
344
+ if np.isscalar(dims):
345
+ dims = (dims,)
346
+
347
+ # Default return values
348
+ values = np.array([]).reshape((0,0))
349
+ vcoords = {}
350
+ for i, tag in enumerate(dims):
351
+ vcoords[tag] = np.array([])
352
+
353
+ # Get all frames and return if empty
354
+ frames = self.instances()
355
+ if frames == []:
356
+ if return_coords:
357
+ return values, vcoords
358
+ return values
359
+
360
+ # Read values and sort
361
+ filters = {**slice, **filters}
362
+ values = []
363
+ for i, f in enumerate(frames):
364
+ self.progress(i+1,len(frames), 'Reading values..')
365
+ v = f[list(dims)+list(tags)+list(tuple(filters))+list(tuple(coords))]
366
+ values.append(v)
367
+ fsort = sorted(range(len(values)), key=lambda k: values[k][:len(dims)])
368
+ values = [values[i] for i in fsort]
369
+
370
+ # Check if dimensions are proper
371
+ # Need object array here because the values can be different type including lists.
372
+ cvalues = [v[:len(dims)] for v in values]
373
+ cvalues = np.array(cvalues, dtype=object).T
374
+ _check_if_ivals(cvalues)
375
+
376
+ # Filter values
377
+ values = _filter_values(values, filters, coords, exclude=exclude)
378
+ if values.size == 0:
379
+ if return_coords:
380
+ if len(tags) == 1:
381
+ return values, vcoords
382
+ else:
383
+ values = [np.array([]) for _ in range(len(tags))]
384
+ return tuple(values) + (vcoords,)
385
+ return values
386
+ cvalues = values[:len(dims),:]
387
+ values = values[len(dims):,:]
388
+
389
+ # If requested, mesh values
390
+ if mesh:
391
+ cmesh = _meshvals(cvalues)
392
+ values = _meshdata(values, cvalues, cmesh)
393
+ cvalues = cmesh
394
+
395
+ # Create return values
396
+ if len(tags) == 1:
397
+ values = values[0,...]
398
+ else:
399
+ values = [values[i,...] for i in range(values.shape[0])]
400
+ values = tuple(values)
401
+
402
+ if return_coords:
403
+ for i, tag in enumerate(dims):
404
+ vcoords[tag] = cvalues[i,...]
405
+ if len(tags) == 1:
406
+ return values, vcoords
407
+ else:
408
+ return values + (vcoords,)
409
+ else:
410
+ return values
411
+
412
+
413
+ def frames(self, dims=('InstanceNumber', ), return_coords=False, return_vals=(), mesh=True, slice={}, coords={}, exclude=False, **filters):
414
+ """Return the frames of given coordinates in the correct order"""
415
+
416
+ if np.isscalar(dims):
417
+ dims = (dims,)
418
+
419
+ # Default return values
420
+ values = np.array([]).reshape((0,0))
421
+ vcoords = {}
422
+ for i, tag in enumerate(dims):
423
+ vcoords[tag] = np.array([])
424
+ if mesh:
425
+ fshape = tuple([0]*len(dims))
426
+ else:
427
+ fshape = (0,)
428
+
429
+ # Get all frames and return if empty
430
+ frames_sel = self.instances()
431
+ if frames_sel == []:
432
+
433
+ # Empty return values
434
+ frames = np.array([]).reshape(fshape)
435
+ rval = (frames,)
436
+ if return_coords:
437
+ rval += (vcoords, )
438
+ if return_vals != ():
439
+ rval += (values, )
440
+ if len(rval)==1:
441
+ return rval[0]
442
+ else:
443
+ return rval
444
+
445
+ # Read values and sort
446
+ filters = {**slice, **filters}
447
+ values = [f[list(dims)+list(return_vals)+list(tuple(filters))+list(tuple(coords))] for f in frames_sel]
448
+ fsort = sorted(range(len(values)), key=lambda k: values[k][:len(dims)])
449
+ values = [values[i] for i in fsort]
450
+
451
+ # Check dimensions
452
+ cvalues = [v[:len(dims)] for v in values]
453
+ cvalues = np.array(cvalues).T
454
+ _check_if_ivals(cvalues)
455
+
456
+ # Create array of frames.
457
+ frames = np.empty(len(frames_sel), dtype=object)
458
+ for i in range(len(fsort)):
459
+ frames[i] = frames_sel[fsort[i]]
460
+
461
+ # Filter values
462
+ finds = _filter_values_ind(values, filters, coords, exclude=exclude)
463
+ if finds.size==0:
464
+ # Empty return values
465
+ frames = np.array([]).reshape(fshape)
466
+ rval = (frames,)
467
+ if return_coords:
468
+ rval += (vcoords, )
469
+ if return_vals != ():
470
+ rval += (np.array([]), )
471
+ if len(rval)==1:
472
+ return rval[0]
473
+ else:
474
+ return rval
475
+ frames = frames[finds]
476
+ values = _filter_values(values, filters, coords, exclude=exclude)
477
+ cvalues = values[:len(dims),:]
478
+ values = values[len(dims):,:]
479
+
480
+ # If requested, mesh values
481
+ if mesh:
482
+ cmesh = _meshvals(cvalues)
483
+ values = _meshdata(values, cvalues, cmesh)
484
+ frames = _meshdata(frames.reshape((1,frames.size)), cvalues, cmesh)
485
+ frames = frames[0,...]
486
+ cvalues = cmesh
487
+
488
+ # Create return values
489
+ rval = (frames,)
490
+ if return_coords:
491
+ for i, tag in enumerate(dims):
492
+ vcoords[tag] = cvalues[i,...]
493
+ rval += (vcoords, )
494
+ if return_vals != ():
495
+ rval += (values, )
496
+ if len(rval)==1:
497
+ return rval[0]
498
+ else:
499
+ return rval
500
+
501
+
502
+ def expand(self, coords={}, gridcoords={}): # gridcoords -> slice
503
+
504
+ if coords != {}:
505
+ pass
506
+ elif gridcoords != {}:
507
+ coords = _grid_to_coords(gridcoords)
508
+ else:
509
+ msg = 'Cannot expand without new coordinates'
510
+ raise ValueError(msg)
511
+
512
+ # If the series is not empty, first check that the new coordinates are valid.
513
+ if not self.empty():
514
+ current_coords = self.coords(tuple(coords))
515
+ try:
516
+ _concatenate_coords((current_coords, coords))
517
+ except:
518
+ msg = 'Cannot expand - the new coordinates overlap with existing coordinates.'
519
+ raise ValueError(msg)
520
+
521
+ # Expand the series to the new coordinates
522
+ size = _coords_size(coords)
523
+ for i in range(size):
524
+ ds = self.init_dataset()
525
+ for c in coords:
526
+ ds.set_values(c, coords[c].ravel()[i])
527
+ self.new_instance(ds)
528
+
529
+
530
+ def set_coords(self, new_coords:dict, dims=(), slice={}, coords={}, **filters):
531
+ """Set a dictionary of coordinates.
532
+
533
+ Args:
534
+ coords (dict): Dictionary of coordinates.
535
 + dims (tuple, optional): Dimensions at which the new coordinates are to be set. If *dims* is not set, the dimensions are assumed to be the same as those of *coords* or *grid*. Defaults to None.
536
+
537
+ Raises:
538
+ ValueError: if the coordinates provided are not properly formatted or have the wrong shape.
539
+
540
+ See also:
541
+ `coords`
542
+ `set_gridcoords`
543
+
544
+ Example:
545
+
546
+ Create an empty series:
547
+
548
+ >>> coords = {
549
+ ... 'SliceLocation': np.array([0,1,2,0,1,2]),
550
+ ... 'FlipAngle': np.array([2,2,2,10,10,10]),
551
+ ... 'RepetitionTime': np.array([1,5,15,1,5,15]),
552
+ ... }
553
+ >>> series = db.empty_series(coords)
554
+
555
 + Change the flip angle of 2 to 5:
556
+
557
+ >>> coords = series.coords(tuple(coords))
558
+ >>> fa = coords['FlipAngle']
559
+ >>> fa[np.where(fa==2)] = 5
560
+ >>> series.set_coords(coords)
561
+
562
+ Check the new coordinates:
563
+
564
+ >>> new_coords = series.coords(dims)
565
+ >>> new_coords['FlipAngle']
566
+ [5,10,5,10,5,10]
567
+
568
+ Create a new set of coordinates along slice location and acquisition time:
569
+
570
+ >>> new_coords = {
571
+ ... 'SliceLocation': np.array([0,0,1,1,2,2]),
572
+ ... 'AcquisitionTime': np.array([0,60,0,60,0,60]),
573
+ ... }
574
+ >>> series.set_coords(new_coords, ('SliceLocation', 'FlipAngle'))
575
+
576
+ # Inspect the new coordinates - each slice now has two acquisition times corresponding to the flip angles:
577
+
578
+ >>> coords['SliceLocation']
579
+ [0,0,1,1,2,2]
580
+ >>> coords['AcquisitionTime']
581
+ [0,60,0,60,0,60]
582
+ >>> coords['FlipAngle']
583
+ [5,10,5,10,5,10]
584
+
585
+ # Check that an error is raised if coordinate values have different sizes:
586
+ >>> new_coords = {
587
+ ... 'SliceLocation': np.zeros(24),
588
+ ... 'AcquisitionTime': np.ones(25),
589
+ ... }
590
+ >>> series.set_coords(new_coords, dims)
591
+ ValueError: Coordinate values must all have the same size
592
+
593
+ # An error is also raised if they have all the same size but the values are not unique:
594
+
595
+ >>> new_coords = {
596
+ ... 'SliceLocation': np.zeros(24),
597
+ ... 'AcquisitionTime': np.ones(24),
598
+ ... }
599
+ >>> series.set_coords(new_coords, dims)
600
+ ValueError: Coordinate values must all have the same size
601
+
602
+ # .. or when the number does not match up with the size of the series:
603
+
604
+ >>> new_coords = {
605
+ ... 'SliceLocation': np.arange(25),
606
+ ... 'AcquisitionTime': np.arange(25),
607
+ ... }
608
+ >>> series.set_coords(new_coords, dims)
609
+ ValueError: Shape of coordinates does not match up with the size of the series.
610
+
611
+ """
612
+ if dims == ():
613
+ dims = tuple(new_coords)
614
+ elif np.isscalar(dims):
615
+ dims = (dims,)
616
+ new_coords = _check_if_coords(new_coords)
617
+ frames = self.frames(dims, slice=slice, coords=coords, **filters)
618
+ if frames.size == 0:
619
+ # If the series is empty, assignment of coords is unambiguous
620
+ self.expand(new_coords)
621
+ else:
622
+ size = _coords_size(new_coords)
623
+ if size != frames.size:
624
+ msg = 'Cannot set ' + str(size) + ' coordinates in ' + str(frames.size) + ' frames.'
625
+ msg += '\nThe number of new coordinates must equal the number of frames.'
626
+ raise ValueError(msg)
627
+ # If setting a subset, check if the new set of coordinates is valid
628
+ if len({**slice, **coords, **filters}) > 0:
629
+ complement = self.coords(dims, slice=slice, coords=coords, exclude=True, **filters)
630
+ if _coords_size(complement) > 0:
631
+ try:
632
+ _concatenate_coords((new_coords, complement))
633
+ except:
634
+ msg = 'Cannot set coordinates - this would produce invalid coordinates for the series'
635
+ raise ValueError(msg)
636
+ frames = frames.flatten()
637
+ values = _coords_vals(new_coords)
638
+ for f, frame in enumerate(frames):
639
+ frame[list(new_coords)] = list(values[:,f])
640
+
641
+
642
+ def set_values(self, values, tags, dims=('InstanceNumber', ), slice={}, coords={}, **filters):
643
+ # Note tags, values is a more logical order considering we have self.values(tags)
644
+ """Set the values of an attribute.
645
+
646
+ Args:
647
+ tag: either a keyword string or a (group, element) tag of a DICOM data element.
648
+ value: a single value or a numpy array of values for the attribute.
649
+ dims (tuple, optional): Dimensions of *value*. If *value* is a single value, *dims* is ignored. Otherwise, if *dim* is not provided, values are ordered by instance number. Defaults to None.
650
+
651
+ Raises:
652
+ ValueError: if the size of *value* does not match the size of the series.
653
+
654
+ See also:
655
+ `value`
656
+
657
+ Example:
658
+
659
+ Create a zero-filled series with 3 slice dimensions.
660
+
661
+ >>> loc = np.arange(4)
662
+ >>> fa = [2, 15, 30]
663
+ >>> tr = [2.5, 5.0]
664
+ >>> coords = {
665
+ ... 'SliceLocation': np.arange(4),
666
+ ... 'FlipAngle': [2, 15, 30],
667
+ ... 'RepetitionTime': [2.5, 5.0] }
668
+ >>> series = db.zeros((128,128,8,3,2), coords)
669
+
670
+ Change the acquisition time of the series to midnight (0 sec):
671
+
672
+ >>> series.value('AcquisitionTime')
673
+ 28609.057496
674
+ >>> series.set_value('AcquisitionTime', 0)
675
+ >>> series.value('AcquisitionTime')
676
+ 0
677
+
678
+ Set the acquisition time to a different value for each flip angle:
679
+
680
+ >>> tacq = np.repeat(60*np.arange(3), 8)
681
+ >>> series.set_value('AcquisitionTime', tacq, dims=('FlipAngle','InstanceNumber'))
682
+
683
+ Set the acquisition time to a different value for each flip angle and acquisition time:
684
+
685
+ >>> tacq = np.repeat(60*np.arange(6), 4)
686
+ >>> series.set_value('AcquisitionTime', tacq, dims=('FlipAngle','RepetitionTime','SliceLocation'))
687
+
688
+ Note: the size of the value and of the series need to match up. If not, an error is raised:
689
+
690
+ >>> series.set_value('AcquisitionTime', np.arange(25), dims=tuple(coords))
691
+ ValueError: The size of the value array is different from the size of the series.
692
+ The value array has shape (25,), but the series has shape (4, 3).
693
+
694
+ """
695
+
696
+ if np.isscalar(dims):
697
+ dims = (dims,)
698
+
699
+ if not isinstance(values, tuple):
700
+ self.set_values((values,), (tags,), dims=dims, slice=slice, coords=coords, **filters)
701
+ return
702
+
703
+ # Get frames to set:
704
+ frames = self.frames(dims, mesh=False, slice=slice, coords=coords, **filters)
705
+ if frames.size == 0:
706
+ msg = 'Cannot set values to an empty series. Use Series.expand() to create empty frames first.'
707
+ raise ValueError(msg)
708
+
709
+ # Check that values all have the proper format:
710
+ values = list(values)
711
+ for i, v in enumerate(values):
712
+ #if not isinstance(v, np.ndarray):
713
+ # values[i] = np.full(frames.shape, v)
714
+ if isinstance(v, np.ndarray):
715
+ if values[i].size != frames.size:
716
+ msg = 'Cannot set values: number of values does not match number of frames.'
717
+ raise ValueError(msg)
718
+ values[i] = values[i].ravel()
719
+
720
+ # Set values
721
+ for f, frame in enumerate(frames):
722
+ self.progress(f+1, frames.size, 'Writing values..')
723
+ frame[list(tags)] = [v if np.isscalar(v) else v[f] for v in values]
724
+ #frame[list(tags)] = [v[f] for v in values]
725
+
726
+
727
+ def set_gridcoords(self, gridcoords:dict, dims=(), slice={}, coords={}, **filters):
728
+ """ Set a dictionary of grid coordinates.
729
+
730
+ Args:
731
+ coords (dict): dictionary of grid coordinates
732
 + dims (tuple, optional): Dimensions at which the new coordinates are to be set. If *dims* is not set, the dimensions are assumed to be the same as those of *coords* or *grid*. Defaults to None.
733
+
734
+ See also:
735
+ `gridcoords`
736
+ `set_coords`
737
+
738
+ Examples:
739
+
740
+ Create an empty series with 3 slice dimensions:
741
+
742
+ >>> gridcoords = {
743
+ ... 'SliceLocation': np.arange(4),
744
+ ... 'FlipAngle': np.array([2, 15, 30]),
745
+ ... 'RepetitionTime': np.array([2.5, 5.0]),
746
+ ... }
747
+ >>> series = db.empty_series()
748
+ >>> series.set_gridcoords(gridcoords)
749
+
750
+ Get the coordinates as a mesh
751
+
752
+ >>> dims = tuple(gridcoords)
753
+ >>> coords = series.meshcoords(dims)
754
+ >>> coords['SliceLocation'].shape
755
+ (4, 3, 2)
756
+ >>> coords['FlipAngle'][1,1,1]
757
+ 15
758
+ """
759
+ setcoords = _grid_to_coords(gridcoords)
760
+ self.set_coords(setcoords, dims=dims, slice=slice, coords=coords, **filters)
761
+
762
+
763
+ def gridcoords(self, dims=('InstanceNumber', ), slice={}, coords={}, exclude=False, **filters)->dict:
764
+ """return a dictionary of grid coordinates.
765
+
766
+ Args:
767
+ dims (tuple): Attributes to be used as coordinates.
768
+
769
+ Returns:
770
+ dict: dictionary of coordinates, one entry for each dimension.
771
+
772
+ See also:
773
+ `coords`
774
+ `set_gridcoords`
775
+
776
+ Examples:
777
+
778
+ Create an empty series with 3 slice dimensions:
779
+
780
+ >>> gridcoords = {
781
+ ... 'SliceLocation': np.arange(4),
782
+ ... 'FlipAngle': np.array([2, 15, 30]),
783
+ ... 'RepetitionTime': np.array([2.5, 5.0]),
784
+ ... }
785
+ >>> series = db.empty_series(gridcoords=gridcoords)
786
+
787
+ Recover the grid coordinates:
788
+
789
+ >>> gridcoords_rec = series.gridcoords(tuple(gridcoords))
790
+ >>> coords_rec['SliceLocation']
791
+ [0. 1. 2. 3.]
792
+ >>> coords_rec['FlipAngle']
793
+ [ 2. 15. 30.]
794
+ >>> coords_rec['RepetitionTime']
795
+ [2.5 5. ]
796
+
797
+ Note an error is raised if the coordinates are not grid coordinates:
798
+
799
+ >>> coords = {
800
+ ... 'SliceLocation': np.array([0,1,2,0,1,2]),
801
+ ... 'FlipAngle': np.array([10,10,10,2,2,2]),
802
+ ... 'RepetitionTime': np.array([1,5,15,1,5,15]),
803
+ ... }
804
+ >>> series = db.empty_series(coords)
805
+
806
+ The coordinates form a proper mesh, so this works fine:
807
+
808
+ >>> coords = series.meshcoords(tuple(coords))
809
+
810
+ But this raises an error:
811
+
812
+ >>> series.gridcoords(tuple(coords))
813
+ ValueError: These are not grid coordinates.
814
+ """
815
+ meshcoords = self.coords(dims=dims, mesh=True, slice=slice, coords=coords, exclude=exclude, **filters)
816
+ return _meshcoords_to_grid(meshcoords)
817
+
818
+
819
+ def shape(self, dims=('InstanceNumber', ), mesh=True, slice={}, coords={}, exclude=False, **filters)->tuple:
820
+ """Return the shape of the series along given dimensions.
821
+
822
+ Args:
823
+ dims (tuple, optional): Dimensions along which the shape is to be determined. If dims is not provided, the shape of the flattened series is returned. Defaults to None.
824
+
825
+ Returns:
826
+ tuple: one value for each element of dims.
827
+
828
+ Raises:
829
+ ValueError: if the shape in the specified dimensions is ambiguous (because the number of slices is not unique at each location)
830
+ ValueError: if the shape in the specified dimensions is not well defined (because there is no slice at one or more locations).
831
+
832
+ See also:
833
+ `coords`
834
+ `gridcoords`
835
+ `spacing`
836
+
837
+ Example:
838
+
839
+ Create a zero-filled series with 3 dimensions.
840
+
841
+ >>> coords = {
842
+ >>> 'SliceLocation': np.arange(4),
843
+ >>> 'FlipAngle': [2, 15, 30],
844
+ >>> 'RepetitionTime': [2.5, 5.0] }
845
+ >>> series = db.zeros((128,128,4,3,2), coords)
846
+
847
+ Check the shape of a flattened series:
848
+ >>> series.shape()
849
+ (24,)
850
+
851
+ Check the shape along all 3 dimensions:
852
+
853
+ >>> dims = tuple(coords)
854
+ >>> series.shape(dims)
855
+ (4, 3, 2)
856
+
857
+ Swap the first two dimensions:
858
+
859
+ >>> series.shape((dims[1], dims[0], dims[2]))
860
+ (3, 4, 2)
861
+
862
+ Determine the shape along another DICOM attribute:
863
+
864
+ >>> series.shape(('FlipAngle', 'InstanceNumber'))
865
+ (3, 8)
866
+
867
+ The shape of an empty series is zero along any dimension:
868
+
869
+ >>> series.new_sibling().shape(dims)
870
+ (0, 0, 0)
871
+
872
+ If one or more of the dimensions is not defined in the header, this raises an error:
873
+
874
+ >>> series.shape(('FlipAngle', 'Gobbledigook'))
875
+ ValueError: series shape is not well defined in dimensions (FlipAngle, Gobbledigook, )
876
+ --> Some of the dimensions are not defined in the header.
877
+ --> Hint: use Series.value() to find the undefined values.
878
+
879
+ An error is also raised if the values are defined, but are not unique. In this case, all acquisition times are the same so this raises an error:
880
+
881
+ >>> series.shape(('FlipAngle', 'AcquisitionTime'))
882
+ ValueError: series shape is ambiguous in dimensions (FlipAngle, AcquisitionTime, )
883
+ --> Multiple slices exist at some or all locations.
884
+ --> Hint: use Series.unique() to list the values at all locations.
885
+
886
+ """
887
+ frames = self.frames(dims=dims, mesh=mesh, slice=slice, coords=coords, exclude=exclude, **filters)
888
+ return frames.shape
889
+
890
+
891
+ def unique(self, *tags, sortby=(), slice={}, coords={}, exclude=False, return_locs=False, **filters) -> np.ndarray:
892
+ """Return the unique values of an attribute, sorted by any number of variables.
893
+
894
+ Args:
895
+ tag: either a keyword string or a (group, element) tag of a DICOM data element.
896
+ sortby (tuple, optional): Dimensions of the resulting array. If *sortby* is not provided, then an array of unique values is returned.
897
+
898
+ Returns:
899
+ np.ndarray: a sorted array of unique values of the attribute, with dimensions as specified by *dims*. If *dims* is provided, the result has the dimensions of *dims* and each element of the array is an array unique values.
900
+
901
+ See also:
902
+ `value`
903
+ `unique_affines`
904
+ `coords`
905
+ `gridcoords`
906
+
907
+ Example:
908
+ Create a zero-filled series with 3 slice dimensions:
909
+
910
+ >>> loc = np.arange(4)
911
+ >>> fa = [2, 15, 30]
912
+ >>> tr = [2.5, 5.0]
913
+ >>> coords = {
914
+ ... 'SliceLocation': np.arange(4),
915
+ ... 'FlipAngle': [2, 15, 30],
916
+ ... 'RepetitionTime': [2.5, 5.0] }
917
+ >>> series = db.zeros((128,128,4,3,2), coords)
918
+
919
+ Recover the unique values of any coordinate, such as the flip angle:
920
+
921
+ >>> series.value('FlipAngle')
922
+ [ 2. 15. 30.]
923
+
924
+ List the flip angles for each slice location separately:
139
925
 
140
- def subseries(*args, move=False, **kwargs):
141
- return subseries(*args, move=move, **kwargs)
926
+ >>> fa = series.unique('FlipAngle', sortby=('SliceLocation', ))
927
+ >>> fa[0]
928
+ [ 2. 15. 30.]
929
+ >>> fa[3]
930
+ [ 2. 15. 30.]
142
931
 
932
+ List the flip angles for each slice location and repetition time:
933
+
934
+ >>> fa = series.unique('FlipAngle', sortby=('SliceLocation', 'RepetitionTime'))
935
+ >>> fa.shape
936
+ (4, 2)
937
+ >>> fa[1,1]
938
+ [ 2. 15. 30.]
939
+
940
+ Getting the values for a non-existing attribute produces an empty array:
941
+
942
+ >>> gbbl = series.unique('Gobbledigook')
943
+ >>> gbbl.size
944
+ 0
945
+ >>> gbbl.shape
946
+ (0,)
947
+
948
+ Getting a non-existing attribute for each slice location produces an array of the expected shape, where each element is an empty array:
949
+
950
+ >>> gbbl = series.unique('Gobbledigook', sortby=('SliceLocation',))
951
+ >>> gbbl.shape
952
+ (4,)
953
+ >>> gbbl.size
954
+ 4
955
+ >>> gbbl[-1].size
956
+ 0
957
+ """
958
+ # If no sorting is required, return an array of unique values
959
+
960
+ vals = self.values(*(tags+sortby), slice=slice, coords=coords, exclude=exclude, **filters)
961
+
962
+ if sortby == ():
963
+ if len(tags) == 1:
964
+ uv = vals[vals != np.array(None)]
965
+ return np.unique(uv)
966
+ uvals = []
967
+ for v in vals:
968
+ uv = v[v != np.array(None)]
969
+ uvals.append(np.unique(uv))
970
+ return tuple(uvals)
971
+
972
+ # Create a flat location array
973
+ loc = []
974
+ for k in range(len(sortby)):
975
+ v = vals[len(tags)+k]
976
+ v = v[v != np.array(None)]
977
+ loc.append(np.unique(v))
978
+ loc = np.meshgrid(*tuple(loc), indexing='ij')
979
+ shape = loc[0].shape
980
+ loc = [l.ravel() for l in loc]
981
+
982
+ # Build an array of unique values at each location and each tag
983
+ uvals = np.empty((len(tags), loc[0].size), dtype=np.ndarray)
984
+ for i in range(loc[0].size):
985
+ k = 0
986
+ ind = vals[len(tags)+k] == loc[k][i]
987
+ for k in range(1, len(sortby)):
988
+ ind = ind & (vals[len(tags)+k] == loc[k][i])
989
+ for t in range(len(tags)):
990
+ vti = vals[t][ind]
991
+ vti = vti[vti != np.array(None)]
992
+ uvals[t,i] = np.unique(vti)
993
+
994
+ # Refactor to return values
995
+ if len(tags) == 1:
996
+ uvals = uvals[0,:].reshape(shape)
997
+ else:
998
+ uvals = [uvals[t,:].reshape(shape) for t in range(len(tags))]
999
+ uvals = tuple(uvals)
1000
+ if return_locs:
1001
+ loc = [l.reshape(shape) for l in loc]
1002
+ loc = tuple(loc)
1003
+ return uvals, loc
1004
+ else:
1005
+ return uvals
1006
+
1007
+
1008
+ def pixel_values(self, dims=('InstanceNumber', ), return_coords=False, slice={}, coords={}, **filters) -> np.ndarray:
1009
+ """Return a numpy.ndarray with pixel data.
1010
+
1011
+ Args:
1012
+ dims (tuple, optional): Dimensions of the result, as a tuple of valid DICOM tags of any length. If *dims* is not provided, pixel values are ordered by instance number. Defaults to None.
1013
+ inds (dict, optional): Dictionary with indices to retrieve a slice of the entire array. Defaults to None.
1014
+ select (dict, optional): A dictionary of values for DICOM attributes to filter the result. By default the data are not filtered.
1015
+ filters (dict, optional): keyword arguments to filter the data by value of DICOM attributes.
1016
+
1017
+ Returns:
1018
+ np.ndarray: pixel data. The number of dimensions will be 2 plus the number of elements in *dim*. The first two indices will enumerate (column, row) indices in the slice, the other dimensions are as specified by the *dims* argument.
1019
+
1020
+ The function returns an empty array when no data are found at the specified locations.
1021
+
1022
+ Raises:
1023
+ ValueError: Indices must be in the dimensions provided. If *ind* is set but keys are not part of *dims*.
1024
+ ValueError: if the images are different shapes.
1025
+
1026
+ See also:
1027
+ `set_pixel_values`
1028
+
1029
+ Example:
1030
+ Create a zero-filled array with 3 slice dimensions:
1031
+
1032
+ >>> coords = {
1033
+ ... 'SliceLocation': 10*np.arange(4),
1034
+ ... 'FlipAngle': np.array([2, 15, 30]),
1035
+ ... 'RepetitionTime': np.array([2.5, 5.0]),
1036
+ ... }
1037
+ >>> zeros = db.zeros((128,64,4,3,2), coords)
1038
+
1039
+ Retrieve the pixel array of the series:
1040
+
1041
+ >>> dims = tuple(coords)
1042
+ >>> array = zeros.pixel_values(dims)
1043
+ >>> array.shape
1044
+ (128, 64, 4, 3, 2)
1045
+
1046
+ To retrieve an array containing only the data with flip angle 15:
1047
+
1048
+ >>> array = zeros.pixel_values(dims, FlipAngle=15)
1049
+ >>> array.shape
1050
+ (128, 64, 4, 1, 2)
1051
+
1052
+ If no data fit the requirement, an empty array is returned:
1053
+
1054
+ >>> array = zeros.pixel_values(dims, FlipAngle=45)
1055
+ >>> array.size
1056
+ 0
1057
+
1058
+ Multiple possible values can be specified as an array:
1059
+
1060
+ >>> array = zeros.pixel_values(dims, FlipAngle=np.array([15,30]))
1061
+ >>> array.shape
1062
+ (128, 64, 4, 2, 2)
1063
+
1064
+ And multiple filters can be specified by adding keyword arguments. The following returns an array of pixel values with flip angle of 15 or 30, and slice location of 10 or 20:
1065
+
1066
+ >>> array = zeros.pixel_values(dims, FlipAngle=np.array([15,30]), SliceLocation=np.array([10,20]))
1067
+ >>> array.shape
1068
+ (128, 64, 2, 2, 2)
1069
+
1070
+ The filters can be any DICOM attribute:
1071
+
1072
+ >>> array = zeros.pixel_values(dims, AcquisitionTime=0)
1073
+ >>> array.size
1074
+ 0
1075
+
1076
+ The filters can also be specified as a dictionary of values:
1077
+
1078
+ >>> array = zeros.pixel_values(dims, select={'FlipAngle': 15})
1079
+ >>> array.shape
1080
+ (128, 64, 4, 1, 2)
1081
+
1082
+ Since keywords need to be strings in python, this is the only way to specify filters with (group, element) tags:
1083
+
1084
+ >>> array = zeros.pixel_values(dims, select={(0x0018, 0x1314): 15})
1085
+ >>> array.shape
1086
+ (128, 64, 4, 1, 2)
1087
+
1088
+ Using the *inds* argument, the pixel array can be indexed to avoid reading a large array if only a subarray is required:
1089
+
1090
+ >>> array = zeros.pixel_values(dims, inds={'FlipAngle': 1})
1091
+ >>> array.shape
1092
+ (128, 64, 4, 1, 2)
1093
+
1094
+ Note unlike filters defined by *value*, the indices must be provided in the dimensions of the array. If not, a `ValueError` is raised:
1095
+
1096
+ >>> zeros.pixel_values(dims, inds={'AcquisitionTime':0})
1097
+ ValueError: Indices must be in the dimensions provided.
1098
+ """
1099
+ if np.isscalar(dims):
1100
+ dims = (dims,)
1101
+ frames = self.frames(dims, return_coords=return_coords, slice=slice, coords=coords, **filters)
1102
+ if return_coords:
1103
+ frames, fcoords = frames
1104
+ if frames.size == 0:
1105
+ shape = (0,0) + frames.shape
1106
+ values = np.array([]).reshape(shape)
1107
+ if return_coords:
1108
+ return values, fcoords
1109
+ else:
1110
+ return values
1111
+
1112
+ # Read values
1113
+ fshape = frames.shape
1114
+ frames = frames.ravel()
1115
+ values = []
1116
+ for f, frame in enumerate(frames):
1117
+ self.progress(f+1, len(frames), 'Reading pixel values..')
1118
+ values.append(frame.get_pixel_array())
1119
+
1120
+ # Check that all matrix sizes are the same
1121
+ vshape = np.array([v.shape for v in values])
1122
+ vshape = np.unique(vshape.T, axis=1)
1123
+ if vshape.shape[1] > 1:
1124
+ msg = 'Cannot extract an array of pixel values - not all frames have the same matrix size.'
1125
+ raise ValueError(msg)
1126
+
1127
+ # Create the array
1128
+ values = np.stack(values, axis=-1)
1129
+ values = values.reshape(values.shape[:2] + fshape)
1130
+ if return_coords:
1131
+ return values, fcoords
1132
+ else:
1133
+ return values
1134
+
1135
+
1136
+ def set_pixel_values(self, values:np.ndarray, dims:tuple=None, slice={}, coords={}, **filters):
1137
+ """Set a numpy.ndarray with pixel data.
1138
+
1139
+ Args:
1140
+ dims (tuple, optional): Dimensions of the pixel values, as a tuple of valid DICOM tags of any length. If *dims* is not provided, pixel values are ordered by instance number. Defaults to None.
1141
+ inds (dict, optional): Dictionary with indices to set a slice of the entire array. Defaults to None.
1142
+ select (dict, optional): A dictionary of values for DICOM attributes to set specific frames.
1143
+ filters (dict, optional): keyword arguments to set specific frames.
1144
+
1145
+ Raises:
1146
+ ValueError: if the values are the incorrect shape for the dimensions.
1147
+
1148
+ See also:
1149
+ `pixel_values`
1150
+
1151
+ Example:
1152
+ Create a zero-filled array with 3 slice dimensions:
1153
+
1154
+ >>> coords = {
1155
+ ... 'SliceLocation': 10*np.arange(4),
1156
+ ... 'FlipAngle': np.array([2, 15, 30]),
1157
+ ... 'RepetitionTime': np.array([2.5, 5.0]),
1158
+ ... }
1159
+ >>> zeros = db.zeros((128,64,4,3,2), coords)
1160
+ """
1161
+ if dims is None:
1162
+ if slice != {}:
1163
+ dims = tuple(slice)
1164
+ elif coords != {}:
1165
+ dims = tuple(coords)
1166
+ else:
1167
+ dims = ('InstanceNumber', )
1168
+ elif np.isscalar(dims):
1169
+ dims = (dims,)
1170
+ # Get frames to set:
1171
+ frames = self.frames(dims, slice=slice, coords=coords, **filters)
1172
+ if frames.size == 0:
1173
+ if slice != {}:
1174
+ self.expand(gridcoords=slice)
1175
+ frames = self.frames(dims)
1176
+ else:
1177
+ msg = 'Cannot set values to an empty series. Use Series.expand() to create empty frames first, or set the loc keyword to define coordinates for the new frames.'
1178
+ raise ValueError(msg)
1179
+
1180
+ if np.prod(values.shape[2:]) != frames.size:
1181
+ msg = 'The size of the pixel value array is different from the size of the series.'
1182
+ msg += '\nThe pixel array has shape ' + str(values.shape[2:]) + ', '
1183
+ msg += 'but the series has shape ' + str(frames.shape) + '.'
1184
+ raise ValueError(msg)
1185
+ frames = frames.ravel()
1186
+ values = values.reshape(values.shape[:2] + (-1,))
1187
+ for f, frame in enumerate(frames):
1188
+ self.progress(f+1, frames.size, 'Writing pixel values..')
1189
+ frame.set_pixel_array(values[:,:,f])
1190
+
1191
+
1192
+ def affine(self, slice={}, coords={}, **filters) -> np.ndarray:
1193
+ """Return the affine of the Series.
1194
+
1195
+ Raises:
1196
+ ValueError: if the DICOM file is corrupted
1197
+ ValueError: if the affine is not unique.
1198
+
1199
+ Returns:
1200
+ np.ndarray: affine matrix as a 4x4 numpy array.
1201
+
1202
+ See also:
1203
+ `set_affine`
1204
+ `unique_affines`
1205
+
1206
+ Example:
1207
+ Check that the default affine is the identity:
1208
+
1209
+ >>> zeros = db.zeros((128,128,10))
1210
+ >>> zeros.affine()
1211
+ [[1., 0., 0., 0.],
1212
+ [0., 1., 0., 0.],
1213
+ [0., 0., 1., 0.],
1214
+ [0., 0., 0., 1.]]
1215
+ """
1216
+
1217
+ # Read values
1218
+ tags = ('ImageOrientationPatient', 'ImagePositionPatient', 'PixelSpacing', 'SliceThickness', )
1219
+ orientation, pos, spacing, thick = self.values(*tags, slice=slice, coords=coords, **filters)
1220
+
1221
+ # Single slice
1222
+ if len(pos) == 1:
1223
+ return image_utils.affine_matrix(orientation[0], pos[0], spacing[0], thick[0])
1224
+
1225
+ # Multiple orientations - raise error
1226
+ orientation = np.unique(orientation)
1227
+ if len(orientation) > 1:
1228
+ msg = 'The series has multiple affines. '
1229
+ msg += '\nUse Series.unique_affines() to return an array of unique affines.'
1230
+ raise ValueError(msg)
1231
+ orientation = orientation[0]
1232
+
1233
+ # Multiple pixel spacings - raise error
1234
+ spacing = np.unique(spacing)
1235
+ if len(spacing) > 1:
1236
+ msg = 'The series has multiple pixel spacings. '
1237
+ msg += '\nAffine array of the series is not well defined.'
1238
+ raise ValueError(msg)
1239
+ spacing = spacing[0]
1240
+
1241
+ # All the same slice locations
1242
+ upos = np.unique(pos)
1243
+ if len(upos) == 1:
1244
+ return image_utils.affine_matrix(orientation, pos[0], spacing, thick[0])
1245
+
1246
+ # Different slice locations but not all different - raise error
1247
+ if len(upos) != len(pos):
1248
+ msg = 'Some frames have the same ImagePositionPatient. '
1249
+ msg += '\nAffine matrix of the series is not well defined.'
1250
+ raise ValueError(msg)
1251
+
1252
+ return image_utils.affine_matrix_multislice(orientation, pos, spacing)
1253
+
1254
+
1255
+ def set_affine(self, affine:np.ndarray, dims=('InstanceNumber',), slice={}, coords={}, multislice=False, **filters):
1256
+ """Set the affine matrix of a series.
1257
+
1258
+ The affine is defined as a 4x4 numpy array with bottom row [0,0,0,1]. The final column represents the position of the top right hand corner of the first slice. The first three columns represent rotation and scaling with respect to the axes of the reference frame.
1259
+
1260
+ Args:
1261
+ affine (numpy.ndarray): 4x4 numpy array
1262
+
1263
+ Raises:
1264
+ ValueError: if the series is empty. The information of the affine matrix is stored in the header and can not be stored in an empty series.
1265
+
1266
+ See also:
1267
+ `affine`
1268
+ `unique_affines`
1269
+
1270
+ Example:
1271
+ Create a series with unit affine array:
1272
+
1273
+ >>> zeros = db.zeros((128,128,10))
1274
+ >>> zeros.affine()
1275
+ [[1., 0., 0., 0.],
1276
+ [0., 1., 0., 0.],
1277
+ [0., 0., 1., 0.],
1278
+ [0., 0., 0., 1.]]
1279
+
1280
+ Rotate the volume over 90 degrees in the xy-plane:
1281
+
1282
+ >>> affine = np.array([
1283
+ ... [1., 0., 0., 0.],
1284
+ ... [0., 1., 0., 0.],
1285
+ ... [0., 0., 1., 0.],
1286
+ ... [0., 0., 0., 1.],
1287
+ ... ])
1288
+ >>> zeros.set_affine(affine)
1289
+
1290
+ Apart from the rotation, also change the resolution to (3mm, 3mm, 1.5mm):
1291
+
1292
+ >>> affine = np.array([
1293
+ ... [0., -3., 0., 0.],
1294
+ ... [3., 0., 0., 0.],
1295
+ ... [0., 0., 1.5, 0.],
1296
+ ... [0., 0., 0., 1.],
1297
+ ... ])
1298
+ >>> zeros.set_affine(affine)
1299
+
1300
+ Now rotate, change resolution, and shift the top right hand corner of the lowest slice to position (-30mm, 20mm, 120mm):
1301
+
1302
+ >>> affine = np.array([
1303
+ ... [0., -3., 0., -30.],
1304
+ ... [3., 0., 0., 20.],
1305
+ ... [0., 0., 1.5, 120.],
1306
+ ... [0., 0., 0., 1.],
1307
+ ... ])
1308
+ >>> zeros.set_affine(affine)
1309
+
1310
+ Note: changing the affine will affect multiple DICOM tags, such as slice location and image positions:
1311
+
1312
+ >>> zeros.SliceLocation
1313
+ [120.0, 121.5, 123.0, 124.5, 126.0, 127.5, 129.0, 130.5, 132.0, 133.5]
1314
+
1315
+ In this case, since the slices are stacked in parallel to the z-axis, the slice location starts at the lower z-coordinate of 120mm and then increments slice-by-slice with the slice thickness of 1.5mm.
1316
+
1317
+ """
1318
+
1319
+ frames = self.frames(dims=dims, slice=slice, coords=coords, **filters)
1320
+ if frames.size == 0:
1321
+ msg = 'Cannot set affine matrix in an empty series. Use Series.expand() to create empty frames first.'
1322
+ raise ValueError(msg)
143
1323
 
144
- def split_by(self, keyword: str | tuple) -> list:
1324
+ # For each slice location, the slice position needs to be updated too
1325
+ # Need the coordinates of the vector parallel to the z-axis of the volume.
1326
+ a = image_utils.dismantle_affine_matrix(affine)
1327
+ ez = a['SpacingBetweenSlices']*np.array(a['slice_cosine'])
1328
+
1329
+ # if multislice:
1330
+ # slice_thickness = self.unique('SliceThickness')[0]
1331
+
1332
+ # Set the affine slice-by-slice
1333
+ affine_z = affine.copy()
1334
+ for z, frame in enumerate(frames):
1335
+ self.progress(z+1, frames.size, 'Writing affine..')
1336
+ affine_z[:3, 3] = affine[:3, 3] + z*ez
1337
+ if multislice:
1338
+ thickness = frame.SliceThickness
1339
+ frame.affine_matrix = affine_z
1340
+ if multislice:
1341
+ frame.SliceThickness = thickness
1342
+
1343
+ # if multislice:
1344
+ # self.set_values(slice_thickness,'SliceThickness')
1345
+
1346
+
1347
+ # consider renaming copy() - but breaks backward compatibility - this is not a slice really
1348
+ def extract(self, slice={}, coords={}, **filters) -> Series:
1349
+ """Get a slice of the series by dimension values
1350
+
1351
+ Args:
1352
+ coordinates (dict, optional): dictionary of tag:value pairs where the value is either a single value or an array of values.
1353
+ coords (dict): Provide coordinates for the slice, either as dimension=value pairs, or as a dictionary where the keys list the dimensions, and the values are provided as scalars, 1D or meshgrid arrays of coordinates.
1354
+
1355
+ See also:
1356
+ `islice`
1357
+ `split_by`
1358
+
1359
+ Example:
1360
+ Create a zero-filled array, describing 8 MRI images each measured at 3 flip angles and 2 repetition times:
1361
+
1362
+ >>> coords = {
1363
+ ... 'SliceLocation': np.arange(8),
1364
+ ... 'FlipAngle': [2, 15, 30],
1365
+ ... 'RepetitionTime': [2.5, 5.0],
1366
+ ... }
1367
+ >>> series = db.zeros((128,128,8,3,2), coords)
1368
+
1369
+ Slice the series at flip angle 15:
1370
+
1371
+ >>> fa15 = series.extract(FlipAngle=15)
1372
+
1373
+ Retrieve the array and check the dimensions:
1374
+
1375
+ >>> array = fa15.pixel_values(dims=tuple(coords))
1376
+ >>> print(array.shape)
1377
+ (128, 128, 8, 1, 2)
1378
+
1379
+ Multiple possible values can be specified as a list or np.ndarray:
1380
+
1381
+ >>> fa15 = series.extract(SliceLocation=[0,5], FlipAngle=15)
1382
+ >>> array = fa15.pixel_values(dims=tuple(coords))
1383
+ >>> print(array.shape)
1384
+ (128, 128, 2, 1, 2)
1385
+
1386
+ Values can also be provided as a dictionary, which is useful for instance for private tags that do not have a keyword string. So the following are equivalent:
1387
+
1388
+ >>> fa15 = series.extract(SliceLocation=[0,5], FlipAngle=15)
1389
+ >>> fa15 = series.extract({SliceLocation:[0,5], FlipAngle:15})
1390
+ >>> fa15 = series.extract({(0x0020, 0x1041):[0,5], (0x0018, 0x1314):15})
1391
+ """
1392
+
1393
+ frames = self.frames(slice=slice, coords=coords, **filters)
1394
+ result = self.new_sibling()
1395
+ # result.adopt(frames) # faster but no progress bar
1396
+ for f, frame in enumerate(frames):
1397
+ self.progress(f+1, len(frames), 'Creating slice..')
1398
+ frame.copy_to(result)
1399
+ return result
1400
+
1401
+
1402
+ def split_by(self, tag: str | tuple) -> list:
145
1403
  """Split the series into multiple subseries based on keyword value.
146
1404
 
147
1405
  Args:
@@ -152,7 +1410,11 @@ class Series(Record):
152
1410
  ValueError: if all images have the same value for the keyword, so no subseries can be derived. An exception is raised rather than a copy of the series to avoid unnecessary copies being made. If that is the intention, use series.copy() instead.
153
1411
 
154
1412
  Returns:
155
- list: A list of subseries, where each element has the same value of the given keyword.
1413
+ list: A list of ``Series`` instances, where each element has the same value of the given keyword.
1414
+
1415
+ See Also:
1416
+ `slice`
1417
+ `islice`
156
1418
 
157
1419
  Example:
158
1420
 
@@ -160,10 +1422,10 @@ class Series(Record):
160
1422
 
161
1423
  >>> coords = {
162
1424
  ... 'FlipAngle': [2, 15, 30],
163
- ... 'RepetitionTime': [2.5, 5.0, 7.5],
1425
+ ... 'RepetitionTime': [2.5, 7.5],
164
1426
  ... }
165
- >>> zeros = db.zeros((3,2,128,128), coords)
166
- >>> print(zeros)
1427
+ >>> zeros = db.zeros((128, 128, 3, 2), coords)
1428
+ >>> zeros.print()
167
1429
  ---------- SERIES --------------
168
1430
  Series 001 [New Series]
169
1431
  Nr of instances: 6
@@ -177,7 +1439,7 @@ class Series(Record):
177
1439
 
178
1440
  Splitting this series by FlipAngle now creates 3 new series in the same study, with 2 images each. By default the fixed value of the splitting attribute is written in the series description:
179
1441
 
180
- >>> zeros_FA = zeros.split_by('FlipAngle')
1442
+ >>> FA = zeros.split_by('FlipAngle')
181
1443
  >>> zeros.study().print()
182
1444
  ---------- STUDY ---------------
183
1445
  Study New Study [None]
@@ -190,151 +1452,421 @@ class Series(Record):
190
1452
  Series 004 [New Series[FlipAngle = 30.0]]
191
1453
  Nr of instances: 2
192
1454
  --------------------------------
1455
+
1456
+ Check the flip angle of the split series:
1457
+ >>> for series in FA:
1458
+ ... print(series.FlipAngle)
1459
+ 2.0
1460
+ 15.0
1461
+ 30.0
193
1462
  """
194
1463
 
195
- self.status.message('Reading values..')
196
- try:
197
- values = self[keyword]
198
- except:
199
- msg = str(keyword) + ' is not a valid DICOM keyword'
200
- raise ValueError(msg)
201
- if len(values) == 1:
202
- msg = 'Cannot split by ' + str(keyword) + '\n'
203
- msg += 'All images have the same value'
1464
+ vals = self.unique(tag)
1465
+ if len(vals)==1:
1466
+ msg = 'Cannot split by ' + str(tag) + '\n'
1467
+ msg += 'All frames have the same value.'
204
1468
  raise ValueError(msg)
205
1469
 
206
- self.status.message('Splitting series..')
1470
+ desc = self.instance().SeriesDescription + '[' + str(tag) + ' = '
207
1471
  split_series = []
208
- desc = self.instance().SeriesDescription + '[' + keyword + ' = '
209
- for v in values:
210
- kwargs = {keyword: v}
211
- new = self.subseries(**kwargs)
1472
+ for v in vals:
1473
+ new = self.extract(slice={tag: v})
212
1474
  new.SeriesDescription = desc + str(v) + ']'
213
1475
  split_series.append(new)
214
1476
  return split_series
1477
+
215
1478
 
1479
+ def spacing(self, **kwargs)->tuple:
1480
+ """3D pixel spacing in mm
216
1481
 
217
- def import_dicom(self, files):
218
- uids = self.manager.import_datasets(files)
219
- self.manager.move_to(uids, self.uid)
1482
+ Returns:
1483
+ tuple: (x-spacing, y-spacing, z-spacing)
220
1484
 
221
- def slice_groups(*args, **kwargs):
222
- return slice_groups(*args, **kwargs)
1485
+ See also:
1486
+ `shape`
223
1487
 
1488
+ Examples:
1489
+ Check the spacing of a digital reference object:
224
1490
 
225
- def affine_matrix(self):
226
- return affine_matrix(self)
227
-
1491
+ >>> series = db.dro.T1_mapping_vFATR()
1492
+ >>> series.spacing()
1493
+ (15, 15, 20)
1494
+ """
1495
+ affine = self.affine(**kwargs)
1496
+ column_spacing = np.linalg.norm(affine[:3, 0])
1497
+ row_spacing = np.linalg.norm(affine[:3, 1])
1498
+ slice_spacing = np.linalg.norm(affine[:3, 2])
1499
+ return column_spacing, row_spacing, slice_spacing
228
1500
 
229
- def ndarray(self, dims=('InstanceNumber',)) -> np.ndarray:
230
- """Return a numpy.ndarray with pixel data.
231
1501
 
232
- Args:
233
- dims (tuple, optional): Dimensions of the result, as a tuple of valid DICOM tags of any length. Defaults to ('InstanceNumber',).
1502
+
1503
+
1504
+ def unique_affines(self)->np.ndarray:
1505
+ """Return the array of unique affine matrices.
1506
+
1507
+ Raises:
1508
+ ValueError: if the DICOM file is corrupted.
234
1509
 
235
1510
  Returns:
236
- np.ndarray: pixel data. The number of dimensions will be 2 plus the number of elements in dim. The first two indices will enumerate (x,y) coordinates in the slice, the other dimensions are as specified by the dims argument.
1511
+ np.ndarray: array of 4x4 ndarrays with the unique affine matrices of the series.
237
1512
 
238
1513
  See also:
239
- :func:`~set_ndarray`
1514
+ `set_affine`
1515
+ `affine`
1516
+
1517
+ Example:
1518
+ Check that the default affine is the identity:
1519
+
1520
+ >>> zeros = db.zeros((128,128,10))
1521
+ >>> zeros.affine()
1522
+ [array([
1523
+ [1., 0., 0., 0.],
1524
+ [0., 1., 0., 0.],
1525
+ [0., 0., 1., 0.],
1526
+ [0., 0., 0., 1.]], dtype=float32)]
1527
+ """
1528
+ image_orientation = self.ImageOrientationPatient
1529
+ if image_orientation is None:
1530
+ msg = 'ImageOrientationPatient not defined in the DICOM header \n'
1531
+ msg += 'This is a required DICOM field \n'
1532
+ msg += 'The data may be corrupted - please check'
1533
+ raise ValueError(msg)
1534
+ # Multiple slice groups in series - return list of affine matrices
1535
+ if isinstance(image_orientation[0], list):
1536
+ affine_matrices = []
1537
+ for dir in image_orientation:
1538
+ slice_group = self.instances(ImageOrientationPatient=dir)
1539
+ affine = _slice_group_affine_matrix(slice_group, dir)
1540
+ affine_matrices.append(affine)
1541
+ return np.unique(affine_matrices)
1542
+ # Single slice group in series - return a list with a single affine matrix
1543
+ else:
1544
+ slice_group = self.instances()
1545
+ affine = _slice_group_affine_matrix(slice_group, image_orientation)
1546
+ return np.array([affine])
1547
+
1548
+
1549
+ def islice(self, indices={}, **inds) -> Series:
1550
+ """Get a slice of the series by dimension indics
1551
+
1552
+ Args:
1553
+ indices (dict, optional): Dictionary with tag:value pairs, where the values are either a single index or an array of indices.
1554
+ inds (dict, optional): Provide indices for the slice, either as keyword=index pairs or as a dictionary. The indices must be provided either as a scalar, a list or a numpy array.
1555
+
1556
+ Raises:
1557
+ IndexError: when the indices in inds are out of range of the existing coordinates.
1558
+
1559
+ See also:
1560
+ `slice`
1561
+ `split_by`
1562
+
1563
+ Example:
1564
+ Create a zero-filled array, describing 8 MRI images each measured at 3 flip angles and 2 repetition times:
1565
+
1566
+ >>> coords = {
1567
+ ... 'SliceLocation': np.arange(8),
1568
+ ... 'FlipAngle': [2, 15, 30],
1569
+ ... 'RepetitionTime': [2.5, 5.0],
1570
+ ... }
1571
+ >>> series = db.zeros((128,128,8,3,2), coords)
1572
+
1573
+ Slice the series at flip angle 15 (i.e. index 1):
1574
+
1575
+ >>> fa15 = series.islice(FlipAngle=1)
1576
+
1577
+ Retrieve the array and check the dimensions:
1578
+
1579
+ >>> array = fa15.pixel_values(dims=tuple(coords))
1580
+ >>> print(array.shape)
1581
+ (128, 128, 8, 1, 2)
1582
+
1583
+ Multiple possible indices can be specified as a list or np.ndarray:
1584
+
1585
+ >>> fa15 = series.islice(SliceLocation=[0,5], FlipAngle=1)
1586
+ >>> array = fa15.pixel_values(dims=tuple(coords))
1587
+ >>> print(array.shape)
1588
+ (128, 128, 2, 1, 2)
1589
+
1590
+ Values can also be provided as a dictionary, which is useful for instance for private tags that do not have a keyword string. So the following are equivalent:
1591
+
1592
+ >>> fa15 = series.islice(SliceLocation=[0,5], FlipAngle=1)
1593
+ >>> fa15 = series.islice({SliceLocation:[0,5], FlipAngle:1})
1594
+ >>> fa15 = series.islice({(0x0020, 0x1041):[0,5], (0x0018, 0x1314):1})
1595
+
1596
+ """
1597
+ inds = {**indices, **inds}
1598
+
1599
+ # Check whether the arguments are valid, and initialize dims.
1600
+ if inds == {}:
1601
+ return self.new_sibling()
1602
+ dims = list(inds.keys())
1603
+ source = instance_array(self, sortby=dims)
1604
+
1605
+ # Retrieve the instances of the slice.
1606
+ for d, dim in enumerate(inds):
1607
+ ind = inds[dim]
1608
+ try:
1609
+ source = source.take(ind, axis=d)
1610
+ # Insert dimensions of 1 back in
1611
+ if isinstance(ind, Number):
1612
+ source = np.expand_dims(source, axis=d)
1613
+ except IndexError as e:
1614
+ msg = str(e) + '\n'
1615
+ msg += 'The indices for ' + str(dim) + ' in the inds argument are out of bounds'
1616
+ raise IndexError(msg)
1617
+
1618
+ result = self.new_sibling()
1619
+ source = source.ravel()
1620
+ for i in range(source.size):
1621
+ source[i].copy_to(result)
1622
+ return result
1623
+
1624
+
1625
+ #
1626
+ # Following APIs are obsolete and will be removed in future versions
1627
+ #
1628
+
1629
+
1630
+ def _old_set_pixel_values(self, array:np.ndarray, coords:dict=None, inds:dict=None):
1631
+ """Assign new pixel data with a new numpy.ndarray.
1632
+
1633
+ Args:
1634
+ array (np.ndarray): array with new pixel data.
1635
+ coords (dict, optional): Provide coordinates for the array, using a dictionary where the keys list the dimensions, and the values are provided as 1D or meshgrid arrays of coordinates. If data already exist at the specified coordinates, these will be overwritten. If not, the new data will be added to the series.
1636
+ inds (dict, optional): Provide a slice of existing data that will be overwritten with the new array. The format is the same as the dictionary of coordinates, except that the slice is identified by indices rather than values.
1637
+
1638
+ Raises:
1639
+ ValueError: if neither coords or inds or provided, if both are provided, or if the dimensions in coords or inds does not match up with the dimensions of the array.
1640
+ IndexError: when attempting to set a slice in an empty array, or when the indices in inds are out of range of the existing coordinates.
1641
+
1642
+ See also:
1643
+ `pixel_values`
1644
+
1645
+ Example:
1646
+ Create a zero-filled array, describing 8 MRI images each measured at 3 flip angles and 2 repetition times:
1647
+
1648
+ >>> coords = {
1649
+ ... 'SliceLocation': np.arange(8),
1650
+ ... 'FlipAngle': [2, 15, 30],
1651
+ ... 'RepetitionTime': [2.5, 5.0],
1652
+ ... }
1653
+ >>> series = db.zeros((128,128,8,3,2), coords)
1654
+
1655
+ Retrieve the array and check that it is populated with zeros:
1656
+
1657
+ >>> array = series.pixel_values(dims=tuple(coords))
1658
+ >>> print(np.mean(array))
1659
+ 0.0
1660
+
1661
+ Now overwrite the values with a new array of ones in a new shape:
1662
+
1663
+ >>> new_shape = (128,128,8)
1664
+ >>> new_coords = {
1665
+ ... 'SliceLocation': np.arange(8),
1666
+ ... }
1667
+ >>> ones = np.ones(new_shape)
1668
+ >>> series.set_pixel_values(ones, coords=new_coords)
1669
+
1670
+ Retrieve the new array and check shape:
1671
+
1672
+ >>> array = series.pixel_values(dims=tuple(new_coords))
1673
+ >>> print(array.shape)
1674
+ (128,128,8)
1675
+
1676
+ Check that the value is overwritten:
1677
+
1678
+ >>> print(np.mean(array))
1679
+ 1.0
1680
+ """
1681
+
1682
+ # Check whether the arguments are valid, and initialize dims.
1683
+ cnt = 0
1684
+ if coords is not None:
1685
+ cnt+=1
1686
+ dims = tuple(coords)
1687
+ if len(dims) != array.ndim-2:
1688
+ msg = 'One coordinate must be specified for each dimensions in the array.'
1689
+ raise ValueError(msg)
1690
+ for d, dim in enumerate(coords):
1691
+ if len(coords[dim]) != array.shape[d+2]:
1692
+ msg = str(dim) + ' in the coords must have the same number of elements as the corresponding dimension in the array'
1693
+ raise ValueError(msg)
1694
+ if inds is not None:
1695
+ cnt+=1
1696
+ dims = tuple(inds)
1697
+ if len(dims) != array.ndim-2:
1698
+ msg = 'One coordinate must be specified for each dimensions in the array.'
1699
+ raise ValueError(msg)
1700
+ if cnt == 0:
1701
+ msg = 'At least one of the optional arguments coords or inds must be provided'
1702
+ raise ValueError(msg)
1703
+ if cnt == 2:
1704
+ msg = 'Only one of the optional arguments coords or inds must be provided'
1705
+ raise ValueError(msg)
1706
+
1707
+ source = instance_array(self, sortby=list(dims))
1708
+
1709
+ if coords is not None:
1710
+ # Retrieve the instances corresponding to the coordinates.
1711
+ if source.size != 0:
1712
+ for d, dim in enumerate(coords):
1713
+ ind = []
1714
+ for i in range(source.shape[d]):
1715
+ si = source.take(i,axis=d).ravel()
1716
+ if si[0][dim] in coords[dim]:
1717
+ ind.append(i)
1718
+ source = source.take(ind, axis=d)
1719
+ # Insert dimensions of 1 back in
1720
+ if len(ind)==1:
1721
+ source = np.expand_dims(source, axis=d)
1722
+ elif inds is not None:
1723
+ # Retrieve the instances of the slice, as well as their coordinates.
1724
+ coords = {}
1725
+ for d, dim in enumerate(inds):
1726
+ ind = inds[dim]
1727
+ if isinstance(ind, np.ndarray):
1728
+ ind = list(ind)
1729
+ try:
1730
+ source = source.take(ind, axis=d)
1731
+ except IndexError as e:
1732
+ msg = str(e) + '\n'
1733
+ msg += 'The indices for ' + str(dim) + ' in the inds argument are out of bounds'
1734
+ raise IndexError(msg)
1735
+ coords[dim] = []
1736
+ for i in range(source.shape[d]):
1737
+ si = source.take(i,axis=d).ravel()
1738
+ coords[dim].append(si[0][dim])
1739
+
1740
+ nr_of_slices = int(np.prod(array.shape[2:]))
1741
+ if source.size == 0:
1742
+ # If there are not yet any instances at the correct coordinates, they will be created from scratch
1743
+ source = [self.new_instance(MRImage()) for _ in range(nr_of_slices)]
1744
+ set_pixel_values(self, array, source=source, coords=coords)
1745
+ elif array.shape[2:] == source.shape:
1746
+ # If the new array has the same shape, use the exact headers.
1747
+ set_pixel_values(self, array, source=source.ravel().tolist(), coords=coords)
1748
+ else:
1749
+ # If the new array has a different shape, use the first header for all and delete all the others
1750
+ # This happens when some of the new coordinates are present, but not all.
1751
+ # TODO: This is overkill - only fill in the gaps with copies.
1752
+ source = source.ravel().tolist()
1753
+ for series in source[1:]:
1754
+ series.remove()
1755
+ source = [source[0]] + [source[0].copy_to(self) for _ in range(nr_of_slices-1)]
1756
+ set_pixel_values(self, array, source=source, coords=coords)
1757
+
1758
+ def subseries(self, **kwargs)->Series:
1759
+ """Extract a subseries based on values of header elements.
1760
+
1761
+ Args:
1762
+ kwargs: Any number of valid DICOM (tag, value) keyword arguments.
1763
+
1764
+ Returns:
1765
+ Series: a new series as a sibling under the same parent.
1766
+
1767
+ See Also:
1768
+ :func:`~split_by`
240
1769
 
241
1770
  Example:
242
- Create a zero-filled array, describing 8 MRI slices each measured at 3 flip angles and 2 repetition times:
1771
+
1772
+ Create a multi-slice series with multiple flip angles and repetition times:
243
1773
 
244
1774
  >>> coords = {
245
- ... 'SliceLocation': np.arange(8),
1775
+ ... 'SliceLocation': np.arange(16),
246
1776
  ... 'FlipAngle': [2, 15, 30],
247
- ... 'RepetitionTime': [2.5, 5.0],
1777
+ ... 'RepetitionTime': [2.5, 5.0, 7.5],
248
1778
  ... }
249
- >>> zeros = db.zeros((128,128,8,3,2), coords)
1779
+ >>> zeros = db.zeros((128, 128, 16, 3, 2), coords)
250
1780
 
251
- To retrieve the array, the dimensions need to be provided:
1781
+ Create a new series containing only the data with flip angle 2 and repetition time 7.5:
252
1782
 
253
- >>> dims = ('SliceLocation', 'FlipAngle', 'RepetitionTime')
254
- >>> array = zeros.ndarray(dims)
255
- >>> print(array.shape)
256
- (128, 128, 8, 3, 2)
1783
+ >>> volume = zeros.subseries(FlipAngle=2.0, RepetitionTime=7.5)
257
1784
 
258
- The dimensions are the keys of the coordinate dictionary, so this could also have been called as:
1785
+ Check that the volume series now has two dimensions of size 1:
259
1786
 
260
- >>> array = zeros.ndarray(dims=tuple(coords))
1787
+ >>> array = volume.pixel_values(dims=tuple(coords))
261
1788
  >>> print(array.shape)
262
- (128, 128, 8, 3, 2)
263
- """
264
- array, _ = get_pixel_array(self, sortby=list(dims), first_volume=True, pixels_first=True)
265
- return array
266
-
1789
+ (128, 128, 16, 1, 1)
267
1790
 
268
- def set_ndarray(self, array:np.ndarray, dims=('InstanceNumber',), coords:dict=None):
269
- """Assign new pixel data with a new numpy.ndarray.
270
-
271
- Args:
272
- array (np.ndarray): array with new pixel data.
273
- dims (tuple, optional): Dimensions of the result, as a tuple of valid DICOM tags of any length. Defaults to ('InstanceNumber',). Must be provided if coords are not given.
274
- coords (dict, optional): Provide coordinates for the array explicitly, using a dictionary with dimensions as keys and as values either 1D or meshgrid arrays of coordinates. If coords are not provided, then dimensions a default range array will be used. If coordinates are provided, then the dimensions argument is ignored.
1791
+ and only one flip angle and repetition time:
275
1792
 
276
- Raises:
277
- ValueError: if dimensions and coordinates are both provided with incompatible dimensions.
1793
+ >>> print(volume.FlipAngle, volume.RepetitionTime)
1794
+ 2.0 7.5
278
1795
 
279
- See also:
280
- :func:`~ndarray`
1796
+ and that the parent study now has two series:
281
1797
 
282
- Warning:
283
- Currently this function assumes that the new array has the same shape as the current array. This will be generalised in an upcoming update - for now please look at the pipelines examples for saving different dimensions using the current interface.
1798
+ >>> volume.study().print()
1799
+ ---------- STUDY ---------------
1800
+ Study New Study [None]
1801
+ Series 001 [New Series]
1802
+ Nr of instances: 96
1803
+ Series 002 [New Series]
1804
+ Nr of instances: 16
1805
+ --------------------------------
1806
+ """
1807
+ return subseries(self, move=False, **kwargs)
1808
+
1809
+ def slice_groups(self, dims=('InstanceNumber',)) -> list:
1810
+ """Return a list of slice groups in the series.
284
1811
 
285
- Example:
286
- Create a zero-filled array, describing 8 MRI slices each measured at 3 flip angles and 2 repetition times:
1812
+ In dbdicom, a *slice group* is defined as a series of slices that have the same orientation. It is common for a single series to have images with multiple orientations, such as in localizer series in MRI. For such a series, returning all data in a single array may not be meaningful.
287
1813
 
288
- >>> coords = {
289
- ... 'SliceLocation': np.arange(8),
290
- ... 'FlipAngle': [2, 15, 30],
291
- ... 'RepetitionTime': [2.5, 5.0],
292
- ... }
293
- >>> series = db.zeros((128,128,8,3,2), coords)
1814
+ Formally, a *slice group* is a dictionary with two entries: 'ndarray' is the numpy.ndarray with the data along the dimensions provided by the dims argument, and 'affine' is the 4x4 affine matrix of the slice group. The function returns a list of such dictionaries, one for each slice group in the series.
294
1815
 
295
- Retrieve the array and check that it is populated with zeros:
1816
+ Args:
1817
+ dims (tuple, optional): Dimensions for the returned arrays. Defaults to ('InstanceNumber',).
296
1818
 
297
- >>> array = series.ndarray(dims=tuple(coords))
298
- >>> print(np.mean(array))
299
- 0.0
1819
+ Returns:
1820
+ list: A list of slice groups (dictionaries), one for each slice group in the series.
300
1821
 
301
- Now overwrite the values with a new array of ones. Coordinates are not changed so only dimensions need to be specified:
1822
+ Examples:
302
1823
 
303
- >>> ones = np.ones((128,128,8,3,2))
304
- >>> series.set_ndarray(ones, dims=tuple(coords))
1824
+ >>> series = db.ones((128,128,5,10))
1825
+ >>> sgroups = series.slice_groups(dims=('SliceLocation', 'AcquisitionTime'))
305
1826
 
306
- Retrieve the array and check that it is now populated with ones:
1827
+ Since there is only one slice group in the series, ``sgroups`` is a list with one element:
307
1828
 
308
- >>> array = series.ndarray(dims=tuple(coords))
309
- >>> print(np.mean(array))
310
- 1.0
311
- """
312
- # TODO: Include a reshaping option!!!!
313
-
314
- # TODO: set_pixel_array has **kwargs to allow setting other properties on the fly to save extra reading and writing. This makes sense but should be handled by a more general function, such as:
315
- # #
316
- # series.set_properties(ndarray:np.ndarray, coords:{}, affine:np.ndarray, **kwargs)
317
- # #
1829
+ >>> print(len(sgroups))
1830
+ 1
318
1831
 
319
- # Lazy solution - first get the header information (slower than propagating explicitly but conceptually more convenient - can be rationalised later - pixel values can be set on the fly as the header is retrieved)
1832
+ The array of the slice group is the entire volume of the series:
320
1833
 
321
- # If coordinates are provided, the dimensions are taken from that. Dimensions are not needed in this case but if they are set they need to be the same as those specified in the coordinates. Else an error is raised.
322
- if coords is not None:
323
- if dims != tuple(coords):
324
- msg = 'Coordinates do not have the correct dimensions \n'
325
- msg += 'Note: if coordinates are defined than the dimensions argument is ignored. Hence you can remove the dimensions argument in this call, or else make sure it matches up with the dimensions in coordinates.'
326
- raise ValueError(msg)
327
- else:
328
- dims = tuple(coords)
329
- _, headers = get_pixel_array(self, sortby=list(dims), first_volume=True, pixels_first=True)
330
- set_pixel_array(self, array, source=headers, pixels_first=True, coords=coords)
1834
+ >>> print(sgroups[0]['ndarray'].shape)
1835
+ (128, 128, 5, 10)
331
1836
 
1837
+ And the affine of the series has not changed from the default (identity):
332
1838
 
333
- #
334
- # Following APIs are obsolete and will be removed in future versions
335
- #
1839
+ >>> print(sgroups[0]['affine'])
1840
+ [[1. 0. 0. 0.]
1841
+ [0. 1. 0. 0.]
1842
+ [0. 0. 1. 0.]
1843
+ [0. 0. 0. 1.]]
336
1844
 
1845
+ """
1846
+
1847
+ slice_groups = []
1848
+ image_orientation = self.ImageOrientationPatient
1849
+
1850
+ # Multiple slice groups in series - return list of cuboids
1851
+ if isinstance(image_orientation[0], list):
1852
+ for dir in image_orientation:
1853
+ slice_group = instance_array(self, ImageOrientationPatient=dir)
1854
+ affine = _slice_group_affine_matrix(list(slice_group), dir)
1855
+ array, _ = _get_pixel_array_from_instance_array(slice_group, sortby=list(dims), pixels_first=True)
1856
+ slice_groups.append({'ndarray': array[...,0], 'affine': affine})
1857
+
1858
+ # Single slice group in series - return a list with a single affine matrix
1859
+ else:
1860
+ slice_group = instance_array(self)
1861
+ affine = _slice_group_affine_matrix(list(slice_group), image_orientation)
1862
+ array, _ = _get_pixel_array_from_instance_array(slice_group, sortby=list(dims), pixels_first=True)
1863
+ slice_groups.append({'ndarray': array[...,0], 'affine': affine})
337
1864
 
1865
+ return slice_groups
1866
+
1867
+ def affine_matrix(self):
1868
+ return affine_matrix(self)
1869
+
338
1870
  def array(*args, **kwargs):
339
1871
  return get_pixel_array(*args, **kwargs)
340
1872
 
@@ -347,24 +1879,366 @@ class Series(Record):
347
1879
  def set_pixel_array(*args, **kwargs):
348
1880
  set_pixel_array(*args, **kwargs)
349
1881
 
1882
+ def ndarray(self, *args, **kwargs):
1883
+ return self.pixel_values(*args, **kwargs)
1884
+
1885
+ def set_ndarray(self, *args, **kwargs):
1886
+ self.set_pixel_values(*args, **kwargs)
1887
+
1888
+
1889
+
1890
+ def _filter_values(vframes, slice, coords, exclude=False):
1891
+ # vframes: list with one item per frame, each item being a list of values.
1892
+ # filters: dictionary of tag: value pairs.
1893
+ if slice=={} and coords=={}:
1894
+ fvalues = vframes
1895
+ else:
1896
+ fvalues = []
1897
+ nf = len(slice)
1898
+ nl = _coords_size(coords)
1899
+ nc = len(coords)
1900
+ for vframe in vframes:
1901
+ in_slice = True
1902
+ for i, s in enumerate(slice):
1903
+ if isinstance(slice[s], np.ndarray):
1904
+ in_slice = vframe[i-nf-nc] in slice[s]
1905
+ else:
1906
+ in_slice = vframe[i-nf-nc] == slice[s]
1907
+ if exclude:
1908
+ in_slice = not in_slice
1909
+ if not in_slice:
1910
+ break
1911
+ if nl==0:
1912
+ in_coords = True
1913
+ else:
1914
+ in_coords = False
1915
+ for l in range(nl):
1916
+ at_l = True
1917
+ for i, loc in enumerate(coords):
1918
+ at_l = at_l and (vframe[i-nc] == coords[loc][l])
1919
+ in_coords = in_coords or at_l
1920
+ if at_l:
1921
+ break
1922
+ if exclude:
1923
+ in_coords = not in_coords
1924
+ if in_slice and in_coords:
1925
+ fvalues.append(vframe[:-nf-nc])
1926
+
1927
+ if len(fvalues) == 0:
1928
+ return np.array([]).reshape((0,0))
1929
+
1930
+ # Create array of return values. Values can be of different types including lists so this must be an object array.
1931
+ nd, nf = len(fvalues[0]), len(fvalues)
1932
+ rvalues = np.empty((nd,nf), dtype=object)
1933
+ for d in range(nd):
1934
+ for f in range(nf):
1935
+ rvalues[d,f] = fvalues[f][d]
1936
+
1937
+ return rvalues
1938
+
1939
+
1940
+
1941
+ def _filter_values_ind(vframes, slice, coords, exclude=False):
1942
+ if slice=={} and coords=={}:
1943
+ return np.arange(len(vframes), dtype=int)
1944
+ finds = []
1945
+ nf = len(slice)
1946
+ nl = _coords_size(coords)
1947
+ nc = len(coords)
1948
+ for iv, vframe in enumerate(vframes):
1949
+ in_slice = True
1950
+ for i, s in enumerate(slice):
1951
+ if isinstance(slice[s], np.ndarray):
1952
+ in_slice = vframe[i-nf-nc] in slice[s]
1953
+ else:
1954
+ in_slice = vframe[i-nf-nc] == slice[s]
1955
+ if exclude:
1956
+ in_slice = not in_slice
1957
+ if not in_slice:
1958
+ break
1959
+ if nl==0:
1960
+ in_coords = True
1961
+ else:
1962
+ in_coords = False
1963
+ for l in range(nl):
1964
+ at_l = True
1965
+ for i, loc in enumerate(coords):
1966
+ at_l = at_l and (vframe[i-nc] == coords[loc][l])
1967
+ in_coords = in_coords or at_l
1968
+ if at_l:
1969
+ break
1970
+ if exclude:
1971
+ in_coords = not in_coords
1972
+ if in_slice and in_coords:
1973
+ finds.append(iv)
1974
+ return np.array(finds, dtype=int)
1975
+
1976
+
1977
+ def _coords_shape(coords):
1978
+ if coords == {}:
1979
+ return (0,)
1980
+
1981
+ # Check that all values are arrays.
1982
+ for c in coords:
1983
+ if not isinstance(coords[c], np.ndarray):
1984
+ msg = 'Coordinate values must be provided as numpy arrays.'
1985
+ msg += '\nBut the value of ' + str(c) + ' is a ' + str(type(c))
1986
+ raise ValueError(msg)
1987
+
1988
+ shapes = [coords[tag].shape for tag in coords]
1989
+ shape = shapes[0]
1990
+ for s in shapes[1:]:
1991
+ if s != shape:
1992
+ msg = 'Dimensions are ambiguous - not all coordinates have the same shape.'
1993
+ raise ValueError(msg)
1994
+ return shapes[0]
1995
+
1996
+
1997
+ def _coords_size(coords):
1998
+
1999
+ if coords == {}:
2000
+ return 0
2001
+
2002
+ for c in coords:
2003
+ if not isinstance(coords[c], np.ndarray):
2004
+ msg = 'Coordinate values must be provided as numpy arrays.'
2005
+ msg += '\nBut the value of ' + str(c) + ' is a ' + str(type(c))
2006
+ raise ValueError(msg)
2007
+
2008
+ # Coordinate values must a have the same size.
2009
+ sizes = np.unique([coords[tag].size for tag in coords])
2010
+ if len(sizes) > 1:
2011
+ msg = 'These are not proper dimensions. Each coordinate must have the same number of values.'
2012
+ raise ValueError(msg)
2013
+ return sizes[0]
2014
+
2015
+ def _coords_vals(coords):
2016
+ values = [coords[tag].ravel() for tag in coords]
2017
+ values = np.stack(values)
2018
+ return values
2019
+
2020
+ def _check_if_ivals(values):
2021
+ if None in values:
2022
+ msg = 'These are not proper dimensions. Coordinate values must be defined everywhere.'
2023
+ raise ValueError(msg)
2024
+
2025
+ # Check if the values are unique
2026
+ for f in range(values.shape[1]-1):
2027
+ for g in range(f+1, values.shape[1]):
2028
+ equal = True
2029
+ for d in range(values.shape[0]):
2030
+ if values[d,f] != values[d,g]:
2031
+ equal = False
2032
+ break
2033
+ if equal:
2034
+ msg = 'These are not proper dimensions. Coordinate values must be unique.'
2035
+ raise ValueError(msg)
2036
+ # if values.shape[1] != np.unique(values, axis=1).shape[1]:
2037
+ # msg = 'These are not proper dimensions. Coordinate values must be unique.'
2038
+ # raise ValueError(msg)
2039
+
2040
+ def _check_if_coords(coords):
2041
+
2042
+ # Check that all values are arrays.
2043
+ for c in coords:
2044
+ if not isinstance(coords[c], np.ndarray):
2045
+ msg = 'Coordinate values must be provided as numpy arrays.'
2046
+ msg += '\nBut the value of ' + str(c) + ' is a ' + str(type(coords[c]))
2047
+ raise ValueError(msg)
2048
+
2049
+ # Check if coordinates are unique
2050
+ values = _coords_vals(coords)
2051
+ _check_if_ivals(values)
2052
+ return coords
2053
+
2054
+ def _mesh_to_coords(coords):
2055
+ for c in coords:
2056
+ coords[c] = coords[c].ravel()
2057
+ return _check_if_coords(coords)
2058
+
2059
+
2060
+ def _grid_to_meshcoords(gridcoords):
2061
+
2062
+ grid = []
2063
+ for c in gridcoords:
2064
+ if not isinstance(gridcoords[c], np.ndarray):
2065
+ msg = 'Grid coordinates have to be numpy arrays.'
2066
+ raise TypeError(msg)
2067
+ if len(gridcoords[c].shape) != 1:
2068
+ msg = 'Grid coordinates have to be one-dimensionial.'
2069
+ raise ValueError(msg)
2070
+ if len(np.unique(gridcoords[c])) != len(gridcoords[c]):
2071
+ msg = 'Grid coordinates have to be unique.'
2072
+ raise ValueError(msg)
2073
+ grid.append(gridcoords[c])
2074
+
2075
+ mesh = np.meshgrid(*tuple(grid), indexing='ij')
2076
+ meshcoords = {}
2077
+ for i, c in enumerate(gridcoords):
2078
+ meshcoords[c] = mesh[i]
2079
+ _check_if_coords(meshcoords)
2080
+ return meshcoords
2081
+
2082
+
2083
+ def _meshcoords_to_grid(coords):
2084
+ dims = tuple(coords)
2085
+ gridcoords = {}
2086
+ for d, dim in enumerate(dims):
2087
+ gridcoords[dim] = []
2088
+ dvals = coords[dim]
2089
+ for i in range(dvals.shape[d]):
2090
+ dvals_i = dvals.take(i, axis=d)
2091
+ dvals_i = np.unique(dvals_i)
2092
+ if len(dvals_i) > 1:
2093
+ msg = 'These are not proper grid coordinates.'
2094
+ raise ValueError(msg)
2095
+ gridcoords[dim].append(dvals_i[0])
2096
+ gridcoords[dim] = np.array(gridcoords[dim])
2097
+ return gridcoords
2098
+
2099
+
2100
+ def _grid_to_coords(grid):
2101
+ if grid == {}:
2102
+ return {}
2103
+ coords = _grid_to_meshcoords(grid)
2104
+ for c in coords:
2105
+ coords[c] = coords[c].flatten()
2106
+ return coords
2107
+
2108
+ def _as_meshcoords(coords):
2109
+
2110
+ # First check that they are proper coordinates
2111
+ values = _coords_vals(coords)
2112
+ _check_if_ivals(values)
2113
+ values = _meshvals(values)
2114
+ meshcoords = {}
2115
+ for i, c in enumerate(coords):
2116
+ meshcoords[c] = values[i,...]
2117
+ return meshcoords
2118
+
2119
+ def _meshvals(values):
2120
+ # Input array shape: (d, f) with d = nr of dims and f = nr of frames
2121
+ # Output array shape: (d, f1,..., fd)
2122
+ if values.size == 0:
2123
+ return np.array([])
2124
+ # List the unique values of the first coordinate
2125
+ vals, cnts = np.unique(values[0,:], return_counts=True)
2126
+ # Check that there is an equal number of each value
2127
+ if len(np.unique(cnts)) > 1:
2128
+ msg = 'These are not mesh coordinates.'
2129
+ raise ValueError(msg)
2130
+ # If there is only one dimension, we are done
2131
+ if values.shape[0] == 1:
2132
+ return values
2133
+ mesh = []
2134
+ for v in vals:
2135
+ vind = np.where(values[0,:]==v)[0]
2136
+ vmesh = _meshvals(values[1:,vind])
2137
+ mesh.append(vmesh)
2138
+ mesh = np.stack(mesh, axis=1)
2139
+ a = [np.full(mesh.shape[2:], v) for v in vals]
2140
+ a = np.stack(a)
2141
+ a = np.expand_dims(a,0)
2142
+ mesh = np.concatenate((a, mesh))
2143
+ return mesh
2144
+
2145
+ def _meshdata(vals, crds, cmesh):
2146
+ mshape = (vals.shape[0],) + cmesh.shape[1:]
2147
+ if mshape[0]==0:
2148
+ return vals.reshape(mshape)
2149
+ vmesh = np.zeros(mshape, dtype=object)
2150
+ cmesh = cmesh.reshape((cmesh.shape[0],-1))
2151
+ vmesh = vmesh.reshape((vmesh.shape[0],-1))
2152
+ for i in range(vals.shape[1]):
2153
+ # find location of coordinate i in cmesh
2154
+ for j in range(cmesh.shape[1]):
2155
+ if np.array_equal(cmesh[:,j], crds[:,i]):
2156
+ break
2157
+ # Write value i at the same location in vmesh
2158
+ vmesh[:,j] = vals[:,i]
2159
+ return vmesh.reshape(mshape)
2160
+
2161
+ def _concatenate_coords(coords:tuple, mesh=False):
2162
+ concat = {}
2163
+ for c in coords[0]:
2164
+ concat[c] = coords[0][c].flatten().copy()
2165
+ for coord in coords[1:]:
2166
+ for c in coord:
2167
+ if c not in concat:
2168
+ msg = 'Cannot concatenate - all coordinates must have the same variables.'
2169
+ raise ValueError(msg)
2170
+ concat[c] = np.concatenate((concat[c], coord[c].flatten()))
2171
+ _check_if_coords(concat)
2172
+ if mesh:
2173
+ return _as_meshcoords(concat)
2174
+ else:
2175
+ return concat
2176
+
2177
+
2178
+ ### OBSOLETE BELOW HERE
2179
+
2180
+
2181
+ def set_pixel_values(series, array, source=None, coords=None, **kwargs):
2182
+
2183
+ # If coordinates are given as 1D arrays, turn them into grids and flatten for iteration.
2184
+ if coords is not None:
2185
+ mesh_coords = {}
2186
+ v = list(coords.values())
2187
+ if v != []:
2188
+ v0 = v[0]
2189
+ if np.array(v0).ndim==1: # regular grid
2190
+ pos = tuple([coords[c] for c in coords])
2191
+ pos = np.meshgrid(*pos, indexing='ij')
2192
+ for i, c in enumerate(coords):
2193
+ mesh_coords[c] = pos[i].ravel()
350
2194
 
2195
+ # Flatten array for iterating
2196
+ nr_of_slices = int(np.prod(array.shape[2:]))
2197
+ array = array.reshape((array.shape[0], array.shape[1], nr_of_slices)) # shape (x,y,i)
2198
+ attr = {**series.attributes, **kwargs}
2199
+ if 'SliceLocation' in coords:
2200
+ affine = series.affine()
2201
+ for i, image in enumerate(source):
2202
+ series.progress(i+1, len(source), 'Saving array..')
2203
+ image.read()
351
2204
 
2205
+ # Update any other header data provided
2206
+ for a, v in attr.items():
2207
+ setattr(image, a, v)
2208
+ # if isinstance(v, list):
2209
+ # setattr(image, a, v[i])
2210
+ # else:
2211
+ # setattr(image, a, v)
2212
+
2213
+ # # If needed, use Defaults for geometry markers
2214
+ # if affine is not None:
2215
+ # affine[2, 3] = i # not sufficiently general
2216
+ # image.affine_matrix = affine
2217
+
2218
+ # Set coordinates.
2219
+ if mesh_coords is not None:
2220
+ for c in mesh_coords:
2221
+ image[c] = mesh_coords[c][i]
2222
+ if c == 'SliceLocation':
2223
+ image['ImagePositionPatient'] = image_utils.image_position_from_slice_location(mesh_coords[c][i], affine)
2224
+
2225
+ image.set_pixel_array(array[:,:,i])
2226
+ image.clear()
352
2227
 
353
2228
 
2229
+ # def slice_groups(series): # not yet in use
2230
+ # slice_groups = []
2231
+ # for orientation in series.ImageOrientationPatient:
2232
+ # sg = series.instances(ImageOrientationPatient=orientation)
2233
+ # slice_groups.append(sg)
2234
+ # return slice_groups
354
2235
 
355
- def slice_groups(series): # not yet in use
356
- slice_groups = []
357
- for orientation in series.ImageOrientationPatient:
358
- sg = series.instances(ImageOrientationPatient=orientation)
359
- slice_groups.append(sg)
360
- return slice_groups
361
2236
 
362
2237
  def subseries(record, move=False, **kwargs):
363
- """Extract subseries"""
364
2238
  series = record.new_sibling()
365
2239
  instances = record.instances(**kwargs)
366
2240
  for i, instance in enumerate(instances):
367
- record.status.progress(i+1, len(instances), 'Extracting subseries..')
2241
+ record.progress(i+1, len(instances), 'Extracting subseries..')
368
2242
  if move:
369
2243
  instance.move_to(series)
370
2244
  else:
@@ -374,6 +2248,7 @@ def subseries(record, move=False, **kwargs):
374
2248
  # series.adopt(instances)
375
2249
  return series
376
2250
 
2251
+
377
2252
  def read_npy(record):
378
2253
  # Not in use - loading of temporary numpy files
379
2254
  file = record.manager.npy()
@@ -384,123 +2259,35 @@ def read_npy(record):
384
2259
  return array
385
2260
 
386
2261
 
387
- def affine_matrix(series):
388
- """Returns the affine matrix of a series.
389
-
390
- If the series consists of multiple slice groups with different
391
- image orientations, then a list of affine matrices is returned,
392
- one for each slice orientation.
393
- """
394
- image_orientation = series.ImageOrientationPatient
395
- if image_orientation is None:
396
- msg = 'ImageOrientationPatient not defined in the DICOM header \n'
397
- msg = 'This is a required DICOM field \n'
398
- msg += 'The data may be corrupted - please check'
399
- raise ValueError(msg)
400
- # Multiple slice groups in series - return list of affine matrices
401
- if isinstance(image_orientation[0], list):
402
- affine_matrices = []
403
- for dir in image_orientation:
404
- slice_group = series.instances(ImageOrientationPatient=dir)
405
- affine = _slice_group_affine_matrix(slice_group, dir)
406
- affine_matrices.append((affine, slice_group))
407
- return affine_matrices
408
- # Single slice group in series - return a single affine matrix
409
- else:
410
- slice_group = series.instances()
411
- affine = _slice_group_affine_matrix(slice_group, image_orientation)
412
- return affine, slice_group
413
-
414
-
415
- def _slice_group_affine_matrix(slice_group, image_orientation):
416
- """Return the affine matrix of a slice group"""
417
-
418
- # single slice
419
- if len(slice_group) == 1:
420
- return slice_group[0].affine_matrix
421
- # multi slice
422
- else:
423
- pos = [s.ImagePositionPatient for s in slice_group]
424
- # Find unique elements
425
- pos = [x for i, x in enumerate(pos) if i==pos.index(x)]
426
-
427
- # One slice location
428
- if len(pos) == 1:
429
- return slice_group[0].affine_matrix
430
-
431
- # Slices with different locations
432
- else:
433
- return image_utils.affine_matrix_multislice(
434
- image_orientation, pos,
435
- slice_group[0].PixelSpacing) # assume all the same pixel spacing
436
-
437
2262
 
438
- def array(record, **kwargs):
2263
+ def array(record, sortby=None, pixels_first=False, first_volume=False):
439
2264
  if isinstance(record, list): # array of instances
440
2265
  arr = np.empty(len(record), dtype=object)
441
2266
  for i, rec in enumerate(record):
442
2267
  arr[i] = rec
443
- return _get_pixel_array_from_instance_array(arr, **kwargs)
2268
+ return _get_pixel_array_from_instance_array(arr, sortby=sortby, pixels_first=pixels_first, first_volume=first_volume)
444
2269
  elif isinstance(record, np.ndarray): # array of instances
445
- return _get_pixel_array_from_instance_array(record, **kwargs)
2270
+ return _get_pixel_array_from_instance_array(record, sortby=sortby, pixels_first=pixels_first, first_volume=first_volume)
446
2271
  else:
447
- return get_pixel_array(record, **kwargs)
2272
+ return get_pixel_array(record, sortby=sortby, pixels_first=pixels_first, first_volume=first_volume)
448
2273
 
449
2274
 
450
- def get_pixel_array(record, sortby=None, first_volume=False, **kwargs):
451
- """Pixel values of the object as an ndarray
452
-
453
- Args:
454
- sortby:
455
- Optional list of DICOM keywords by which the volume is sorted
456
- pixels_first:
457
- If True, the (x,y) dimensions are the first dimensions of the array.
458
- If False, (x,y) are the last dimensions - this is the default.
459
-
460
- Returns:
461
- An ndarray holding the pixel data.
462
-
463
- An ndarry holding the datasets (instances) of each slice.
464
-
465
- Examples:
466
- ``` ruby
467
- # return a 3D array (z,x,y)
468
- # with the pixel data for each slice
469
- # in no particular order (z)
470
- array, _ = series.array()
471
-
472
- # return a 3D array (x,y,z)
473
- # with pixel data in the leading indices
474
- array, _ = series.array(pixels_first = True)
475
-
476
- # Return a 4D array (x,y,t,k) sorted by acquisition time
477
- # The last dimension (k) enumerates all slices with the same acquisition time.
478
- # If there is only one image for each acquision time,
479
- # the last dimension is a dimension of 1
480
- array, data = series.array('AcquisitionTime', pixels_first=True)
481
- v = array[:,:,10,0] # First image at the 10th location
482
- t = data[10,0].AcquisitionTIme # acquisition time of the same image
483
-
484
- # Return a 4D array (loc, TI, x, y)
485
- sortby = ['SliceLocation','InversionTime']
486
- array, data = series.array(sortby)
487
- v = array[10,6,0,:,:] # First slice at 11th slice location and 7th inversion time
488
- Loc = data[10,6,0][sortby[0]] # Slice location of the same slice
489
- TI = data[10,6,0][sortby[1]] # Inversion time of the same slice
490
- ```
491
- """
492
-
2275
+ def get_pixel_array(record, sortby=None, first_volume=False, pixels_first=False):
493
2276
  source = instance_array(record, sortby)
494
- array, headers = _get_pixel_array_from_sorted_instance_array(source, **kwargs)
2277
+ array, headers = _get_pixel_array_from_sorted_instance_array(source, pixels_first=pixels_first)
495
2278
  if first_volume:
496
2279
  return array[...,0], headers[...,0]
497
2280
  else:
498
2281
  return array, headers
499
2282
 
500
2283
 
501
- def _get_pixel_array_from_instance_array(instance_array, sortby=None, **kwargs):
2284
+ def _get_pixel_array_from_instance_array(instance_array, sortby=None, pixels_first=False, first_volume=False):
502
2285
  source = sort_instance_array(instance_array, sortby)
503
- return _get_pixel_array_from_sorted_instance_array(source, **kwargs)
2286
+ array, headers = _get_pixel_array_from_sorted_instance_array(source, pixels_first=pixels_first)
2287
+ if first_volume:
2288
+ return array[...,0], headers[...,0]
2289
+ else:
2290
+ return array, headers
504
2291
 
505
2292
 
506
2293
  def _get_pixel_array_from_sorted_instance_array(source, pixels_first=False):
@@ -529,112 +2316,19 @@ def _get_pixel_array_from_sorted_instance_array(source, pixels_first=False):
529
2316
  return array, source
530
2317
 
531
2318
 
532
- def set_pixel_array(series, array, source=None, pixels_first=False, coords=None, **kwargs):
533
- """
534
- Set pixel values of a series from a numpy ndarray.
535
-
536
- Since the pixel data do not hold any information about the
537
- image such as geometry, or other metainformation,
538
- a dataset must be provided as well with the same
539
- shape as the array except for the slice dimensions.
540
-
541
- If a dataset is not provided, header info is
542
- derived from existing instances in order.
543
-
544
- Args:
545
- array:
546
- numpy ndarray with pixel data.
547
-
548
- dataset:
549
- numpy ndarray
550
-
551
- Instances holding the header information.
552
- This *must* have the same shape as array, minus the slice dimensions.
553
-
554
- pixels_first:
555
- bool
556
-
557
- Specifies whether the pixel dimensions are the first or last dimensions of the series.
558
- If not provided it is assumed the slice dimensions are the last dimensions
559
- of the array.
560
-
561
- inplace:
562
- bool
563
-
564
- If True (default) the current pixel values in the series
565
- are overwritten. If set to False, the new array is added to the series.
566
-
567
- Examples:
568
- ```ruby
569
- # Invert all images in a series:
570
- array, _ = series.array()
571
- series.set_array(-array)
572
-
573
- # Create a maximum intensity projection of the series.
574
- # Header information for the result is taken from the first image.
575
- # Results are saved in a new sibling series.
576
- array, data = series.array()
577
- array = np.amax(array, axis=0)
578
- data = np.squeeze(data[0,...])
579
- series.new_sibling().set_array(array, data)
580
-
581
- # Create a 2D maximum intensity projection along the SliceLocation direction.
582
- # Header information for the result is taken from the first slice location.
583
- # Current data of the series are overwritten.
584
- array, data = series.array('SliceLocation')
585
- array = np.amax(array, axis=0)
586
- data = np.squeeze(data[0,...])
587
- series.set_array(array, data)
588
-
589
- # In a series with multiple slice locations and inversion times,
590
- # replace all images for each slice location with that of the shortest inversion time.
591
- array, data = series.array(['SliceLocation','InversionTime'])
592
- for loc in range(array.shape[0]): # loop over slice locations
593
- slice0 = np.squeeze(array[loc,0,0,:,:]) # get the slice with shortest TI
594
- TI0 = data[loc,0,0].InversionTime # get the TI of that slice
595
- for TI in range(array.shape[1]): # loop over TIs
596
- array[loc,TI,0,:,:] = slice0 # replace each slice with shortest TI
597
- data[loc,TI,0].InversionTime = TI0 # replace each TI with shortest TI
598
- series.set_array(array, data)
599
- ```
600
- """
2319
+ def set_pixel_array(series, array, source=None, pixels_first=False, **kwargs):
601
2320
 
602
2321
  # Move pixels to the end (default)
603
2322
  if pixels_first:
604
2323
  array = np.moveaxis(array, 0, -1)
605
2324
  array = np.moveaxis(array, 0, -1)
606
2325
 
607
- # If source data are provided, then coordinates are optional.
608
- # If no source data are given, then coordinates MUST be defined to ensure array data can be retrieved in the proper order..
609
- if source is None:
610
- if coords is None:
611
- if array.ndim > 4:
612
- msg = 'For arrays with more than 4 dimensions, \n'
613
- msg += 'either coordinate labels or headers must be provided'
614
- raise ValueError(msg)
615
- elif array.ndim == 4:
616
- coords = {
617
- 'SliceLocation':np.arange(array.shape[0]),
618
- 'AcquisitionTime':np.arange(array.shape[1]),
619
- }
620
- elif array.ndim == 3:
621
- coords = {
622
- 'SliceLocation':np.arange(array.shape[0]),
623
- }
624
-
625
- # If coordinates are given as 1D arrays, turn them into grids and flatten for iteration.
626
- if coords is not None:
627
- v0 = list(coords.values())[0]
628
- if np.array(v0).ndim==1: # regular grid
629
- pos = tuple([coords[c] for c in coords])
630
- pos = np.meshgrid(*pos)
631
- for i, c in enumerate(coords):
632
- coords[c] = pos[i].ravel()
633
-
634
2326
  # if no header data are provided, use template headers.
635
2327
  nr_of_slices = int(np.prod(array.shape[:-2]))
636
2328
  if source is None:
637
2329
  source = [series.new_instance(MRImage()) for _ in range(nr_of_slices)]
2330
+ if source.size == 0:
2331
+ source = [series.new_instance(MRImage()) for _ in range(nr_of_slices)]
638
2332
 
639
2333
  # If the header data are not the same size, use only the first one.
640
2334
  else:
@@ -670,40 +2364,67 @@ def set_pixel_array(series, array, source=None, pixels_first=False, coords=None,
670
2364
  for i, image in enumerate(copy_source):
671
2365
  series.progress(i+1, len(copy_source), 'Saving array..')
672
2366
  image.read()
673
-
674
2367
  for attr, vals in kwargs.items():
675
2368
  if isinstance(vals, list):
676
2369
  setattr(image, attr, vals[i])
677
2370
  else:
678
2371
  setattr(image, attr, vals)
679
-
680
- # If coordinates are provided, these will override the values from the sources.
681
- if coords is not None: # ADDED 31/05/2023
682
- for c in coords:
683
- image[c] = coords[c][i]
684
2372
  image.set_pixel_array(array[i,...])
685
2373
  image.clear()
686
2374
 
687
2375
 
2376
def affine_matrix(series):
    """Return the affine matrix of a series.

    If the series consists of multiple slice groups with different
    image orientations, then a list of (affine, slice_group) tuples is
    returned, one for each slice orientation. Otherwise a single
    (affine, slice_group) tuple is returned.

    Raises:
        ValueError: if ImageOrientationPatient is missing from the
            DICOM header.
    """
    image_orientation = series.ImageOrientationPatient
    if image_orientation is None:
        # BUG FIX: the second line previously used '=' instead of '+=',
        # discarding the first line of the error message.
        msg = 'ImageOrientationPatient not defined in the DICOM header \n'
        msg += 'This is a required DICOM field \n'
        msg += 'The data may be corrupted - please check'
        raise ValueError(msg)
    # Multiple slice groups in series - return list of affine matrices
    if isinstance(image_orientation[0], list):
        affine_matrices = []
        for orientation in image_orientation:  # renamed from 'dir' (shadowed builtin)
            slice_group = series.instances(ImageOrientationPatient=orientation)
            affine = _slice_group_affine_matrix(slice_group, orientation)
            affine_matrices.append((affine, slice_group))
        return affine_matrices
    # Single slice group in series - return a single affine matrix
    else:
        slice_group = series.instances()
        affine = _slice_group_affine_matrix(slice_group, image_orientation)
        return affine, slice_group
2402
+
688
2403
 
689
- # More compact but does not work with pause extensions
690
- # for i, s in enumerate(source):
691
- # series.status.progress(i+1, len(source), 'Writing array..')
692
- # if s not in instances:
693
- # s.copy_to(series).set_pixel_array(array[i,...])
694
- # else:
695
- # s.set_pixel_array(array[i,...])
696
-
697
-
698
-
699
-
2404
+ def _slice_group_affine_matrix(slice_group, image_orientation):
2405
+ """Return the affine matrix of a slice group"""
700
2406
 
2407
+ # single slice
2408
+ if len(slice_group) == 1:
2409
+ return slice_group[0].affine_matrix
2410
+ # multi slice
2411
+ else:
2412
+ pos = [s.ImagePositionPatient for s in slice_group]
2413
+ # Find unique elements
2414
+ pos = [x for i, x in enumerate(pos) if i==pos.index(x)]
701
2415
 
702
- ##
703
- ## Helper functions
704
- ##
2416
+ # One slice location
2417
+ if len(pos) == 1:
2418
+ return slice_group[0].affine_matrix
2419
+
2420
+ # Slices with different locations
2421
+ else:
2422
+ return image_utils.affine_matrix_multislice(
2423
+ image_orientation, pos,
2424
+ slice_group[0].PixelSpacing) # assume all the same pixel spacing
2425
+
705
2426
 
706
- def sort_instance_array(instance_array, sortby=None, status=True):
2427
+ def sort_instance_array(instance_array, sortby=None):
707
2428
  if sortby is None:
708
2429
  return instance_array
709
2430
  else:
@@ -711,10 +2432,55 @@ def sort_instance_array(instance_array, sortby=None, status=True):
711
2432
  sortby = [sortby]
712
2433
  df = read_dataframe_from_instance_array(instance_array, sortby + ['SOPInstanceUID'])
713
2434
  df.sort_values(sortby, inplace=True)
714
- return df_to_sorted_instance_array(instance_array[0], df, sortby, status=status)
715
-
2435
+ return df_to_sorted_instance_array(instance_array[0], df, sortby)
2436
+
2437
+
2438
def _instances(series, dims:tuple=None, inds:dict=None, select=None, **filters):
    """Return an ndarray of instances sorted along the given dimensions.

    Args:
        series: the series whose instances are returned.
        dims: tuple of DICOM keywords defining the dimensions of the
            result. Defaults to ('InstanceNumber',).
        inds: dictionary mapping a dimension in dims to the indices to
            extract along that dimension.
        select: optional dictionary of selection criteria, passed on to
            instance_array. Defaults to no selection.
        filters: additional keyword filters passed on to instance_array.

    Returns:
        numpy array of instances with one axis per dimension in dims.

    Raises:
        ValueError: if inds contains a key not in dims, or if the
            series shape is ambiguous or not well defined in dims.
    """
    # Use default dimensions if needed.
    if dims is None:
        dims = ('InstanceNumber',)
    # FIX: avoid a mutable default argument; None is the sentinel.
    if select is None:
        select = {}

    # If indices are provided, check that they are compatible with dims.
    if inds is not None:
        for dim in inds:
            if dim not in dims:
                msg = 'Indices must be in the dimensions provided.'
                raise ValueError(msg)

    # Get the frames and sort by dims.
    frames = instance_array(series, list(dims), report_none=True, select=select, **filters)
    if frames.size == 0:
        return frames.reshape(tuple([0]*len(dims)))
    if frames.shape[-1] > 1:
        # More than one frame at some location: shape is ambiguous.
        d = ''.join(['('] + [str(v)+', ' for v in dims] + [')'])
        msg = 'series shape is ambiguous in dimensions ' + d
        msg += '\n--> Multiple frames exist at some or all locations.'
        msg += '\n--> Hint: use Series.unique() to list the values at all locations.'
        raise ValueError(msg)
    if None in frames:
        # Missing frames at some locations: shape is not well defined.
        d = ''.join(['('] + [str(v)+', ' for v in dims] + [')'])
        msg = 'series shape is not well defined in dimensions ' + d
        msg += '\n--> There are no frames at some locations.'
        msg += '\n--> Hint: use Series.value() to find the values at all locations.'
        raise ValueError(msg)
    frames = frames[...,0]

    # Extract indices along each requested dimension, if provided.
    if inds is not None:
        for dim in inds:
            ind = inds[dim]
            d = dims.index(dim)
            frames = frames.take(ind, axis=d)
            # A scalar index drops the axis - restore it so the result
            # always has one axis per dimension in dims.
            if not isinstance(ind, np.ndarray):
                frames = np.expand_dims(frames, axis=d)
    if frames.size == 0:
        return frames.reshape(tuple([0]*len(dims)))
    else:
        return frames
2481
+
2482
+
2483
+ def instance_array(record, sortby=None, report_none=False, select={}, **filters):
718
2484
  """Sort instances by a list of attributes.
719
2485
 
720
2486
  Args:
@@ -724,7 +2490,7 @@ def instance_array(record, sortby=None, status=True):
724
2490
  An ndarray holding the instances sorted by sortby.
725
2491
  """
726
2492
  if sortby is None:
727
- instances = record.instances()
2493
+ instances = record.instances(**filters) # Note filter values here cant be arrays
728
2494
  array = np.empty(len(instances), dtype=object)
729
2495
  for i, instance in enumerate(instances):
730
2496
  array[i] = instance
@@ -732,34 +2498,44 @@ def instance_array(record, sortby=None, status=True):
732
2498
  else:
733
2499
  if not isinstance(sortby, list):
734
2500
  sortby = [sortby]
735
- df = record.read_dataframe(sortby + ['SOPInstanceUID'])
2501
+ df = record.read_dataframe(sortby + ['SOPInstanceUID'], select=select, **filters)
2502
+ df = df[df.SOPInstanceUID.values != None]
2503
+ if df.empty:
2504
+ return np.array([])
2505
+ if report_none:
2506
+ if None in df.values:
2507
+ d = ''.join(['('] + [str(v)+', ' for v in sortby] + [')'])
2508
+ msg = 'series shape is not well defined in dimensions ' + d
2509
+ msg += '\n--> Some of the dimensions are not defined in the header.'
2510
+ msg += '\n--> Hint: use Series.value() to find the undefined values.'
2511
+ raise ValueError(msg)
736
2512
  df.sort_values(sortby, inplace=True)
737
- return df_to_sorted_instance_array(record, df, sortby, status=status)
2513
+ return df_to_sorted_instance_array(record, df, sortby)
738
2514
 
739
2515
 
740
- def df_to_sorted_instance_array(record, df, sortby, status=True):
741
- # note record here only passed for access to the function instance() and progress()
742
- # This really should be db.instance()
2516
def df_to_sorted_instance_array(record, df, sortby):
    """Recursively build an ndarray of instances sorted by the
    attributes in sortby.

    Args:
        record: record used for progress reporting and instance lookup.
        df: dataframe of instances with columns for each sortby key.
        sortby: list of attribute names; the first is used at this
            level, the rest are handled recursively.

    Returns:
        Nested instance arrays stacked into one ndarray.
    """
    data = []
    vals = df[sortby[0]].unique()
    for i, c in enumerate(vals):
        record.progress(i, len(vals), message='Sorting pixel data..')
        # None happens when an undefined keyword is used; treat it,
        # like NaN, as a missing value.
        if c is None:
            missing = True
        else:
            # FIX: narrowed from a bare 'except:' - np.isnan raises
            # TypeError for types it does not support; assume such
            # values are not NaN.
            try:
                missing = bool(np.isnan(c))
            except TypeError:
                missing = False
        if missing:
            dfc = df[df[sortby[0]].isnull()]
        else:
            dfc = df[df[sortby[0]] == c]
        if len(sortby) == 1:
            datac = df_to_instance_array(record, dfc)
        else:
            datac = df_to_sorted_instance_array(record, dfc, sortby[1:])
        data.append(datac)
    return _stack(data, align_left=True)
765
2541
 
@@ -784,9 +2560,10 @@ def _stack(arrays, align_left=False):
784
2560
 
785
2561
  # Get the dimensions of the stack
786
2562
  # For each dimension, look for the largest values across all arrays
787
- arrays = [a for a in arrays if a is not None]
2563
+ #arrays = [a for a in arrays if a is not None]
2564
+ arrays = [a for a in arrays if a.size != 0]
788
2565
  if arrays == []:
789
- return
2566
+ return np.array([])
790
2567
  ndim = len(arrays[0].shape)
791
2568
  dim = [0] * ndim
792
2569
  for array in arrays:
@@ -814,3 +2591,9 @@ def _stack(arrays, align_left=False):
814
2591
 
815
2592
  return stack
816
2593
 
2594
+
2595
+
2596
+
2597
+
2598
+
2599
+