dbdicom 0.2.0__py3-none-any.whl → 0.3.16__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- dbdicom/__init__.py +3 -25
- dbdicom/api.py +496 -0
- dbdicom/const.py +144 -0
- dbdicom/database.py +133 -0
- dbdicom/dataset.py +471 -0
- dbdicom/dbd.py +1290 -0
- dbdicom/external/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/external/dcm4che/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-311.pyc +0 -0
- dbdicom/external/dcm4che/bin/emf2sf +57 -57
- dbdicom/register.py +402 -0
- dbdicom/{ds/types → sop_classes}/ct_image.py +2 -16
- dbdicom/{ds/types → sop_classes}/enhanced_mr_image.py +206 -160
- dbdicom/sop_classes/mr_image.py +338 -0
- dbdicom/sop_classes/parametric_map.py +381 -0
- dbdicom/sop_classes/secondary_capture.py +140 -0
- dbdicom/sop_classes/segmentation.py +311 -0
- dbdicom/{ds/types → sop_classes}/ultrasound_multiframe_image.py +1 -15
- dbdicom/{ds/types → sop_classes}/xray_angiographic_image.py +2 -17
- dbdicom/utils/arrays.py +142 -0
- dbdicom/utils/files.py +0 -20
- dbdicom/utils/image.py +43 -466
- dbdicom/utils/pydicom_dataset.py +386 -0
- dbdicom-0.3.16.dist-info/METADATA +26 -0
- dbdicom-0.3.16.dist-info/RECORD +54 -0
- {dbdicom-0.2.0.dist-info → dbdicom-0.3.16.dist-info}/WHEEL +1 -1
- dbdicom/create.py +0 -450
- dbdicom/ds/__init__.py +0 -10
- dbdicom/ds/create.py +0 -63
- dbdicom/ds/dataset.py +0 -841
- dbdicom/ds/dictionaries.py +0 -620
- dbdicom/ds/types/mr_image.py +0 -267
- dbdicom/ds/types/parametric_map.py +0 -226
- dbdicom/external/__pycache__/__init__.cpython-310.pyc +0 -0
- dbdicom/external/__pycache__/__init__.cpython-37.pyc +0 -0
- dbdicom/external/dcm4che/__pycache__/__init__.cpython-310.pyc +0 -0
- dbdicom/external/dcm4che/__pycache__/__init__.cpython-37.pyc +0 -0
- dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-310.pyc +0 -0
- dbdicom/external/dcm4che/bin/__pycache__/__init__.cpython-37.pyc +0 -0
- dbdicom/external/dcm4che/lib/linux-x86/libclib_jiio.so +0 -0
- dbdicom/external/dcm4che/lib/linux-x86-64/libclib_jiio.so +0 -0
- dbdicom/external/dcm4che/lib/linux-x86-64/libopencv_java.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparc/libclib_jiio_vis2.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-sparcv9/libclib_jiio_vis2.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-x86/libclib_jiio.so +0 -0
- dbdicom/external/dcm4che/lib/solaris-x86-64/libclib_jiio.so +0 -0
- dbdicom/manager.py +0 -2077
- dbdicom/message.py +0 -119
- dbdicom/record.py +0 -1526
- dbdicom/types/database.py +0 -107
- dbdicom/types/instance.py +0 -184
- dbdicom/types/patient.py +0 -40
- dbdicom/types/series.py +0 -816
- dbdicom/types/study.py +0 -58
- dbdicom/utils/variables.py +0 -155
- dbdicom/utils/vreg.py +0 -2626
- dbdicom/wrappers/__init__.py +0 -7
- dbdicom/wrappers/dipy.py +0 -462
- dbdicom/wrappers/elastix.py +0 -855
- dbdicom/wrappers/numpy.py +0 -119
- dbdicom/wrappers/scipy.py +0 -1413
- dbdicom/wrappers/skimage.py +0 -1030
- dbdicom/wrappers/sklearn.py +0 -151
- dbdicom/wrappers/vreg.py +0 -273
- dbdicom-0.2.0.dist-info/METADATA +0 -276
- dbdicom-0.2.0.dist-info/RECORD +0 -81
- {dbdicom-0.2.0.dist-info → dbdicom-0.3.16.dist-info/licenses}/LICENSE +0 -0
- {dbdicom-0.2.0.dist-info → dbdicom-0.3.16.dist-info}/top_level.txt +0 -0
dbdicom/types/series.py
DELETED
|
@@ -1,816 +0,0 @@
|
|
|
1
|
-
# Importing annotations to handle or sign in import type hints
|
|
2
|
-
from __future__ import annotations
|
|
3
|
-
|
|
4
|
-
import os
|
|
5
|
-
import math
|
|
6
|
-
|
|
7
|
-
import numpy as np
|
|
8
|
-
|
|
9
|
-
from dbdicom.record import Record, read_dataframe_from_instance_array
|
|
10
|
-
from dbdicom.ds import MRImage
|
|
11
|
-
import dbdicom.utils.image as image_utils
|
|
12
|
-
from dbdicom.manager import Manager
|
|
13
|
-
# import dbdicom.wrappers.scipy as scipy_utils
|
|
14
|
-
from dbdicom.utils.files import export_path
|
|
15
|
-
|
|
16
|
-
|
|
17
|
-
class Series(Record):
|
|
18
|
-
|
|
19
|
-
name = 'SeriesInstanceUID'
|
|
20
|
-
|
|
21
|
-
def _set_key(self):
|
|
22
|
-
self._key = self.keys()[0]
|
|
23
|
-
|
|
24
|
-
def remove(self):
|
|
25
|
-
self.manager.delete_series([self.uid])
|
|
26
|
-
|
|
27
|
-
def parent(self):
|
|
28
|
-
#uid = self.manager.register.at[self.key(), 'StudyInstanceUID']
|
|
29
|
-
uid = self.manager._at(self.key(), 'StudyInstanceUID')
|
|
30
|
-
return self.record('Study', uid, key=self.key())
|
|
31
|
-
|
|
32
|
-
def children(self, **kwargs):
|
|
33
|
-
return self.instances(**kwargs)
|
|
34
|
-
|
|
35
|
-
def new_child(self, dataset=None, **kwargs):
|
|
36
|
-
attr = {**kwargs, **self.attributes}
|
|
37
|
-
return self.new_instance(dataset=dataset, **attr)
|
|
38
|
-
|
|
39
|
-
def new_sibling(self, suffix=None, **kwargs):
|
|
40
|
-
if suffix is not None:
|
|
41
|
-
desc = self.manager._at(self.key(), 'SeriesDescription')
|
|
42
|
-
kwargs['SeriesDescription'] = desc + ' [' + suffix + ']'
|
|
43
|
-
return self.parent().new_child(**kwargs)
|
|
44
|
-
|
|
45
|
-
def new_instance(self, dataset=None, **kwargs):
|
|
46
|
-
attr = {**kwargs, **self.attributes}
|
|
47
|
-
uid, key = self.manager.new_instance(parent=self.uid, dataset=dataset, key=self.key(), **attr)
|
|
48
|
-
return self.record('Instance', uid, key, **attr)
|
|
49
|
-
|
|
50
|
-
# replace by clone(). Adopt implies move rather than copy
|
|
51
|
-
def adopt(self, instances):
|
|
52
|
-
uids = [i.uid for i in instances]
|
|
53
|
-
uids = self.manager.copy_to_series(uids, self.uid, **self.attributes)
|
|
54
|
-
if isinstance(uids, list):
|
|
55
|
-
return [self.record('Instance', uid) for uid in uids]
|
|
56
|
-
else:
|
|
57
|
-
return self.record('Instance', uids)
|
|
58
|
-
|
|
59
|
-
def _copy_from(self, record, **kwargs):
|
|
60
|
-
attr = {**kwargs, **self.attributes}
|
|
61
|
-
uids = self.manager.copy_to_series(record.uid, self.uid, **attr)
|
|
62
|
-
if isinstance(uids, list):
|
|
63
|
-
return [self.record('Instance', uid, **attr) for uid in uids]
|
|
64
|
-
else:
|
|
65
|
-
return self.record('Instance', uids, **attr)
|
|
66
|
-
|
|
67
|
-
|
|
68
|
-
|
|
69
|
-
|
|
70
|
-
def export_as_npy(self, directory=None, filename=None, sortby=None, pixels_first=False):
|
|
71
|
-
"""Export array in numpy format"""
|
|
72
|
-
|
|
73
|
-
if directory is None:
|
|
74
|
-
directory = self.dialog.directory(message='Please select a folder for the png data')
|
|
75
|
-
if filename is None:
|
|
76
|
-
filename = self.SeriesDescription
|
|
77
|
-
array, _ = self.get_pixel_array(sortby=sortby, pixels_first=pixels_first)
|
|
78
|
-
file = os.path.join(directory, filename + '.npy')
|
|
79
|
-
with open(file, 'wb') as f:
|
|
80
|
-
np.save(f, array)
|
|
81
|
-
|
|
82
|
-
|
|
83
|
-
def export_as_dicom(self, path):
|
|
84
|
-
# instance = self.instance()
|
|
85
|
-
# patient = "".join([c if c.isalnum() else "_" for c in instance.PatientID])
|
|
86
|
-
# study = "".join([c if c.isalnum() else "_" for c in instance.StudyDescription])
|
|
87
|
-
# series = "".join([c if c.isalnum() else "_" for c in instance.SeriesDescription])
|
|
88
|
-
# path = os.path.join(os.path.join(os.path.join(path, patient), study), series)
|
|
89
|
-
# path = export_path(path)
|
|
90
|
-
|
|
91
|
-
folder = self.label()
|
|
92
|
-
path = export_path(path, folder)
|
|
93
|
-
|
|
94
|
-
copy = self.copy()
|
|
95
|
-
mgr = Manager(path, status=self.status)
|
|
96
|
-
mgr.open(path)
|
|
97
|
-
mgr.import_datasets(copy.files())
|
|
98
|
-
copy.remove()
|
|
99
|
-
|
|
100
|
-
|
|
101
|
-
def export_as_png(self, path):
|
|
102
|
-
"""Export all images as png files"""
|
|
103
|
-
folder = self.label()
|
|
104
|
-
path = export_path(path, folder)
|
|
105
|
-
images = self.images()
|
|
106
|
-
for i, img in enumerate(images):
|
|
107
|
-
img.status.progress(i+1, len(images), 'Exporting png..')
|
|
108
|
-
img.export_as_png(path)
|
|
109
|
-
|
|
110
|
-
|
|
111
|
-
def export_as_csv(self, path):
|
|
112
|
-
"""Export all images as csv files"""
|
|
113
|
-
folder = self.label()
|
|
114
|
-
path = export_path(path, folder)
|
|
115
|
-
images = self.images()
|
|
116
|
-
for i, img in enumerate(images):
|
|
117
|
-
img.status.progress(i+1, len(images), 'Exporting csv..')
|
|
118
|
-
img.export_as_csv(path)
|
|
119
|
-
|
|
120
|
-
|
|
121
|
-
def export_as_nifti(self, path: str):
|
|
122
|
-
"""Export images in nifti format.
|
|
123
|
-
|
|
124
|
-
Args:
|
|
125
|
-
path (str): path where results are to be saved.
|
|
126
|
-
"""
|
|
127
|
-
folder = self.label()
|
|
128
|
-
path = export_path(path, folder)
|
|
129
|
-
affine = self.affine_matrix()
|
|
130
|
-
if not isinstance(affine, list):
|
|
131
|
-
affine = [affine]
|
|
132
|
-
for a in affine:
|
|
133
|
-
matrix = a[0]
|
|
134
|
-
images = a[1]
|
|
135
|
-
for i, img in enumerate(images):
|
|
136
|
-
img.status.progress(i+1, len(images), 'Exporting nifti..')
|
|
137
|
-
img.export_as_nifti(path, matrix)
|
|
138
|
-
|
|
139
|
-
|
|
140
|
-
def subseries(*args, move=False, **kwargs):
|
|
141
|
-
return subseries(*args, move=move, **kwargs)
|
|
142
|
-
|
|
143
|
-
|
|
144
|
-
def split_by(self, keyword: str | tuple) -> list:
|
|
145
|
-
"""Split the series into multiple subseries based on keyword value.
|
|
146
|
-
|
|
147
|
-
Args:
|
|
148
|
-
keyword (str | tuple): A valid DICOM keyword or hexadecimal (group, element) tag.
|
|
149
|
-
|
|
150
|
-
Raises:
|
|
151
|
-
ValueError: if an invalid or missing keyword is provided.
|
|
152
|
-
ValueError: if all images have the same value for the keyword, so no subseries can be derived. An exception is raised rather than a copy of the series to avoid unnecessary copies being made. If that is the intention, use series.copy() instead.
|
|
153
|
-
|
|
154
|
-
Returns:
|
|
155
|
-
list: A list of subseries, where each element has the same value of the given keyword.
|
|
156
|
-
|
|
157
|
-
Example:
|
|
158
|
-
|
|
159
|
-
Create a single-slice series with multiple flip angles and repetition times:
|
|
160
|
-
|
|
161
|
-
>>> coords = {
|
|
162
|
-
... 'FlipAngle': [2, 15, 30],
|
|
163
|
-
... 'RepetitionTime': [2.5, 5.0, 7.5],
|
|
164
|
-
... }
|
|
165
|
-
>>> zeros = db.zeros((3,2,128,128), coords)
|
|
166
|
-
>>> print(zeros)
|
|
167
|
-
---------- SERIES --------------
|
|
168
|
-
Series 001 [New Series]
|
|
169
|
-
Nr of instances: 6
|
|
170
|
-
MRImage 000001
|
|
171
|
-
MRImage 000002
|
|
172
|
-
MRImage 000003
|
|
173
|
-
MRImage 000004
|
|
174
|
-
MRImage 000005
|
|
175
|
-
MRImage 000006
|
|
176
|
-
--------------------------------
|
|
177
|
-
|
|
178
|
-
Splitting this series by FlipAngle now creates 3 new series in the same study, with 2 images each. By default the fixed value of the splitting attribute is written in the series description:
|
|
179
|
-
|
|
180
|
-
>>> zeros_FA = zeros.split_by('FlipAngle')
|
|
181
|
-
>>> zeros.study().print()
|
|
182
|
-
---------- STUDY ---------------
|
|
183
|
-
Study New Study [None]
|
|
184
|
-
Series 001 [New Series]
|
|
185
|
-
Nr of instances: 6
|
|
186
|
-
Series 002 [New Series[FlipAngle = 2.0]]
|
|
187
|
-
Nr of instances: 2
|
|
188
|
-
Series 003 [New Series[FlipAngle = 15.0]]
|
|
189
|
-
Nr of instances: 2
|
|
190
|
-
Series 004 [New Series[FlipAngle = 30.0]]
|
|
191
|
-
Nr of instances: 2
|
|
192
|
-
--------------------------------
|
|
193
|
-
"""
|
|
194
|
-
|
|
195
|
-
self.status.message('Reading values..')
|
|
196
|
-
try:
|
|
197
|
-
values = self[keyword]
|
|
198
|
-
except:
|
|
199
|
-
msg = str(keyword) + ' is not a valid DICOM keyword'
|
|
200
|
-
raise ValueError(msg)
|
|
201
|
-
if len(values) == 1:
|
|
202
|
-
msg = 'Cannot split by ' + str(keyword) + '\n'
|
|
203
|
-
msg += 'All images have the same value'
|
|
204
|
-
raise ValueError(msg)
|
|
205
|
-
|
|
206
|
-
self.status.message('Splitting series..')
|
|
207
|
-
split_series = []
|
|
208
|
-
desc = self.instance().SeriesDescription + '[' + keyword + ' = '
|
|
209
|
-
for v in values:
|
|
210
|
-
kwargs = {keyword: v}
|
|
211
|
-
new = self.subseries(**kwargs)
|
|
212
|
-
new.SeriesDescription = desc + str(v) + ']'
|
|
213
|
-
split_series.append(new)
|
|
214
|
-
return split_series
|
|
215
|
-
|
|
216
|
-
|
|
217
|
-
def import_dicom(self, files):
|
|
218
|
-
uids = self.manager.import_datasets(files)
|
|
219
|
-
self.manager.move_to(uids, self.uid)
|
|
220
|
-
|
|
221
|
-
def slice_groups(*args, **kwargs):
|
|
222
|
-
return slice_groups(*args, **kwargs)
|
|
223
|
-
|
|
224
|
-
|
|
225
|
-
def affine_matrix(self):
|
|
226
|
-
return affine_matrix(self)
|
|
227
|
-
|
|
228
|
-
|
|
229
|
-
def ndarray(self, dims=('InstanceNumber',)) -> np.ndarray:
|
|
230
|
-
"""Return a numpy.ndarray with pixel data.
|
|
231
|
-
|
|
232
|
-
Args:
|
|
233
|
-
dims (tuple, optional): Dimensions of the result, as a tuple of valid DICOM tags of any length. Defaults to ('InstanceNumber',).
|
|
234
|
-
|
|
235
|
-
Returns:
|
|
236
|
-
np.ndarray: pixel data. The number of dimensions will be 2 plus the number of elements in dim. The first two indices will enumerate (x,y) coordinates in the slice, the other dimensions are as specified by the dims argument.
|
|
237
|
-
|
|
238
|
-
See also:
|
|
239
|
-
:func:`~set_ndarray`
|
|
240
|
-
|
|
241
|
-
Example:
|
|
242
|
-
Create a zero-filled array, describing 8 MRI slices each measured at 3 flip angles and 2 repetition times:
|
|
243
|
-
|
|
244
|
-
>>> coords = {
|
|
245
|
-
... 'SliceLocation': np.arange(8),
|
|
246
|
-
... 'FlipAngle': [2, 15, 30],
|
|
247
|
-
... 'RepetitionTime': [2.5, 5.0],
|
|
248
|
-
... }
|
|
249
|
-
>>> zeros = db.zeros((128,128,8,3,2), coords)
|
|
250
|
-
|
|
251
|
-
To retrieve the array, the dimensions need to be provided:
|
|
252
|
-
|
|
253
|
-
>>> dims = ('SliceLocation', 'FlipAngle', 'RepetitionTime')
|
|
254
|
-
>>> array = zeros.ndarray(dims)
|
|
255
|
-
>>> print(array.shape)
|
|
256
|
-
(128, 128, 8, 3, 2)
|
|
257
|
-
|
|
258
|
-
The dimensions are the keys of the coordinate dictionary, so this could also have been called as:
|
|
259
|
-
|
|
260
|
-
>>> array = zeros.ndarray(dims=tuple(coords))
|
|
261
|
-
>>> print(array.shape)
|
|
262
|
-
(128, 128, 8, 3, 2)
|
|
263
|
-
"""
|
|
264
|
-
array, _ = get_pixel_array(self, sortby=list(dims), first_volume=True, pixels_first=True)
|
|
265
|
-
return array
|
|
266
|
-
|
|
267
|
-
|
|
268
|
-
def set_ndarray(self, array:np.ndarray, dims=('InstanceNumber',), coords:dict=None):
|
|
269
|
-
"""Assign new pixel data with a new numpy.ndarray.
|
|
270
|
-
|
|
271
|
-
Args:
|
|
272
|
-
array (np.ndarray): array with new pixel data.
|
|
273
|
-
dims (tuple, optional): Dimensions of the result, as a tuple of valid DICOM tags of any length. Defaults to ('InstanceNumber',). Must be provided if coords are not given.
|
|
274
|
-
coords (dict, optional): Provide coordinates for the array explicitly, using a dictionary with dimensions as keys and as values either 1D or meshgrid arrays of coordinates. If coords are not provided, then dimensions a default range array will be used. If coordinates are provided, then the dimensions argument is ignored.
|
|
275
|
-
|
|
276
|
-
Raises:
|
|
277
|
-
ValueError: if dimensions and coordinates are both provided with incompatible dimensions.
|
|
278
|
-
|
|
279
|
-
See also:
|
|
280
|
-
:func:`~ndarray`
|
|
281
|
-
|
|
282
|
-
Warning:
|
|
283
|
-
Currently this function assumes that the new array has the same shape as the current array. This will be generalised in an upcoming update - for now please look at the pipelines examples for saving different dimensions using the current interface.
|
|
284
|
-
|
|
285
|
-
Example:
|
|
286
|
-
Create a zero-filled array, describing 8 MRI slices each measured at 3 flip angles and 2 repetition times:
|
|
287
|
-
|
|
288
|
-
>>> coords = {
|
|
289
|
-
... 'SliceLocation': np.arange(8),
|
|
290
|
-
... 'FlipAngle': [2, 15, 30],
|
|
291
|
-
... 'RepetitionTime': [2.5, 5.0],
|
|
292
|
-
... }
|
|
293
|
-
>>> series = db.zeros((128,128,8,3,2), coords)
|
|
294
|
-
|
|
295
|
-
Retrieve the array and check that it is populated with zeros:
|
|
296
|
-
|
|
297
|
-
>>> array = series.ndarray(dims=tuple(coords))
|
|
298
|
-
>>> print(np.mean(array))
|
|
299
|
-
0.0
|
|
300
|
-
|
|
301
|
-
Now overwrite the values with a new array of ones. Coordinates are not changed so only dimensions need to be specified:
|
|
302
|
-
|
|
303
|
-
>>> ones = np.ones((128,128,8,3,2))
|
|
304
|
-
>>> series.set_ndarray(ones, dims=tuple(coords))
|
|
305
|
-
|
|
306
|
-
Retrieve the array and check that it is now populated with ones:
|
|
307
|
-
|
|
308
|
-
>>> array = series.ndarray(dims=tuple(coords))
|
|
309
|
-
>>> print(np.mean(array))
|
|
310
|
-
1.0
|
|
311
|
-
"""
|
|
312
|
-
# TODO: Include a reshaping option!!!!
|
|
313
|
-
|
|
314
|
-
# TODO: set_pixel_array has **kwargs to allow setting other properties on the fly to save extra reading and writing. This makes sense but should be handled by a more general function, such as:
|
|
315
|
-
# #
|
|
316
|
-
# series.set_properties(ndarray:np.ndarray, coords:{}, affine:np.ndarray, **kwargs)
|
|
317
|
-
# #
|
|
318
|
-
|
|
319
|
-
# Lazy solution - first get the header information (slower than propagating explicitly but conceptually more convenient - can be rationalised later - pixel values can be set on the fly as the header is retrieved)
|
|
320
|
-
|
|
321
|
-
# If coordinates are provided, the dimensions are taken from that. Dimensions are not needed in this case but if they are set they need to be the same as those specified in the coordinates. Else an error is raised.
|
|
322
|
-
if coords is not None:
|
|
323
|
-
if dims != tuple(coords):
|
|
324
|
-
msg = 'Coordinates do not have the correct dimensions \n'
|
|
325
|
-
msg += 'Note: if coordinates are defined than the dimensions argument is ignored. Hence you can remove the dimensions argument in this call, or else make sure it matches up with the dimensions in coordinates.'
|
|
326
|
-
raise ValueError(msg)
|
|
327
|
-
else:
|
|
328
|
-
dims = tuple(coords)
|
|
329
|
-
_, headers = get_pixel_array(self, sortby=list(dims), first_volume=True, pixels_first=True)
|
|
330
|
-
set_pixel_array(self, array, source=headers, pixels_first=True, coords=coords)
|
|
331
|
-
|
|
332
|
-
|
|
333
|
-
#
|
|
334
|
-
# Following APIs are obsolete and will be removed in future versions
|
|
335
|
-
#
|
|
336
|
-
|
|
337
|
-
|
|
338
|
-
def array(*args, **kwargs):
|
|
339
|
-
return get_pixel_array(*args, **kwargs)
|
|
340
|
-
|
|
341
|
-
def set_array(*args, **kwargs):
|
|
342
|
-
set_pixel_array(*args, **kwargs)
|
|
343
|
-
|
|
344
|
-
def get_pixel_array(*args, **kwargs):
|
|
345
|
-
return get_pixel_array(*args, **kwargs)
|
|
346
|
-
|
|
347
|
-
def set_pixel_array(*args, **kwargs):
|
|
348
|
-
set_pixel_array(*args, **kwargs)
|
|
349
|
-
|
|
350
|
-
|
|
351
|
-
|
|
352
|
-
|
|
353
|
-
|
|
354
|
-
|
|
355
|
-
def slice_groups(series): # not yet in use
|
|
356
|
-
slice_groups = []
|
|
357
|
-
for orientation in series.ImageOrientationPatient:
|
|
358
|
-
sg = series.instances(ImageOrientationPatient=orientation)
|
|
359
|
-
slice_groups.append(sg)
|
|
360
|
-
return slice_groups
|
|
361
|
-
|
|
362
|
-
def subseries(record, move=False, **kwargs):
|
|
363
|
-
"""Extract subseries"""
|
|
364
|
-
series = record.new_sibling()
|
|
365
|
-
instances = record.instances(**kwargs)
|
|
366
|
-
for i, instance in enumerate(instances):
|
|
367
|
-
record.status.progress(i+1, len(instances), 'Extracting subseries..')
|
|
368
|
-
if move:
|
|
369
|
-
instance.move_to(series)
|
|
370
|
-
else:
|
|
371
|
-
instance.copy_to(series)
|
|
372
|
-
# This should be faster:
|
|
373
|
-
# instances = record.instances(**kwargs)
|
|
374
|
-
# series.adopt(instances)
|
|
375
|
-
return series
|
|
376
|
-
|
|
377
|
-
def read_npy(record):
|
|
378
|
-
# Not in use - loading of temporary numpy files
|
|
379
|
-
file = record.manager.npy()
|
|
380
|
-
if not os.path.exists(file):
|
|
381
|
-
return
|
|
382
|
-
with open(file, 'rb') as f:
|
|
383
|
-
array = np.load(f)
|
|
384
|
-
return array
|
|
385
|
-
|
|
386
|
-
|
|
387
|
-
def affine_matrix(series):
|
|
388
|
-
"""Returns the affine matrix of a series.
|
|
389
|
-
|
|
390
|
-
If the series consists of multiple slice groups with different
|
|
391
|
-
image orientations, then a list of affine matrices is returned,
|
|
392
|
-
one for each slice orientation.
|
|
393
|
-
"""
|
|
394
|
-
image_orientation = series.ImageOrientationPatient
|
|
395
|
-
if image_orientation is None:
|
|
396
|
-
msg = 'ImageOrientationPatient not defined in the DICOM header \n'
|
|
397
|
-
msg = 'This is a required DICOM field \n'
|
|
398
|
-
msg += 'The data may be corrupted - please check'
|
|
399
|
-
raise ValueError(msg)
|
|
400
|
-
# Multiple slice groups in series - return list of affine matrices
|
|
401
|
-
if isinstance(image_orientation[0], list):
|
|
402
|
-
affine_matrices = []
|
|
403
|
-
for dir in image_orientation:
|
|
404
|
-
slice_group = series.instances(ImageOrientationPatient=dir)
|
|
405
|
-
affine = _slice_group_affine_matrix(slice_group, dir)
|
|
406
|
-
affine_matrices.append((affine, slice_group))
|
|
407
|
-
return affine_matrices
|
|
408
|
-
# Single slice group in series - return a single affine matrix
|
|
409
|
-
else:
|
|
410
|
-
slice_group = series.instances()
|
|
411
|
-
affine = _slice_group_affine_matrix(slice_group, image_orientation)
|
|
412
|
-
return affine, slice_group
|
|
413
|
-
|
|
414
|
-
|
|
415
|
-
def _slice_group_affine_matrix(slice_group, image_orientation):
|
|
416
|
-
"""Return the affine matrix of a slice group"""
|
|
417
|
-
|
|
418
|
-
# single slice
|
|
419
|
-
if len(slice_group) == 1:
|
|
420
|
-
return slice_group[0].affine_matrix
|
|
421
|
-
# multi slice
|
|
422
|
-
else:
|
|
423
|
-
pos = [s.ImagePositionPatient for s in slice_group]
|
|
424
|
-
# Find unique elements
|
|
425
|
-
pos = [x for i, x in enumerate(pos) if i==pos.index(x)]
|
|
426
|
-
|
|
427
|
-
# One slice location
|
|
428
|
-
if len(pos) == 1:
|
|
429
|
-
return slice_group[0].affine_matrix
|
|
430
|
-
|
|
431
|
-
# Slices with different locations
|
|
432
|
-
else:
|
|
433
|
-
return image_utils.affine_matrix_multislice(
|
|
434
|
-
image_orientation, pos,
|
|
435
|
-
slice_group[0].PixelSpacing) # assume all the same pixel spacing
|
|
436
|
-
|
|
437
|
-
|
|
438
|
-
def array(record, **kwargs):
|
|
439
|
-
if isinstance(record, list): # array of instances
|
|
440
|
-
arr = np.empty(len(record), dtype=object)
|
|
441
|
-
for i, rec in enumerate(record):
|
|
442
|
-
arr[i] = rec
|
|
443
|
-
return _get_pixel_array_from_instance_array(arr, **kwargs)
|
|
444
|
-
elif isinstance(record, np.ndarray): # array of instances
|
|
445
|
-
return _get_pixel_array_from_instance_array(record, **kwargs)
|
|
446
|
-
else:
|
|
447
|
-
return get_pixel_array(record, **kwargs)
|
|
448
|
-
|
|
449
|
-
|
|
450
|
-
def get_pixel_array(record, sortby=None, first_volume=False, **kwargs):
|
|
451
|
-
"""Pixel values of the object as an ndarray
|
|
452
|
-
|
|
453
|
-
Args:
|
|
454
|
-
sortby:
|
|
455
|
-
Optional list of DICOM keywords by which the volume is sorted
|
|
456
|
-
pixels_first:
|
|
457
|
-
If True, the (x,y) dimensions are the first dimensions of the array.
|
|
458
|
-
If False, (x,y) are the last dimensions - this is the default.
|
|
459
|
-
|
|
460
|
-
Returns:
|
|
461
|
-
An ndarray holding the pixel data.
|
|
462
|
-
|
|
463
|
-
An ndarry holding the datasets (instances) of each slice.
|
|
464
|
-
|
|
465
|
-
Examples:
|
|
466
|
-
``` ruby
|
|
467
|
-
# return a 3D array (z,x,y)
|
|
468
|
-
# with the pixel data for each slice
|
|
469
|
-
# in no particular order (z)
|
|
470
|
-
array, _ = series.array()
|
|
471
|
-
|
|
472
|
-
# return a 3D array (x,y,z)
|
|
473
|
-
# with pixel data in the leading indices
|
|
474
|
-
array, _ = series.array(pixels_first = True)
|
|
475
|
-
|
|
476
|
-
# Return a 4D array (x,y,t,k) sorted by acquisition time
|
|
477
|
-
# The last dimension (k) enumerates all slices with the same acquisition time.
|
|
478
|
-
# If there is only one image for each acquision time,
|
|
479
|
-
# the last dimension is a dimension of 1
|
|
480
|
-
array, data = series.array('AcquisitionTime', pixels_first=True)
|
|
481
|
-
v = array[:,:,10,0] # First image at the 10th location
|
|
482
|
-
t = data[10,0].AcquisitionTIme # acquisition time of the same image
|
|
483
|
-
|
|
484
|
-
# Return a 4D array (loc, TI, x, y)
|
|
485
|
-
sortby = ['SliceLocation','InversionTime']
|
|
486
|
-
array, data = series.array(sortby)
|
|
487
|
-
v = array[10,6,0,:,:] # First slice at 11th slice location and 7th inversion time
|
|
488
|
-
Loc = data[10,6,0][sortby[0]] # Slice location of the same slice
|
|
489
|
-
TI = data[10,6,0][sortby[1]] # Inversion time of the same slice
|
|
490
|
-
```
|
|
491
|
-
"""
|
|
492
|
-
|
|
493
|
-
source = instance_array(record, sortby)
|
|
494
|
-
array, headers = _get_pixel_array_from_sorted_instance_array(source, **kwargs)
|
|
495
|
-
if first_volume:
|
|
496
|
-
return array[...,0], headers[...,0]
|
|
497
|
-
else:
|
|
498
|
-
return array, headers
|
|
499
|
-
|
|
500
|
-
|
|
501
|
-
def _get_pixel_array_from_instance_array(instance_array, sortby=None, **kwargs):
|
|
502
|
-
source = sort_instance_array(instance_array, sortby)
|
|
503
|
-
return _get_pixel_array_from_sorted_instance_array(source, **kwargs)
|
|
504
|
-
|
|
505
|
-
|
|
506
|
-
def _get_pixel_array_from_sorted_instance_array(source, pixels_first=False):
|
|
507
|
-
|
|
508
|
-
array = []
|
|
509
|
-
instances = source.ravel()
|
|
510
|
-
im = None
|
|
511
|
-
for i, im in enumerate(instances):
|
|
512
|
-
if im is None:
|
|
513
|
-
array.append(np.zeros((1,1)))
|
|
514
|
-
else:
|
|
515
|
-
im.progress(i+1, len(instances), 'Reading pixel data..')
|
|
516
|
-
array.append(im.get_pixel_array())
|
|
517
|
-
if im is not None:
|
|
518
|
-
im.status.hide()
|
|
519
|
-
array = _stack(array)
|
|
520
|
-
if array is None:
|
|
521
|
-
msg = 'Pixel array is empty. \n'
|
|
522
|
-
msg += 'Either because one or more of the keywords used for sorting does not exist; \n'
|
|
523
|
-
msg += 'or the series does not have any image data..'
|
|
524
|
-
raise ValueError(msg)
|
|
525
|
-
array = array.reshape(source.shape + array.shape[1:])
|
|
526
|
-
if pixels_first:
|
|
527
|
-
array = np.moveaxis(array, -1, 0)
|
|
528
|
-
array = np.moveaxis(array, -1, 0)
|
|
529
|
-
return array, source
|
|
530
|
-
|
|
531
|
-
|
|
532
|
-
def set_pixel_array(series, array, source=None, pixels_first=False, coords=None, **kwargs):
|
|
533
|
-
"""
|
|
534
|
-
Set pixel values of a series from a numpy ndarray.
|
|
535
|
-
|
|
536
|
-
Since the pixel data do not hold any information about the
|
|
537
|
-
image such as geometry, or other metainformation,
|
|
538
|
-
a dataset must be provided as well with the same
|
|
539
|
-
shape as the array except for the slice dimensions.
|
|
540
|
-
|
|
541
|
-
If a dataset is not provided, header info is
|
|
542
|
-
derived from existing instances in order.
|
|
543
|
-
|
|
544
|
-
Args:
|
|
545
|
-
array:
|
|
546
|
-
numpy ndarray with pixel data.
|
|
547
|
-
|
|
548
|
-
dataset:
|
|
549
|
-
numpy ndarray
|
|
550
|
-
|
|
551
|
-
Instances holding the header information.
|
|
552
|
-
This *must* have the same shape as array, minus the slice dimensions.
|
|
553
|
-
|
|
554
|
-
pixels_first:
|
|
555
|
-
bool
|
|
556
|
-
|
|
557
|
-
Specifies whether the pixel dimensions are the first or last dimensions of the series.
|
|
558
|
-
If not provided it is assumed the slice dimensions are the last dimensions
|
|
559
|
-
of the array.
|
|
560
|
-
|
|
561
|
-
inplace:
|
|
562
|
-
bool
|
|
563
|
-
|
|
564
|
-
If True (default) the current pixel values in the series
|
|
565
|
-
are overwritten. If set to False, the new array is added to the series.
|
|
566
|
-
|
|
567
|
-
Examples:
|
|
568
|
-
```ruby
|
|
569
|
-
# Invert all images in a series:
|
|
570
|
-
array, _ = series.array()
|
|
571
|
-
series.set_array(-array)
|
|
572
|
-
|
|
573
|
-
# Create a maximum intensity projection of the series.
|
|
574
|
-
# Header information for the result is taken from the first image.
|
|
575
|
-
# Results are saved in a new sibling series.
|
|
576
|
-
array, data = series.array()
|
|
577
|
-
array = np.amax(array, axis=0)
|
|
578
|
-
data = np.squeeze(data[0,...])
|
|
579
|
-
series.new_sibling().set_array(array, data)
|
|
580
|
-
|
|
581
|
-
# Create a 2D maximum intensity projection along the SliceLocation direction.
|
|
582
|
-
# Header information for the result is taken from the first slice location.
|
|
583
|
-
# Current data of the series are overwritten.
|
|
584
|
-
array, data = series.array('SliceLocation')
|
|
585
|
-
array = np.amax(array, axis=0)
|
|
586
|
-
data = np.squeeze(data[0,...])
|
|
587
|
-
series.set_array(array, data)
|
|
588
|
-
|
|
589
|
-
# In a series with multiple slice locations and inversion times,
|
|
590
|
-
# replace all images for each slice location with that of the shortest inversion time.
|
|
591
|
-
array, data = series.array(['SliceLocation','InversionTime'])
|
|
592
|
-
for loc in range(array.shape[0]): # loop over slice locations
|
|
593
|
-
slice0 = np.squeeze(array[loc,0,0,:,:]) # get the slice with shortest TI
|
|
594
|
-
TI0 = data[loc,0,0].InversionTime # get the TI of that slice
|
|
595
|
-
for TI in range(array.shape[1]): # loop over TIs
|
|
596
|
-
array[loc,TI,0,:,:] = slice0 # replace each slice with shortest TI
|
|
597
|
-
data[loc,TI,0].InversionTime = TI0 # replace each TI with shortest TI
|
|
598
|
-
series.set_array(array, data)
|
|
599
|
-
```
|
|
600
|
-
"""
|
|
601
|
-
|
|
602
|
-
# Move pixels to the end (default)
|
|
603
|
-
if pixels_first:
|
|
604
|
-
array = np.moveaxis(array, 0, -1)
|
|
605
|
-
array = np.moveaxis(array, 0, -1)
|
|
606
|
-
|
|
607
|
-
# If source data are provided, then coordinates are optional.
|
|
608
|
-
# If no source data are given, then coordinates MUST be defined to ensure array data can be retrieved in the proper order..
|
|
609
|
-
if source is None:
|
|
610
|
-
if coords is None:
|
|
611
|
-
if array.ndim > 4:
|
|
612
|
-
msg = 'For arrays with more than 4 dimensions, \n'
|
|
613
|
-
msg += 'either coordinate labels or headers must be provided'
|
|
614
|
-
raise ValueError(msg)
|
|
615
|
-
elif array.ndim == 4:
|
|
616
|
-
coords = {
|
|
617
|
-
'SliceLocation':np.arange(array.shape[0]),
|
|
618
|
-
'AcquisitionTime':np.arange(array.shape[1]),
|
|
619
|
-
}
|
|
620
|
-
elif array.ndim == 3:
|
|
621
|
-
coords = {
|
|
622
|
-
'SliceLocation':np.arange(array.shape[0]),
|
|
623
|
-
}
|
|
624
|
-
|
|
625
|
-
# If coordinates are given as 1D arrays, turn them into grids and flatten for iteration.
|
|
626
|
-
if coords is not None:
|
|
627
|
-
v0 = list(coords.values())[0]
|
|
628
|
-
if np.array(v0).ndim==1: # regular grid
|
|
629
|
-
pos = tuple([coords[c] for c in coords])
|
|
630
|
-
pos = np.meshgrid(*pos)
|
|
631
|
-
for i, c in enumerate(coords):
|
|
632
|
-
coords[c] = pos[i].ravel()
|
|
633
|
-
|
|
634
|
-
# if no header data are provided, use template headers.
|
|
635
|
-
nr_of_slices = int(np.prod(array.shape[:-2]))
|
|
636
|
-
if source is None:
|
|
637
|
-
source = [series.new_instance(MRImage()) for _ in range(nr_of_slices)]
|
|
638
|
-
|
|
639
|
-
# If the header data are not the same size, use only the first one.
|
|
640
|
-
else:
|
|
641
|
-
if isinstance(source, list):
|
|
642
|
-
pass
|
|
643
|
-
elif isinstance(source, np.ndarray):
|
|
644
|
-
source = source.ravel().tolist()
|
|
645
|
-
else: # assume scalar
|
|
646
|
-
source = [source] * nr_of_slices
|
|
647
|
-
if nr_of_slices != len(source):
|
|
648
|
-
source = [source[0]] * nr_of_slices
|
|
649
|
-
|
|
650
|
-
# Copy all sources to the series, if they are not part of it
|
|
651
|
-
copy_source = []
|
|
652
|
-
instances = series.instances()
|
|
653
|
-
for i, s in enumerate(source):
|
|
654
|
-
if s in instances:
|
|
655
|
-
copy_source.append(s)
|
|
656
|
-
else:
|
|
657
|
-
series.progress(i+1, len(source), 'Copying series..')
|
|
658
|
-
copy_source.append(s.copy_to(series))
|
|
659
|
-
|
|
660
|
-
# Faster but does not work if all sources are the same
|
|
661
|
-
# series.status.message('Saving array (1/2): Copying series..')
|
|
662
|
-
# instances = series.instances()
|
|
663
|
-
# to_copy = [i for i in range(len(source)) if source[i] not in instances]
|
|
664
|
-
# copied = series.adopt([source[i] for i in to_copy])
|
|
665
|
-
# for i, c in enumerate(copied):
|
|
666
|
-
# source[to_copy[i]] = c
|
|
667
|
-
|
|
668
|
-
# Flatten array for iterating
|
|
669
|
-
array = array.reshape((nr_of_slices, array.shape[-2], array.shape[-1])) # shape (i,x,y)
|
|
670
|
-
for i, image in enumerate(copy_source):
|
|
671
|
-
series.progress(i+1, len(copy_source), 'Saving array..')
|
|
672
|
-
image.read()
|
|
673
|
-
|
|
674
|
-
for attr, vals in kwargs.items():
|
|
675
|
-
if isinstance(vals, list):
|
|
676
|
-
setattr(image, attr, vals[i])
|
|
677
|
-
else:
|
|
678
|
-
setattr(image, attr, vals)
|
|
679
|
-
|
|
680
|
-
# If coordinates are provided, these will override the values from the sources.
|
|
681
|
-
if coords is not None: # ADDED 31/05/2023
|
|
682
|
-
for c in coords:
|
|
683
|
-
image[c] = coords[c][i]
|
|
684
|
-
image.set_pixel_array(array[i,...])
|
|
685
|
-
image.clear()
|
|
686
|
-
|
|
687
|
-
|
|
688
|
-
|
|
689
|
-
# More compact but does not work with pause extensions
|
|
690
|
-
# for i, s in enumerate(source):
|
|
691
|
-
# series.status.progress(i+1, len(source), 'Writing array..')
|
|
692
|
-
# if s not in instances:
|
|
693
|
-
# s.copy_to(series).set_pixel_array(array[i,...])
|
|
694
|
-
# else:
|
|
695
|
-
# s.set_pixel_array(array[i,...])
|
|
696
|
-
|
|
697
|
-
|
|
698
|
-
|
|
699
|
-
|
|
700
|
-
|
|
701
|
-
|
|
702
|
-
##
|
|
703
|
-
## Helper functions
|
|
704
|
-
##
|
|
705
|
-
|
|
706
|
-
def sort_instance_array(instance_array, sortby=None, status=True):
    """Sort an array of instances by a list of DICOM attributes.

    Args:
        instance_array: array of instances to sort.
        sortby: a DICOM keyword or list of keywords to sort by; when
            None the array is returned unchanged.
        status: forwarded to df_to_sorted_instance_array to control
            progress reporting.

    Returns:
        The instance array sorted by the requested keywords.
    """
    # Nothing to sort by - return the input untouched.
    if sortby is None:
        return instance_array
    keys = sortby if isinstance(sortby, list) else [sortby]
    df = read_dataframe_from_instance_array(instance_array, keys + ['SOPInstanceUID'])
    df.sort_values(keys, inplace=True)
    return df_to_sorted_instance_array(instance_array[0], df, keys, status=status)
|
|
715
|
-
|
|
716
|
-
|
|
717
|
-
def instance_array(record, sortby=None, status=True):
    """Sort instances by a list of attributes.

    Args:
        sortby:
            List of DICOM keywords by which the series is sorted
    Returns:
        An ndarray holding the instances sorted by sortby.
    """
    # Without sort keys, just pack the instances into a 1D object array.
    if sortby is None:
        all_instances = record.instances()
        result = np.empty(len(all_instances), dtype=object)
        for idx, inst in enumerate(all_instances):
            result[idx] = inst
        return result
    if not isinstance(sortby, list):
        sortby = [sortby]
    df = record.read_dataframe(sortby + ['SOPInstanceUID'])
    df.sort_values(sortby, inplace=True)
    return df_to_sorted_instance_array(record, df, sortby, status=status)
|
|
738
|
-
|
|
739
|
-
|
|
740
|
-
def df_to_sorted_instance_array(record, df, sortby, status=True):
    """Recursively group a sorted dataframe into a nested instance array.

    Groups the rows of df by the unique values of the first keyword in
    sortby, then recurses on the remaining keywords. At the innermost
    level each group is converted to a 1D object array of instances;
    the per-group arrays are combined with _stack.

    Args:
        record: only passed for access to instance() and progress().
            This really should be db.instance().
        df: dataframe with one row per instance, pre-sorted by sortby.
        sortby: list of DICOM keywords to group by, outermost first.
        status: when True, report progress on this (outermost) level only.

    Returns:
        An ndarray of object dtype with one dimension per entry in
        sortby plus one for the instances, or None when df is empty.
    """
    data = []
    vals = df[sortby[0]].unique()
    for i, c in enumerate(vals):
        if status:
            record.progress(i, len(vals), message='Sorting pixel data..')
        # np.isnan raises TypeError for types it does not support
        # (e.g. strings) - treat such values as not-nan.
        try:
            nan = np.isnan(c)
        except TypeError:
            nan = False
        if nan:
            dfc = df[df[sortby[0]].isnull()]
        else:
            dfc = df[df[sortby[0]] == c]
        if len(sortby) == 1:
            datac = df_to_instance_array(record, dfc)
        else:
            # Recurse on the remaining sort keys; suppress progress
            # reporting below the outermost level.
            datac = df_to_sorted_instance_array(record, dfc, sortby[1:], status=False)
        data.append(datac)
    return _stack(data, align_left=True)
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
def df_to_instance_array(record, df):
    """Return the instances of df as a 1D numpy array of object dtype.

    The dataframe index holds the instance keys; only the index is
    needed to look each instance up via record.instance(), so the
    column values are never read.
    """
    data = np.empty(df.shape[0], dtype=object)
    # Iterate the index directly - the original iterated
    # df.SOPInstanceUID.items() but discarded the values.
    for i, key in enumerate(df.index):
        data[i] = record.instance(key=key)
    return data
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
def _stack(arrays, align_left=False):
|
|
777
|
-
"""Stack a list of arrays of different shapes but same number of dimensions.
|
|
778
|
-
|
|
779
|
-
This generalises numpy.stack to arrays of different sizes.
|
|
780
|
-
The stack has the size of the largest array.
|
|
781
|
-
If an array is smaller it is zero-padded and centred on the middle.
|
|
782
|
-
None items are removed first before stacking
|
|
783
|
-
"""
|
|
784
|
-
|
|
785
|
-
# Get the dimensions of the stack
|
|
786
|
-
# For each dimension, look for the largest values across all arrays
|
|
787
|
-
arrays = [a for a in arrays if a is not None]
|
|
788
|
-
if arrays == []:
|
|
789
|
-
return
|
|
790
|
-
ndim = len(arrays[0].shape)
|
|
791
|
-
dim = [0] * ndim
|
|
792
|
-
for array in arrays:
|
|
793
|
-
for i, d in enumerate(dim):
|
|
794
|
-
dim[i] = max((d, array.shape[i])) # changing the variable we are iterating over!!
|
|
795
|
-
# for i in range(ndim):
|
|
796
|
-
# dim[i] = max((dim[i], array.shape[i]))
|
|
797
|
-
|
|
798
|
-
# Create the stack
|
|
799
|
-
# Add one dimension corresponding to the size of the stack
|
|
800
|
-
n = len(arrays)
|
|
801
|
-
#stack = np.full([n] + dim, 0, dtype=arrays[0].dtype)
|
|
802
|
-
stack = np.full([n] + dim, None, dtype=arrays[0].dtype)
|
|
803
|
-
|
|
804
|
-
for k, array in enumerate(arrays):
|
|
805
|
-
index = [k]
|
|
806
|
-
for i, d in enumerate(dim):
|
|
807
|
-
if align_left:
|
|
808
|
-
i0 = 0
|
|
809
|
-
else: # align center and zero-pad missing values
|
|
810
|
-
i0 = math.floor((d-array.shape[i])/2)
|
|
811
|
-
i1 = i0 + array.shape[i]
|
|
812
|
-
index.append(slice(i0,i1))
|
|
813
|
-
stack[tuple(index)] = array
|
|
814
|
-
|
|
815
|
-
return stack
|
|
816
|
-
|