dbdicom 0.3.8__py3-none-any.whl → 0.3.10__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of dbdicom might be problematic.

dbdicom/register.py CHANGED
@@ -96,6 +96,27 @@ def index(dbtree, entity):
                     if sr['SeriesInstanceUID'] == series_uid:
                         return list(sr['instances'].values())
 
+def remove(dbtree, entity):
+    if len(entity)==2:
+        patient_id = entity[1]
+        for pt in sorted(dbtree, key=lambda pt: pt['PatientID']):
+            if pt['PatientID'] == patient_id:
+                dbtree.remove(pt)
+    elif len(entity)==3:
+        study_uid = uid(dbtree, entity)
+        for pt in sorted(dbtree, key=lambda pt: pt['PatientID']):
+            for st in sorted(pt['studies'], key=lambda st: st['StudyInstanceUID']):
+                if st['StudyInstanceUID'] == study_uid:
+                    pt['studies'].remove(st)
+    elif len(entity)==4:
+        series_uid = uid(dbtree, entity)
+        for pt in sorted(dbtree, key=lambda pt: pt['PatientID']):
+            for st in sorted(pt['studies'], key=lambda st: st['StudyInstanceUID']):
+                for sr in sorted(st['series'], key=lambda sr: sr['SeriesNumber']):
+                    if sr['SeriesInstanceUID'] == series_uid:
+                        st['series'].remove(sr)
+    return dbtree
+
 
 def drop(dbtree, relpaths):
     for pt in sorted(dbtree[:], key=lambda pt: pt['PatientID']):
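
The new remove() helper prunes a patient, study or series from the database tree in place: a 2-element entity path is read as (database, PatientID), while 3- and 4-element paths are first resolved to a StudyInstanceUID or SeriesInstanceUID via uid(). A minimal usage sketch for the patient case, assuming the nested list-of-dicts layout that register.py iterates over (the key names are taken from the diff above; the sample values are invented):

from dbdicom.register import remove

dbtree = [
    {
        "PatientID": "P001",
        "studies": [
            {
                "StudyInstanceUID": "1.2.3",
                "series": [
                    {"SeriesInstanceUID": "1.2.3.4", "SeriesNumber": 1, "instances": {}},
                ],
            },
        ],
    },
]

# A 2-element entity removes the whole patient record; the tree is edited
# in place and also returned.
dbtree = remove(dbtree, ("database", "P001"))
print(dbtree)   # []
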
@@ -1,163 +1,218 @@
 # Coded version of DICOM file 'C:\Users\steve\Dropbox\Software\QIB-Sheffield\dbdicom\tests\data\MULTIFRAME\IM_0010'
 # Produced by pydicom codify utility script
-from datetime import datetime
-from datetime import timedelta
+
+import datetime
 
 import numpy as np
+import vreg
+
 import pydicom
-from pydicom.dataset import Dataset, FileDataset, FileMetaDataset
+from pydicom.dataset import Dataset, FileMetaDataset
 from pydicom.sequence import Sequence
-from pydicom.uid import (
-    generate_uid,
-    MRImageStorage,
-    EnhancedMRImageStorage,
-    ExplicitVRLittleEndian
-)
+from pydicom.uid import ExplicitVRLittleEndian, generate_uid
 
+import dbdicom.utils.image as image_utils
+from dbdicom.utils.pydicom_dataset import set_values, get_values
 
-from dbdicom.utils import image
 
+def from_volume(vol:vreg.Volume3D):
+    """
+    Build an Enhanced MR Image DICOM dataset from N+3D array.
 
-import numpy as np
-import pydicom
-from pydicom.dataset import Dataset, FileDataset, FileMetaDataset
-from pydicom.sequence import Sequence
-from pydicom.uid import ExplicitVRLittleEndian, EnhancedMRImageStorage, generate_uid
-from datetime import datetime, timedelta
-
-
-def create_5d_enhanced_mr_dataset(
-    time_points=20, flip_angles=10, slices=4, rows=128, cols=192
-):
-    total_frames = time_points * flip_angles * slices
-    now = datetime.now()
-
-    # File Meta Info
-    file_meta = FileMetaDataset()
-    file_meta.MediaStorageSOPClassUID = EnhancedMRImageStorage
-    file_meta.MediaStorageSOPInstanceUID = generate_uid()
-    file_meta.ImplementationClassUID = generate_uid()
-    file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
-
-    # Create FileDataset
-    ds = FileDataset(
-        filename_or_obj=None,
-        dataset=Dataset(),
-        file_meta=file_meta,
-        preamble=b"\0" * 128,
-    )
+    Parameters
+    ----------
+    vol: vreg Volume3D
+
+    Returns
+    -------
+    pydicom dataset
+    """
+
+    # Flatten frames
+    frames = vol.values.reshape(vol.shape[:2] + (-1,))
+    geom = image_utils.dismantle_affine_matrix(vol.affine)
+
+    # --- FileDataset ---
+    ds = Dataset()
+
+    # File Meta
+    ds.file_meta = FileMetaDataset()
+    ds.file_meta.TransferSyntaxUID = ExplicitVRLittleEndian
+    ds.file_meta.MediaStorageSOPClassUID = "1.2.840.10008.5.1.4.1.1.4.1" # Enhanced MR
 
     ds.is_little_endian = True
     ds.is_implicit_VR = False
+    ds.SOPClassUID = ds.file_meta.MediaStorageSOPClassUID
+    ds.SOPInstanceUID = generate_uid()
 
-    # Identification
-    ds.SOPClassUID = EnhancedMRImageStorage
-    ds.SOPInstanceUID = file_meta.MediaStorageSOPInstanceUID
-    ds.PatientName = "FiveD^Phantom"
-    ds.PatientID = "555555"
-    ds.StudyInstanceUID = generate_uid()
+    # Study/Series
     ds.SeriesInstanceUID = generate_uid()
-    ds.StudyDate = now.strftime("%Y%m%d")
-    ds.StudyTime = now.strftime("%H%M%S")
+    ds.StudyInstanceUID = generate_uid()
+    ds.FrameOfReferenceUID = generate_uid()
     ds.Modality = "MR"
-    ds.Manufacturer = "PythonPACS"
-    ds.StudyID = "1"
-    ds.SeriesNumber = "1"
-    ds.InstanceNumber = "1"
-
-    # Image Dimensions
-    ds.Rows = rows
-    ds.Columns = cols
-    ds.NumberOfFrames = str(total_frames)
+    ds.PatientName = "Test^Patient"
+    ds.PatientID = "123456"
+    ds.StudyDate = datetime.date.today().strftime("%Y%m%d")
+    ds.StudyTime = datetime.datetime.now().strftime("%H%M%S")
+
+    # Image attributes
+    ds.Columns = vol.shape[0]
+    ds.Rows = vol.shape[1]
+    ds.NumberOfFrames = np.prod(vol.shape[2:])
     ds.SamplesPerPixel = 1
     ds.PhotometricInterpretation = "MONOCHROME2"
     ds.BitsAllocated = 16
-    ds.BitsStored = 12
-    ds.HighBit = 11
-    ds.PixelRepresentation = 0
-    ds.PixelSpacing = [1.0, 1.0]
-    ds.SliceThickness = 1.0
-    ds.FrameOfReferenceUID = generate_uid()
+    ds.BitsStored = 16
+    ds.HighBit = 15
+    ds.PixelRepresentation = 1
+    ds.PixelSpacing = list(vol.spacing[:2])
+    ds.SliceThickness = vol.spacing[2]
 
-    # Dummy pixel data
-    pixel_array = np.zeros((total_frames, rows, cols), dtype=np.uint16)
-    ds.PixelData = pixel_array.tobytes()
+    # Dimensions
+    ds.DimensionOrganizationSequence = Sequence([Dataset()])
+    ds.DimensionOrganizationSequence[0].DimensionOrganizationUID = generate_uid()
+    ds.DimensionIndexSequence = Sequence()
+    for axis in ['SliceLocation'] + vol.dims:
+        axis_dimension_item = Dataset()
+        axis_dimension_item.DimensionIndexPointer = pydicom.tag.Tag(axis)
+        axis_dimension_item.DimensionDescriptionLabel = axis
+        ds.DimensionIndexSequence.append(axis_dimension_item)
 
     # Shared Functional Groups
-    shared_fg = Dataset()
-    pix_meas = Dataset()
-    pix_meas.PixelSpacing = ds.PixelSpacing
-    pix_meas.SliceThickness = ds.SliceThickness
-    shared_fg.PixelMeasuresSequence = [pix_meas]
-    ds.SharedFunctionalGroupsSequence = [shared_fg]
-
-    # Dimension Organization
-    dim_org_uid = generate_uid()
-    ds.DimensionOrganizationSequence = Sequence([
-        Dataset()
-    ])
-    ds.DimensionOrganizationSequence[0].DimensionOrganizationUID = dim_org_uid
+    ds.SharedFunctionalGroupsSequence = [Dataset()]
+    ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence = [Dataset()]
+    ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].PixelSpacing = ds.PixelSpacing
+    ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].SliceThickness = ds.SliceThickness
+    ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0].SpacingBetweenSlices = ds.SliceThickness
+    ds.SharedFunctionalGroupsSequence[0].PlaneOrientationSequence = [Dataset()]
+    ds.SharedFunctionalGroupsSequence[0].PlaneOrientationSequence[0].ImageOrientationPatient = geom['ImageOrientationPatient']
+
+    # Per-frame Functional Groups
+    PerFrameFunctionalGroupsSequence = []
+
+    for flat_index in range(frames.shape[-1]):
+        frame_ds = Dataset()
+        vol_idx, slice_idx = divmod(flat_index, vol.shape[2])
+        indices = np.unravel_index(vol_idx, vol.shape[3:])
+        dim_values = [i + 1 for i in indices]
+
+        # Frame content
+        frame_ds.FrameContentSequence = [Dataset()]
+        frame_ds.FrameContentSequence[0].DimensionIndexValues = dim_values
+
+        # Plane position
+        frame_ds.PlanePositionSequence = [Dataset()]
+        frame_ds.PlanePositionSequence[0].ImagePositionPatient = list(np.array(geom['ImagePositionPatient']) + slice_idx * vol.spacing[2] * np.array(geom['slice_cosine']))
+
+        # Plane orientation
+        frame_ds.PlaneOrientationSequence = [Dataset()]
+        frame_ds.PlaneOrientationSequence[0].ImageOrientationPatient = geom['ImageOrientationPatient']
+
+        # Assign parameters using dims as DICOM keywords
+        for ax_i, axis in enumerate(vol.dims):
+            val = vol.coords[ax_i][indices]
+
+            sequence, attr = axis.split("/")
+            if not hasattr(frame_ds, sequence):
+                setattr(frame_ds, sequence, [Dataset()])
+            sequence_ds = getattr(frame_ds, sequence)[0]
+            set_values(sequence_ds, attr, val)
+
+        # Frame anatomy & type
+        frame_ds.FrameAnatomySequence = [Dataset()]
+        frame_ds.FrameAnatomySequence[0].AnatomicRegionSequence = [Dataset()]
+        frame_ds.FrameAnatomySequence[0].AnatomicRegionSequence[0].CodeValue = "12738006"
+        frame_ds.FrameAnatomySequence[0].AnatomicRegionSequence[0].CodingSchemeDesignator = "SCT"
+        frame_ds.FrameAnatomySequence[0].AnatomicRegionSequence[0].CodeMeaning = "Brain"
+
+        frame_ds.MRImageFrameTypeSequence = [Dataset()]
+        frame_ds.MRImageFrameTypeSequence[0].FrameType = ["ORIGINAL", "PRIMARY", "M", "NONE"]
+
+        # Acquisition datetime
+        frame_ds.FrameAcquisitionDateTime = (
+            datetime.datetime.now() + datetime.timedelta(seconds=flat_index)
+        ).strftime("%Y%m%d%H%M%S.%f")
+
+        PerFrameFunctionalGroupsSequence.append(frame_ds)
+
+    ds.PerFrameFunctionalGroupsSequence = PerFrameFunctionalGroupsSequence
+
+    # Pixel Data
+    ds.PixelData = b"".join([f.tobytes() for f in frames])
 
-    ds.DimensionIndexSequence = Sequence()
+    return ds
 
-    # Time dimension
-    temporal = Dataset()
-    temporal.DimensionOrganizationUID = dim_org_uid
-    temporal.DimensionIndexPointer = 0x00209164 # TemporalPositionIndex
-    temporal.FunctionalGroupPointer = 0x00209113 # TemporalPositionSequence
-    ds.DimensionIndexSequence.append(temporal)
-
-    # Flip angle dimension
-    flip = Dataset()
-    flip.DimensionOrganizationUID = dim_org_uid
-    flip.DimensionIndexPointer = 0x00181314 # FlipAngle
-    flip.FunctionalGroupPointer = 0x00189105 # MRImagingModifierSequence
-    ds.DimensionIndexSequence.append(flip)
-
-    # Slice position
-    slice_dim = Dataset()
-    slice_dim.DimensionOrganizationUID = dim_org_uid
-    slice_dim.DimensionIndexPointer = 0x00200032 # ImagePositionPatient
-    slice_dim.FunctionalGroupPointer = 0x00209113 # PlanePositionSequence
-    ds.DimensionIndexSequence.append(slice_dim)
-
-    # Per-Frame Functional Groups
-    per_frame_seq = []
-
-    base_time = now
-    flip_angle_values = np.linspace(5, 50, flip_angles) # Example flip angles
-
-    for t in range(time_points):
-        for f in range(flip_angles):
-            for z in range(slices):
-                frame = Dataset()
-
-                # Frame content
-                fc = Dataset()
-                fc.FrameAcquisitionNumber = len(per_frame_seq)
-                fc.AcquisitionTime = (base_time + timedelta(seconds=t)).strftime("%H%M%S.%f")[:13]
-                frame.FrameContentSequence = [fc]
-
-                # Temporal position
-                tp = Dataset()
-                tp.TemporalPositionIndex = t + 1
-                frame.TemporalPositionSequence = [tp]
-
-                # Flip angle
-                fa = Dataset()
-                fa.FlipAngle = float(flip_angle_values[f])
-                frame.MRImagingModifierSequence = [fa]
-
-                # Slice position
-                pos = Dataset()
-                pos.ImagePositionPatient = [0.0, 0.0, float(z)]
-                frame.PlanePositionSequence = [pos]
-
-                per_frame_seq.append(frame)
-
-    ds.PerFrameFunctionalGroupsSequence = Sequence(per_frame_seq)
 
-    return ds
+
+# THIS NEEDS DEBUGGING
+def to_volume(ds):
+    """
+    Write an Enhanced MR Image DICOM from N+3D array.
+
+    Parameters
+    ----------
+    ds: pydicom Dataset
+
+    Returns
+    -------
+    vreg Volume3D
+    """
+    values = pixel_data(ds).T # need reshape
+    dims = [item.DimensionDescriptionLabel
+            for item in ds.DimensionIndexSequence[1:]] # handle slice location
+    affine = image_utils.affine_matrix(
+        get_values(ds.SharedFunctionalGroupsSequence[0].PlaneOrientationSequence[0], 'ImageOrientationPatient'),
+        get_values(ds.SharedFunctionalGroupsSequence[0].PlanePositionSequence[0], 'ImagePositionPatient'),
+        get_values(ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0], 'PixelSpacing'),
+        get_values(ds.SharedFunctionalGroupsSequence[0].PixelMeasuresSequence[0], 'SliceThickness'), # derive from slice_loc in per-frame
+    )
+    coords = np.zeros((len(dims), ds.NumberOfFrames))
+    for d, dim in enumerate(dims):
+        for flat_index in range(ds.NumberOfFrames):
+            found_val = False
+            frame_ds = ds.PerFrameFunctionalGroupsSequence[flat_index]
+            for sequence in frame_ds:
+                if hasattr(sequence[0], dim):
+                    coords[d, flat_index] = get_values(sequence[0], dim)
+                    found_val=True
+                    break
+            if not found_val:
+                raise ValueError(f"Dimension {dim} not found in frame {flat_index}")
+    shape = [len(np.unique(coords[d,:])) for d in range(len(dims))]
+    if np.prod(shape) == ds.NumberOfFrames:
+        values = values.reshape(values.shape[:2] + tuple(shape))
+    else:
+        values = values.reshape(values.shape[:2] + (1, ds.NumberOfFrames) )
+
+    return vreg.volume(values, affine, coords, dims)
+
+
+
+
+def pixel_data(ds):
+    """Read the pixel array from an MR image"""
+
+    array = ds.pixel_array
+    array = array.astype(np.float32)
+    if [0x2005, 0x100E] in ds: # 'Philips Rescale Slope'
+        slope = ds[(0x2005, 0x100E)].value
+        intercept = ds[(0x2005, 0x100D)].value
+        if (intercept == 0) and (slope == 1):
+            array = array.astype(np.int16)
+        else:
+            array = array.astype(np.float32)
+            array -= intercept
+            array /= slope
+    else:
+        slope = float(getattr(ds, 'RescaleSlope', 1))
+        intercept = float(getattr(ds, 'RescaleIntercept', 0))
+        if (intercept == 0) and (slope == 1):
+            array = array.astype(np.int16)
+        else:
+            array = array.astype(np.float32)
+            array *= slope
+            array += intercept
+    return np.transpose(array)
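
The per-frame loop in from_volume() above turns the flat frame counter into per-frame indices with divmod and unravel_index: each flat index is split into a volume counter and a slice counter, and the volume counter is then unravelled over the trailing (non-spatial) dimensions to give the 1-based DimensionIndexValues. A standalone sketch of that bookkeeping with an invented shape (it only replays the index arithmetic, not the DICOM writing); note that vol.values.reshape(vol.shape[:2] + (-1,)) flattens trailing axes in C order, so whether the frame ordering and this mapping agree for more than one non-spatial dimension is worth checking — to_volume() is still flagged as needing debugging in the source.

import numpy as np

shape = (64, 64, 8, 3, 2)            # cols, rows, slices, flip angles, time points (invented)
n_slices = shape[2]
n_frames = int(np.prod(shape[2:]))   # 8 * 3 * 2 = 48 frames in the Enhanced MR dataset

for flat_index in range(n_frames):
    vol_idx, slice_idx = divmod(flat_index, n_slices)
    extra_idx = np.unravel_index(vol_idx, shape[3:])
    dim_values = [i + 1 for i in extra_idx]   # DimensionIndexValues are 1-based
    # flat_index 0  -> slice_idx 0, dim_values [1, 1]
    # flat_index 11 -> slice_idx 3, dim_values [1, 2]
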
 
@@ -624,139 +679,3 @@ def ukrin_maps_per_frame_functional_group():
 
     return ds
 
-def get_window(ds):
-    """Centre and width of the pixel data after applying rescale slope and intercept.
-
-    In this case retrieve the centre and width values of the first frame
-    NOT In USE
-    """
-
-    centre = ds.PerFrameFunctionalGroupsSequence[0].FrameVOILUTSequence[0].WindowCenter
-    width = ds.PerFrameFunctionalGroupsSequence[0].FrameVOILUTSequence[0].WindowWidth
-    if centre is None or width is None:
-        array = ds.get_pixel_array()
-    if centre is None:
-        centre = np.median(array)
-    if width is None:
-        p = np.percentile(array, [25, 75])
-        width = p[1] - p[0]
-
-    return centre, width
-
-def get_pixel_array(ds):
-
-    array = ds.pixel_array.astype(np.float32)
-    frames = ds.PerFrameFunctionalGroupsSequence
-    for index, frame in enumerate(frames):
-        slice = np.squeeze(array[index, ...])
-        if [0x2005, 0x100E] in ds: # 'Philips Rescale Slope'
-            slope = ds[(0x2005, 0x100E)].value
-            intercept = ds[(0x2005, 0x100D)].value
-            slice = (slice - intercept) / slope
-        else:
-            transform = frame.PixelValueTransformationSequence[0]
-            slope = float(getattr(transform, 'RescaleSlope', 1))
-            intercept = float(getattr(transform, 'RescaleIntercept', 0))
-            slice = slice * slope + intercept
-        array[index, ...] = np.transpose(slice)
-
-    return array
-
-
-def set_pixel_array(ds, array, value_range=None):
-
-    if (0x2005, 0x100E) in ds:
-        del ds[0x2005, 0x100E] # Delete 'Philips Rescale Slope'
-    if (0x2005, 0x100D) in ds:
-        del ds[0x2005, 0x100D]
-
-    array = image.clip(array, value_range=value_range)
-    array, slope, intercept = image.scale_to_range(array, ds.BitsAllocated)
-    array = np.transpose(array, (0, 2, 1))
-
-    maximum = np.amax(array)
-    minimum = np.amin(array)
-    shape = np.shape(array)
-
-    ds.NumberOfFrames = np.shape(array)[0]
-    del ds.PerFrameFunctionalGroupsSequence[ds.NumberOfFrames:]
-
-    ds.PixelRepresentation = 0
-    ds.SmallestImagePixelValue = int(maximum)
-    ds.LargestImagePixelValue = int(minimum)
-    ds.RescaleSlope = 1 / slope
-    ds.RescaleIntercept = - intercept / slope
-    ds.WindowCenter = (maximum + minimum) / 2
-    ds.WindowWidth = maximum - minimum
-    ds.Rows = shape[0]
-    ds.Columns = shape[1]
-    ds.PixelData = array.tobytes()
-
-
-def image_type(ds):
-    """Determine if a dataset is Magnitude, Phase, Real or Imaginary"""
-
-    image_type = []
-    for slice in ds.PerFrameFunctionalGroupsSequence:
-        sequence = slice.MRImageFrameTypeSequence[0]
-
-        if hasattr(sequence, 'FrameType'):
-            type = set(sequence.FrameType)
-            if set(['M', 'MAGNITUDE']).intersection(type):
-                image_type.append('MAGNITUDE')
-            elif set(['P', 'PHASE']).intersection(type):
-                image_type.append('PHASE')
-            elif set(['R', 'REAL']).intersection(type):
-                image_type.append('REAL')
-            elif set(['I', 'IMAGINARY']).intersection(type):
-                image_type.append('IMAGINARY')
-        elif hasattr(sequence, 'ComplexImageComponent'):
-            type = set(sequence.ComplexImageComponent)
-            if set(['M', 'MAGNITUDE']).intersection(type):
-                image_type.append('MAGNITUDE')
-            elif set(['P', 'PHASE']).intersection(type):
-                image_type.append('PHASE')
-            elif set(['R', 'REAL']).intersection(type):
-                image_type.append('REAL')
-            elif set(['I', 'IMAGINARY']).intersection(type):
-                image_type.append('IMAGINARY')
-        else:
-            image_type.append('UNKNOWN')
-
-    return image_type
-
-
-def signal_type(ds):
-    """Determine if an image is Water, Fat, In-Phase, Out-phase image or None"""
-
-    signal_type = []
-    for slice in ds.PerFrameFunctionalGroupsSequence:
-        sequence = slice.MRImageFrameTypeSequence[0]
-
-        if hasattr(sequence, 'FrameType'):
-            type = set(sequence.FrameType)
-            if set(['W', 'WATER']).intersection(type):
-                signal_type.append('WATER')
-            elif set(['F', 'FAT']).intersection(type):
-                signal_type.append('FAT')
-            elif set(['IP', 'IN_PHASE']).intersection(type):
-                signal_type.append('IN-PHASE')
-            elif set(['OP', 'OUT_PHASE']).intersection(type):
-                signal_type.append('OP-PHASE')
-        else:
-            signal_type.append('UNKNOWN')
-
-    return signal_type
-
-def get_affine_matrix(ds):
-    """Affine transformation matrix for all images in a multiframe image"""
-
-    affineList = []
-    for frame in ds.PerFrameFunctionalGroupsSequence:
-        affine = image.affine_matrix(
-            frame.PlaneOrientationSequence[0].ImageOrientationPatient,
-            frame.PlanePositionSequence[0].ImagePositionPatient,
-            frame.PixelMeasuresSequence[0].PixelSpacing,
-            frame.PixelMeasuresSequence[0].SliceThickness)
-        affineList.append(affine)
-    return np.squeeze(np.array(affineList))
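
The removed get_pixel_array() above and the new pixel_data() earlier in this diff apply the same two rescaling conventions: the standard DICOM transform real = stored * RescaleSlope + RescaleIntercept, and the Philips private rescale in tags (2005,100E)/(2005,100D), which is applied in the opposite direction. A standalone sketch of the arithmetic with invented numbers:

import numpy as np

stored = np.array([100., 200., 300.], dtype=np.float32)   # stored pixel values (invented)

# Standard DICOM rescale: real = stored * RescaleSlope + RescaleIntercept
slope, intercept = 2.0, -10.0
real = stored * slope + intercept                   # [190., 390., 590.]

# Philips private rescale (2005,100E)/(2005,100D): real = (stored - intercept) / slope
ph_slope, ph_intercept = 2.0, -10.0
real_philips = (stored - ph_intercept) / ph_slope   # [55., 105., 155.]
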
dbdicom/utils/arrays.py CHANGED
@@ -1,40 +1,128 @@
 import numpy as np
 
+from typing import List, Tuple
 
-def meshvals(coords):
-    # Input array shape: (d, f) with d = nr of dims and f = nr of frames
-    # Output array shape: (d, f1,..., fd)
-    if coords.size == 0:
-        return np.array([])
-    # Sort by column
-    sorted_indices = np.lexsort(coords[::-1])
-    sorted_array = coords[:, sorted_indices]
-    # Find shape
-    shape = _mesh_shape(sorted_array)
-    # Reshape
-    mesh_array = sorted_array.reshape(shape)
-    return mesh_array, sorted_indices
-
-
-def _mesh_shape(sorted_array):
-
-    nd = np.unique(sorted_array[0,:]).size
-    shape = (sorted_array.shape[0], nd)
-
-    for dim in range(1,shape[0]):
-        shape_dim = (shape[0], np.prod(shape[1:]), -1)
-        sorted_array = sorted_array.reshape(shape_dim)
-        nd = [np.unique(sorted_array[dim,d,:]).size for d in range(shape_dim[1])]
-        shape = shape + (max(nd),)
-
-    if np.prod(shape) != sorted_array.size:
+
+def meshvals(arrays) -> Tuple[List[np.ndarray], np.ndarray]:
+    """
+    Lexicographically sort flattened N coordinate arrays and reshape back to inferred grid shape,
+    preserving original type of each input array.
+
+    Parameters
+    ----------
+    *arrays : array-like
+        Flattened coordinate arrays of the same length. Can be numbers, strings, or list objects.
+
+    Returns
+    -------
+    sorted_arrays : list[np.ndarray]
+        Coordinate arrays reshaped to inferred N-D grid shape, dtype/type preserved.
+    indices : np.ndarray
+        Permutation indices applied to the flattened arrays.
+    shape : tuple[int, ...]
+        Inferred grid shape (number of unique values per axis).
+    """
+    # Remember original type/dtype for each array
+    orig_types = [a.dtype if isinstance(a[0], np.ndarray) else type(a[0]) for a in arrays]
+
+    # Convert non arrays to object arrays
+    arrs = []
+    for a in arrays:
+        arrs_a = np.empty(len(a), dtype=object)
+        arrs_a[:] = a
+        arrs.append(arrs_a)
+
+    # Stack arrays as columns (M x N)
+    coords = np.stack(arrs, axis=1)
+
+    # Lexicographic sort using structured array
+    indices = np.lexsort(coords.T[::-1])
+    sorted_coords = coords[indices]
+
+    # Check that all coordinates are unique
+    points = [tuple(col) for col in sorted_coords]
+    if not all_elements_unique(points):
         raise ValueError(
-            'Improper dimensions for the series. This usually means '
-            'that there are multiple images at the same location, \n or that '
-            'there are no images at one or more locations. \n\n'
-            'Make sure to specify proper dimensions when reading a pixel array or volume. \n'
-            'If the default dimensions of pixel_array (InstanceNumber) generate this error, '
-            'the DICOM data may be corrupted.'
-        )
-
-    return shape
+            f"Improper coordinates. Coordinate values are not unique."
+        )
+
+    # Infer shape from unique values per axis
+    shape = tuple(len(np.unique(sorted_coords[:, i])) for i in range(sorted_coords.shape[1]))
+
+    # Check perfect grid
+    if np.prod(shape) != sorted_coords.shape[0]:
+        raise ValueError(
+            f"Coordinates do not form a perfect Cartesian grid: inferred shape {shape} "
+            f"does not match number of points {sorted_coords.shape[0]}"
+        )
+
+    # Split back into individual arrays and cast to original type
+    sorted_arrays = []
+    for i, orig_type in enumerate(orig_types):
+        arr = sorted_coords[:, i]
+        arr = arr.astype(orig_type).reshape(shape)
+        sorted_arrays.append(arr)
+
+    return sorted_arrays, indices
+
+
+def all_elements_unique(items):
+    """
+    The most general uniqueness check, but also the slowest (O(n^2)).
+
+    It works for ANY type that supports equality checking (==), including
+    lists, dicts, and custom objects, without requiring them to be hashable.
+    """
+    for i in range(len(items)):
+        for j in range(i + 1, len(items)):
+            if items[i] == items[j]:
+                return False
+    return True
+
+
+
+
+# def NEWmeshvals(coords):
+#     stack_coords = [np.array(c, dtype=object) for c in coords]
+#     stack_coords = np.stack(stack_coords)
+#     mesh_coords, sorted_indices = _meshvals(stack_coords)
+#     mesh_coords = [mesh_coords[d,...] for d in range(mesh_coords.shape[0])]
+#     return mesh_coords, sorted_indices
+
+
+# def _meshvals(coords):
+#     # Input array shape: (d, f) with d = nr of dims and f = nr of frames
+#     # Output array shape: (d, f1,..., fd)
+#     if coords.size == 0:
+#         return np.array([])
+#     # Sort by column
+#     sorted_indices = np.lexsort(coords[::-1])
+#     sorted_array = coords[:, sorted_indices]
+#     # Find shape
+#     shape = _mesh_shape(sorted_array)
+#     # Reshape
+#     mesh_array = sorted_array.reshape(shape)
+#     return mesh_array, sorted_indices
+
+
+# def _mesh_shape(sorted_array):
+
+#     nd = np.unique(sorted_array[0,:]).size
+#     shape = (sorted_array.shape[0], nd)
+
+#     for dim in range(1,shape[0]):
+#         shape_dim = (shape[0], np.prod(shape[1:]), -1)
+#         sorted_array = sorted_array.reshape(shape_dim)
+#         nd = [np.unique(sorted_array[dim,d,:]).size for d in range(shape_dim[1])]
+#         shape = shape + (max(nd),)
+
+#     if np.prod(shape) != sorted_array.size:
+#         raise ValueError(
+#             'Improper dimensions for the series. This usually means '
+#             'that there are multiple images at the same location, \n or that '
+#             'there are no images at one or more locations. \n\n'
+#             'Make sure to specify proper dimensions when reading a pixel array or volume. \n'
+#             'If the default dimensions of pixel_array (InstanceNumber) generate this error, '
+#             'the DICOM data may be corrupted.'
+#         )
+
+#     return shape
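
Despite the docstring's *arrays wording, the new meshvals() takes a single list of equal-length flattened coordinate arrays and returns two values: the coordinate arrays reshaped onto the inferred Cartesian grid, and the permutation that sorts the flat frames onto that grid. A small usage sketch with invented slice locations and flip angles:

from dbdicom.utils.arrays import meshvals

slice_loc  = [0.0, 0.0, 0.0, 5.0, 5.0, 5.0]
flip_angle = [2.0, 10.0, 25.0, 2.0, 10.0, 25.0]

(mesh_loc, mesh_fa), order = meshvals([slice_loc, flip_angle])

# mesh_loc and mesh_fa now have shape (2, 3):
#   mesh_loc = [[0., 0., 0.], [5., 5., 5.]]
#   mesh_fa  = [[2., 10., 25.], [2., 10., 25.]]
# 'order' is the permutation of the 6 flat frames that produces this grid
# (the identity here, because the inputs are already in lexicographic order).
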