mrd-python 2.0.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- mrd/__init__.py +96 -0
- mrd/_binary.py +1339 -0
- mrd/_dtypes.py +89 -0
- mrd/_ndjson.py +1194 -0
- mrd/binary.py +680 -0
- mrd/ndjson.py +2716 -0
- mrd/protocols.py +180 -0
- mrd/tools/export_png_images.py +39 -0
- mrd/tools/minimal_example.py +27 -0
- mrd/tools/phantom.py +161 -0
- mrd/tools/simulation.py +173 -0
- mrd/tools/stream_recon.py +184 -0
- mrd/tools/transform.py +37 -0
- mrd/types.py +1840 -0
- mrd/yardl_types.py +303 -0
- mrd_python-2.0.0.dist-info/METADATA +19 -0
- mrd_python-2.0.0.dist-info/RECORD +19 -0
- mrd_python-2.0.0.dist-info/WHEEL +5 -0
- mrd_python-2.0.0.dist-info/top_level.txt +1 -0
|
@@ -0,0 +1,184 @@
|
|
|
1
|
+
import sys
|
|
2
|
+
import argparse
|
|
3
|
+
import numpy as np
|
|
4
|
+
from typing import BinaryIO, Iterable, Union
|
|
5
|
+
|
|
6
|
+
import mrd
|
|
7
|
+
from mrd.tools.transform import kspace_to_image, image_to_kspace
|
|
8
|
+
|
|
9
|
+
|
|
10
|
+
def acquisition_reader(input: Iterable[mrd.StreamItem]) -> Iterable[mrd.Acquisition]:
    """Yield the Acquisition payloads contained in a raw MRD stream.

    Items that are not acquisitions are skipped, as are acquisitions
    flagged as noise measurements.
    """
    for stream_item in input:
        if not isinstance(stream_item, mrd.StreamItem.Acquisition):
            # Only k-space acquisitions are of interest here
            continue
        acq = stream_item.value
        if acq.flags & mrd.AcquisitionFlags.IS_NOISE_MEASUREMENT:
            # Currently ignoring noise scans
            continue
        yield acq
|
20
|
+
def stream_item_sink(input: Iterable[Union[mrd.Acquisition, mrd.Image[np.float32]]]) -> Iterable[mrd.StreamItem]:
    """Wrap acquisitions and float32 images back into MRD StreamItem variants.

    Raises ValueError for anything that is neither an Acquisition nor a
    float32 Image.
    """
    for obj in input:
        if isinstance(obj, mrd.Acquisition):
            yield mrd.StreamItem.Acquisition(obj)
            continue
        if isinstance(obj, mrd.Image) and obj.data.dtype == np.float32:
            yield mrd.StreamItem.ImageFloat(obj)
            continue
        raise ValueError("Unknown item type")
|
29
|
+
def remove_oversampling(head: mrd.Header, input: Iterable[mrd.Acquisition]) -> Iterable[mrd.Acquisition]:
    """Crop readout (x-axis) oversampling from each acquisition.

    When the encoded matrix is wider than the recon matrix, each readout
    is transformed to image space, center-cropped to the recon width, and
    transformed back to k-space. Acquisitions are yielded in order either
    way.

    :raises Exception: if the header lacks encoded/recon matrix sizes.
    """
    enc = head.encoding[0]

    if not (enc.encoded_space and enc.encoded_space.matrix_size
            and enc.recon_space and enc.recon_space.matrix_size):
        raise Exception('Encoding information missing from header')

    encoded_nx = enc.encoded_space.matrix_size.x
    recon_nx = enc.recon_space.matrix_size.x

    for acq in input:
        # Only crop full-width readouts; shorter ones are passed through.
        if encoded_nx != recon_nx and acq.samples() == encoded_nx:
            profile = kspace_to_image(acq.data, [1])
            start = (encoded_nx - recon_nx) // 2
            cropped = profile[:, start:start + recon_nx]
            acq.center_sample = recon_nx // 2
            acq.data = image_to_kspace(cropped, [1])
        yield acq
|
+
|
|
48
|
+
def accumulate_fft(head: mrd.Header, input: Iterable[mrd.Acquisition]) -> Iterable[mrd.Image[np.float32]]:
    """Buffer k-space per repetition and emit magnitude images via FFT.

    Acquisitions are sorted into a (contrast, slice, coil, kz, ky, kx)
    buffer. When the repetition index changes (or the stream ends), the
    buffer is Fourier-transformed, coil-combined with root-sum-of-squares,
    cropped to the recon matrix, and yielded as float32 magnitude images.

    :param head: MRD header providing encoding/recon geometry
    :param input: stream of acquisitions (oversampling removed upstream)
    :raises Exception: if encoding or field-of-view info is missing,
        or a combined image is not 2- or 3-dimensional
    """
    enc = head.encoding[0]

    # Matrix size
    if enc.encoded_space and enc.recon_space and enc.encoded_space.matrix_size and enc.recon_space.matrix_size:
        eNx = enc.encoded_space.matrix_size.x
        eNy = enc.encoded_space.matrix_size.y
        eNz = enc.encoded_space.matrix_size.z
        rNx = enc.recon_space.matrix_size.x
        rNy = enc.recon_space.matrix_size.y
        rNz = enc.recon_space.matrix_size.z
    else:
        raise Exception('Required encoding information not found in header')

    # Field of view
    if enc.recon_space and enc.recon_space.field_of_view_mm:
        rFOVx = enc.recon_space.field_of_view_mm.x
        rFOVy = enc.recon_space.field_of_view_mm.y
        # Fall back to 1 so the per-slice FOV division below stays valid
        rFOVz = enc.recon_space.field_of_view_mm.z if enc.recon_space.field_of_view_mm.z else 1
    else:
        raise Exception('Required field of view information not found in header')

    # Number of Slices, Reps, Contrasts, etc.
    ncoils = 1
    if head.acquisition_system_information and head.acquisition_system_information.receiver_channels:
        ncoils = head.acquisition_system_information.receiver_channels

    nslices = 1
    if enc.encoding_limits and enc.encoding_limits.slice is not None:
        nslices = enc.encoding_limits.slice.maximum + 1

    ncontrasts = 1
    if enc.encoding_limits and enc.encoding_limits.contrast is not None:
        ncontrasts = enc.encoding_limits.contrast.maximum + 1

    # Shift asymmetric/partial sampling so the encoding center lands in
    # the middle of the buffer's ky axis.
    ky_offset = 0
    if enc.encoding_limits and enc.encoding_limits.kspace_encoding_step_1 is not None:
        ky_offset = int((eNy + 1) / 2) - enc.encoding_limits.kspace_encoding_step_1.center

    current_rep = -1
    reference_acquisition = None
    buffer = None

    def produce_image(buffer: np.ndarray, ref_acq: mrd.Acquisition) -> Iterable[mrd.Image[np.float32]]:
        """Turn a filled k-space buffer into per-contrast/slice magnitude images."""
        # 3D data (more than one partition) needs a 3D FFT; otherwise x/y only.
        if buffer.shape[-3] > 1:
            img = kspace_to_image(buffer, dim=[-1, -2, -3])
        else:
            img = kspace_to_image(buffer, dim=[-1, -2])

        for contrast in range(img.shape[0]):
            for islice in range(img.shape[1]):
                slice_data = img[contrast, islice]
                # Root-sum-of-squares combination across the coil axis
                combined = np.squeeze(np.sqrt(np.abs(np.sum(slice_data * np.conj(slice_data), axis=0)).astype('float32')))

                # Center-crop the combined image down to the recon matrix
                xoffset = (combined.shape[-1] + 1) // 2 - (rNx + 1) // 2
                yoffset = (combined.shape[-2] + 1) // 2 - (rNy + 1) // 2
                if len(combined.shape) == 3:
                    zoffset = (combined.shape[-3] + 1) // 2 - (rNz + 1) // 2
                    combined = combined[zoffset:(zoffset + rNz), yoffset:(yoffset + rNy), xoffset:(xoffset + rNx)]
                    combined = np.reshape(combined, (1, combined.shape[-3], combined.shape[-2], combined.shape[-1]))
                elif len(combined.shape) == 2:
                    combined = combined[yoffset:(yoffset + rNy), xoffset:(xoffset + rNx)]
                    combined = np.reshape(combined, (1, 1, combined.shape[-2], combined.shape[-1]))
                else:
                    raise Exception('Array img_combined should have 2 or 3 dimensions')

                mrd_image = mrd.Image[np.float32](image_type=mrd.ImageType.MAGNITUDE, data=combined)
                mrd_image.field_of_view[0] = rFOVx
                mrd_image.field_of_view[1] = rFOVy
                mrd_image.field_of_view[2] = rFOVz / rNz
                mrd_image.position = ref_acq.position
                mrd_image.col_dir = ref_acq.read_dir
                mrd_image.line_dir = ref_acq.phase_dir
                mrd_image.slice_dir = ref_acq.slice_dir
                mrd_image.patient_table_position = ref_acq.patient_table_position
                mrd_image.acquisition_time_stamp = ref_acq.acquisition_time_stamp
                mrd_image.physiology_time_stamp = ref_acq.physiology_time_stamp
                # NOTE(review): slice is taken from the reference acquisition,
                # not from `islice` — confirm this is the intended labeling
                mrd_image.slice = ref_acq.idx.slice
                mrd_image.contrast = contrast
                mrd_image.repetition = ref_acq.idx.repetition
                mrd_image.phase = ref_acq.idx.phase
                mrd_image.average = ref_acq.idx.average
                mrd_image.set = ref_acq.idx.set
                yield mrd_image

    for acq in input:
        if acq.idx.repetition != current_rep:
            # Repetition boundary: flush the previous buffer as images
            if buffer is not None and reference_acquisition is not None:
                yield from produce_image(buffer, reference_acquisition)

            # Size the readout axis to whatever the incoming data carries
            if acq.data.shape[-1] == eNx:
                readout_length = eNx
            else:
                readout_length = rNx  # Readout oversampling has been removed upstream

            buffer = np.zeros((ncontrasts, nslices, ncoils, eNz, eNy, readout_length), dtype=np.complex64)
            current_rep = acq.idx.repetition
            reference_acquisition = acq

        # Stuff the acquisition into the buffer
        if buffer is not None:
            contrast = acq.idx.contrast if acq.idx.contrast is not None else 0
            slice_idx = acq.idx.slice if acq.idx.slice is not None else 0
            k1 = acq.idx.kspace_encode_step_1 if acq.idx.kspace_encode_step_1 is not None else 0
            k2 = acq.idx.kspace_encode_step_2 if acq.idx.kspace_encode_step_2 is not None else 0
            buffer[contrast, slice_idx, :, k2, k1 + ky_offset, :] = acq.data

    # Flush the final repetition
    if buffer is not None and reference_acquisition is not None:
        yield from produce_image(buffer, reference_acquisition)
        buffer = None
        reference_acquisition = None
|
+
|
|
162
|
+
def reconstruct_mrd_stream(input: BinaryIO, output: BinaryIO):
    """Run the streaming reconstruction pipeline: read header and
    acquisitions from `input`, remove readout oversampling, reconstruct
    magnitude images, and write header plus images to `output`.
    """
    with mrd.BinaryMrdReader(input) as reader, mrd.BinaryMrdWriter(output) as writer:
        head = reader.read_header()
        if head is None:
            raise Exception("Could not read header")
        writer.write_header(head)

        # Build the lazy pipeline stage by stage, then drain it into the writer
        acquisitions = acquisition_reader(reader.read_data())
        cropped = remove_oversampling(head, acquisitions)
        images = accumulate_fft(head, cropped)
        writer.write_data(stream_item_sink(images))
|
175
|
+
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Reconstructs an MRD stream")
    parser.add_argument('-i', '--input', type=str, required=False, help="Input file, defaults to stdin")
    parser.add_argument('-o', '--output', type=str, required=False, help="Output file, defaults to stdout")
    args = parser.parse_args()

    input = open(args.input, "rb") if args.input is not None else sys.stdin.buffer
    output = open(args.output, "wb") if args.output is not None else sys.stdout.buffer

    try:
        reconstruct_mrd_stream(input, output)
    finally:
        # Close only files we opened ourselves; never close the std streams.
        if input is not sys.stdin.buffer:
            input.close()
        if output is not sys.stdout.buffer:
            output.close()
|
mrd/tools/transform.py
ADDED
|
@@ -0,0 +1,37 @@
|
|
|
1
|
+
"""
|
|
2
|
+
Helpers for transforming data from k-space to image space and vice-versa.
|
|
3
|
+
"""
|
|
4
|
+
import warnings
|
|
5
|
+
import numpy as np
|
|
6
|
+
from numpy.fft import fftshift, ifftshift, fftn, ifftn
|
|
7
|
+
|
|
8
|
+
def kspace_to_image(k: np.ndarray, dim=None, img_shape=None) -> np.ndarray:
    """ Computes the Fourier transform from k-space to image space
    along a given or all dimensions

    :param k: k-space data
    :param dim: vector of dimensions to transform
    :param img_shape: desired shape of output image
    :returns: data in image space (along transformed dimensions)
    """
    axes = dim if dim else range(k.ndim)
    # Centered inverse FFT: shift DC to the corner, transform, shift back
    centered = ifftshift(k, axes=axes)
    img = fftshift(ifftn(centered, s=img_shape, axes=axes), axes=axes)
    # Scale so the transform is unitary over the transformed axes
    scale = np.sqrt(np.prod(np.take(img.shape, axes)))
    return img * scale
|
+
|
|
23
|
+
|
|
24
|
+
def image_to_kspace(img: np.ndarray, dim=None, k_shape=None) -> np.ndarray:
    """ Computes the Fourier transform from image space to k-space space
    along a given or all dimensions

    :param img: image space data
    :param dim: vector of dimensions to transform
    :param k_shape: desired shape of output k-space data
    :returns: data in k-space (along transformed dimensions)
    """
    axes = dim if dim else range(img.ndim)
    # Centered forward FFT: shift center to the corner, transform, shift back
    centered = ifftshift(img, axes=axes)
    k = fftshift(fftn(centered, s=k_shape, axes=axes), axes=axes)
    # Scale so the transform is unitary over the transformed axes
    return k / np.sqrt(np.prod(np.take(img.shape, axes)))