mrzerocore 0.2.5__cp37-abi3-win_amd64.whl → 0.2.7__cp37-abi3-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
MRzeroCore/__init__.py CHANGED
@@ -2,12 +2,13 @@ from .sequence import PulseUsage, Pulse, Repetition, Sequence, chain
  from .phantom.voxel_grid_phantom import VoxelGridPhantom
  from .phantom.custom_voxel_phantom import CustomVoxelPhantom
  from .phantom.sim_data import SimData
- from .phantom import brainweb
+ from .phantom.brainweb import generate_brainweb_phantoms
  from .simulation.isochromat_sim import isochromat_sim
  from .simulation.pre_pass import compute_graph, compute_graph_ext, Graph
  from .simulation.main_pass import execute_graph
  from .reconstruction import reco_adjoint
  from .pulseq.exporter import pulseq_write_cartesian
+
  # Currently not exposed directly as it is not required by typical use cases
  # and also not documented. Used internally by Sequence.from_seq_file.
  # Might re-expose later as it contains sequence plotting functionality
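With this change, generate_brainweb_phantoms is exposed at the package top level instead of through the former mr0.brainweb submodule. A minimal usage sketch (the output directory name below is an arbitrary example, not part of the package):

    import MRzeroCore as mr0

    # Generates the default "3T" BrainWeb phantoms. Downloads are cached in
    # <output_dir>/cache and the results are saved as subjectXX_3T.npz files.
    mr0.generate_brainweb_phantoms("brainweb_output", config="3T")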
MRzeroCore/_prepass.pyd CHANGED
Binary file
MRzeroCore/phantom/brainweb.py CHANGED
@@ -1,31 +1,167 @@
- # The brainweb script contains tools for handling with Brainweb itself.
- # Because the API is not final yet, we just expose the very basic
- # download functionality here.
+ from typing import Literal
+ import json
+ import gzip
+ import requests
  import os
- from . import brainweb
- from .brainweb import SUBJECTS
+ import numpy as np
+ # from perlin_numpy import generate_perlin_noise_3d


- def generate_phantom(
-     subject: int,
-     output: str = os.path.join(os.getcwd(), "brainweb"),
-     scale: int = 1
- ):
-     """Generate an mr0 phantom from BrainWeb data.
+ # Load the brainweb data file that contains info about tissues, subjects, ...
+ brainweb_data_path = os.path.join(os.path.dirname(os.path.realpath(__file__)),
+                                   "brainweb_data.json")
+ brainweb_data = json.load(open(brainweb_data_path))

-     Downloads segmentation data from https://brainweb.bic.mni.mcgill.ca/
-     and fills it with literature T1, T2, ... data to generate an mr0 phantom.
+
+ def load_tissue(subject: int, alias: str, cache_dir: str) -> np.ndarray:
+     download_alias = f"subject{subject:02d}_{alias}"
+     file_name = download_alias + ".i8.gz"  # 8 bit signed int, gnuzip
+     file_path = os.path.join(cache_dir, file_name)
+
+     # Download and cache file if it doesn't exist yet
+     if not os.path.exists(file_path):
+         print(f"Downloading '{download_alias}'", end="", flush=True)
+         response = requests.post(
+             "https://brainweb.bic.mni.mcgill.ca/cgi/brainweb1",
+             data={
+                 "do_download_alias": download_alias,
+                 "format_value": "raw_byte",
+                 "zip_value": "gnuzip"
+             }
+         )
+         with open(file_path, "wb") as f:
+             f.write(response.content)
+         print(" - ", end="")
+
+     # Load the raw BrainWeb data and add it to the return array
+     with gzip.open(file_path) as f:
+         print(f"Loading {os.path.basename(file_path)}", end="", flush=True)
+         # BrainWeb says this data is unsigned, which is a lie
+         tmp = np.frombuffer(f.read(), np.uint8) + 128
+
+     # Vessel bugfix: most of background is 1 instead of zero
+     if alias == "ves":
+         tmp[tmp == 1] = 0
+     data = tmp.reshape(362, 434, 362).swapaxes(0, 2).astype(np.float32)
+
+     print(" - done")
+     return data / 255.0
+
+
+ def gen_noise(range: float, res: np.ndarray) -> np.ndarray:
+     if range == 0:
+         return 1
+     else:
+         freq = 20
+         padded_res = (res + freq - 1) // freq * freq
+         # noise = generate_perlin_noise_3d(padded_res, (freq, freq, freq))
+         noise = np.random.random(padded_res)
+         return 1 + range * noise[:res[0], :res[1], :res[2]]
+
+
+ def downsample(array: np.ndarray, factor: int) -> np.ndarray:
+     # crop array to multiple of factor
+     shape = (np.array(array.shape) // factor) * factor
+     array = array[:shape[0], :shape[1], :shape[2]]
+
+     tmp = np.zeros(shape // factor)
+     for x in range(factor):
+         for y in range(factor):
+             for z in range(factor):
+                 tmp += array[x::factor, y::factor, z::factor]
+
+     return tmp / factor**3
+
+
+ def generate_brainweb_phantoms(
+         output_dir: str,
+         config: Literal["3T", "7T-noise", "3T-highres-fat"] = "3T"):
+     """Generate BrainWeb phantom maps for the selected configuration.
+
+     Raw tissue segmentation data is provided by the BrainWeb Database:
+     http://www.bic.mni.mcgill.ca/brainweb/
+
+     All tissue data etc. are stored in `brainweb_data.json`. To ensure
+     consistent configurations and reproducible results, available configs are
+     stored in this file as well. They specify which field strength to use,
+     which tissues to include, as well as the downsampling and noise levels.
+
+     The emitted files are compressed numpy files, which can be loaded with
+     `np.load(file_name)`. They contain the following arrays:
+
+     - `PD_map`: Proton Density [a.u.]
+     - `T1_map`: T1 relaxation time [s]
+     - `T2_map`: T2 relaxation time [s]
+     - `T2dash_map`: T2' relaxation time [s]
+     - `D_map`: Isotropic Diffusion coefficient [10^-3 mm² / s]
+     - `tissue_XY`: Tissue segmentation for all included tissues

      Parameters
      ----------
-     subject : int
-         Subject ID. Use `mr0.brainweb.SUBJECTS` for a list of valid ID.
-     output : str
-         Path to output folder for the resulting phantoms ('subjectXX.npz').
-         Will also be used to cache the downloaded segmentation data.
-     scale : int
-         Amount of downsampling, e.g., a value of 2 will produce half-res data.
+     output_dir: str
+         The directory where the generated phantoms will be stored to. In
+         addition, a `cache` folder will be generated there too, which contains
+         all the data downloaded from BrainWeb to avoid repeating the download
+         for all configurations or when generating phantoms again.
+     config: ["3T", "7T-noise", "3T-highres-fat"]
+         The configuration for which the maps are generated.
      """
-     brainweb.CACHE_PATH = output
-     phantom = brainweb.Phantom.load(subject, scale)
-     phantom.save()
+     config_data = brainweb_data["configs"][config]
+     cache_dir = os.path.join(output_dir, "cache")
+
+     try:
+         os.makedirs(cache_dir)
+     except FileExistsError:
+         pass
+
+     # Map resolution:
+     res = np.array([362, 434, 362]) // config_data["downsample"]
+
+     def noise() -> np.ndarray:
+         return gen_noise(config_data["noise"], res)
+
+     for subject in brainweb_data["subjects"]:
+         print(f"Generating '{config}', subject {subject}")
+         maps = {
+             "FOV": np.array([0.181, 0.217, 0.181]),
+             "PD_map": np.zeros(res, dtype=np.float32),
+             "T1_map": np.zeros(res, dtype=np.float32),
+             "T2_map": np.zeros(res, dtype=np.float32),
+             "T2dash_map": np.zeros(res, dtype=np.float32),
+             "D_map": np.zeros(res, dtype=np.float32),
+         }
+
+         for tissue in config_data["tissues"]:
+             tissue_map = sum([
+                 load_tissue(subject, alias, cache_dir)
+                 for alias in brainweb_data["download-aliases"][tissue]
+             ])
+             tissue_map = downsample(tissue_map, config_data["downsample"])
+             maps["tissue_" + tissue] = tissue_map
+
+             field_strength = config_data["field-strength"]
+             tissue_data = brainweb_data["tissues"][field_strength][tissue]
+
+             # Separate noise maps is slower but uncorrelated.
+             # Might be better for training or worse - could be configurable
+             print("Adding tissue to phantom", end="", flush=True)
+             maps["PD_map"] += tissue_data["PD"] * tissue_map * noise()
+             maps["T1_map"] += tissue_data["T1"] * tissue_map * noise()
+             maps["T2_map"] += tissue_data["T2"] * noise()
+             maps["T2dash_map"] += tissue_data["T2'"] * tissue_map * noise()
+             maps["D_map"] += tissue_data["D"] * tissue_map * noise()
+             print(" - done")
+
+         file = os.path.join(output_dir, f"subject{subject:02d}_{config}.npz")
+         print(f"Saving to '{os.path.basename(file)}'", end="", flush=True)
+         np.savez_compressed(file, **maps)
+         print(" - done\n")
+
+
+ if __name__ == "__main__":
+     print("This is for testing only, use generate_brainweb_phantoms directly!")
+     file_dir = os.path.dirname(os.path.realpath(__file__))
+     output_dir = os.path.join(file_dir, "output")
+
+     for config in brainweb_data["configs"].keys():
+         generate_brainweb_phantoms(output_dir, config)
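The docstring above states that the emitted files are compressed numpy archives. A short sketch of inspecting one of them (the path assumes the example call shown earlier; subject 4 is the first entry of the subject list):

    import numpy as np

    # Keys follow the docstring: PD_map, T1_map, T2_map, T2dash_map, D_map,
    # the tissue_* segmentations, and the FOV array written by the generator.
    data = np.load("brainweb_output/subject04_3T.npz")
    print(list(data.keys()))
    print(data["PD_map"].shape, data["FOV"])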
MRzeroCore/phantom/brainweb_data.json ADDED
@@ -0,0 +1,93 @@
+ {
+     "configs": {
+         "3T": {
+             "field-strength": "3T",
+             "tissues": ["gm", "wm", "csf"],
+             "downsample": 3,
+             "noise": 0
+         },
+         "7T-noise": {
+             "field-strength": "7T",
+             "tissues": ["gm", "wm", "csf"],
+             "downsample": 3,
+             "noise": 0.2
+         },
+         "3T-highres-fat": {
+             "field-strength": "3T",
+             "tissues": ["gm", "wm", "csf", "fat"],
+             "downsample": 1,
+             "noise": 0
+         }
+     },
+     "subjects": [
+         4, 5, 6, 18, 20, 38, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54
+     ],
+     "download-aliases": {
+         "gm": ["gry"],
+         "wm": ["wht"],
+         "csf": ["csf", "ves"],
+         "fat": ["fat", "mus", "m-s", "dura", "fat2"]
+     },
+     "tissues": {
+         "3T": {
+             "gm": {
+                 "PD": 0.8,
+                 "T1": 1.56,
+                 "T2": 0.0083,
+                 "T2'": 0.32,
+                 "D": 0.83
+             },
+             "wm": {
+                 "PD": 0.7,
+                 "T1": 0.83,
+                 "T2": 0.075,
+                 "T2'": 0.18,
+                 "D": 0.65
+             },
+             "csf": {
+                 "PD": 1,
+                 "T1": 4.16,
+                 "T2": 1.65,
+                 "T2'": 0.059,
+                 "D": 3.19
+             },
+             "fat": {
+                 "PD": 1,
+                 "T1": 0.37,
+                 "T2": 0.125,
+                 "T2'": 0.012,
+                 "D": 0.1
+             }
+         },
+         "7T": {
+             "gm": {
+                 "PD": 0.8,
+                 "T1": 1.67,
+                 "T2": 0.043,
+                 "T2'": 0.82,
+                 "D": 0.83
+             },
+             "wm": {
+                 "PD": 0.7,
+                 "T1": 1.22,
+                 "T2": 0.037,
+                 "T2'": 0.65,
+                 "D": 0.65
+             },
+             "csf": {
+                 "PD": 1,
+                 "T1": 4.0,
+                 "T2": 0.8,
+                 "T2'": 0.204,
+                 "D": 3.19
+             },
+             "fat": {
+                 "PD": 1,
+                 "T1": 0.374,
+                 "T2": 0.125,
+                 "T2'": 0.0117,
+                 "D": 0.1
+             }
+         }
+     }
+ }
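The downsample entries above directly set the map resolution used by generate_brainweb_phantoms, which computes res = [362, 434, 362] // downsample. A small illustration of what the shipped configs yield:

    import numpy as np

    full_res = np.array([362, 434, 362])  # raw BrainWeb grid
    for name, factor in [("3T", 3), ("7T-noise", 3), ("3T-highres-fat", 1)]:
        print(name, full_res // factor)
    # "3T" and "7T-noise" produce 120 x 144 x 120 maps,
    # "3T-highres-fat" keeps the full 362 x 434 x 362 resolution.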
@@ -1,3 +1,8 @@
+ ! NOTE
+ 7T maps are not checked as thrououghly. Only source so far:
+ https://cds.ismrm.org/protected/14MProceedings/PDFfiles/3208.pdf
+
+
  # T1 and T2 times, taken from:
  # https://mri-q.com/uploads/3/4/5/7/34572113/normal_relaxation_times_at_3t.pdf
  # Value taken from paper with most participants (draw: closest to mean of all)
MRzeroCore/phantom/sim_data.py CHANGED
@@ -32,9 +32,9 @@ class SimData:
          (coil_count, voxel_count) Per coil and per voxel B1 inhomogenity
      coil_sens : torch.Tensor
          (coil_count, voxel_count) Per coil sensitivity (arbitrary units)
-     fov : torch.Tensor
-         Physical size of the phantom, needed for diffusion (meters).
-         More specifically, a gradient moment of 1 has a wavelength of fov
+     size : torch.Tensor
+         Physical size of the phantom. If a sequence with normalized gradients
+         is simulated, size is used to scale them to match the phantom.
      avg_B1_trig : torch.Tensor
          (361, 3) values containing the PD-weighted avg of sin/cos/sin²(B1*flip)
      voxel_pos : torch.Tensor
@@ -60,7 +60,7 @@ class SimData:
          B0: torch.Tensor,
          B1: torch.Tensor,
          coil_sens: torch.Tensor,
-         fov: torch.Tensor,
+         size: torch.Tensor,
          voxel_pos: torch.Tensor,
          nyquist: torch.Tensor,
          dephasing_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
@@ -93,7 +93,7 @@ class SimData:
          self.B0 = B0.clone()
          self.B1 = B1.clone()
          self.coil_sens = coil_sens.clone()
-         self.fov = fov.clone()
+         self.size = size.clone()
          self.voxel_pos = voxel_pos.clone()
          self.avg_B1_trig = calc_avg_B1_trig(B1, PD)
          self.nyquist = nyquist.clone()
@@ -115,7 +115,7 @@ class SimData:
              self.B0.cuda(),
              self.B1.cuda(),
              self.coil_sens.cuda(),
-             self.fov.cuda(),
+             self.size.cuda(),
              self.voxel_pos.cuda(),
              self.nyquist.cuda(),
              self.dephasing_func,
@@ -137,7 +137,7 @@ class SimData:
              self.B0.cpu(),
              self.B1.cpu(),
              self.coil_sens.cpu(),
-             self.fov.cpu(),
+             self.size.cpu(),
              self.voxel_pos.cpu(),
              self.nyquist.cpu(),
              self.dephasing_func,
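The renamed size attribute keeps the role the docstring describes: it scales normalized sequence gradients to the physical phantom. In the VoxelGridPhantom.build() changes below it enters the geometry in two places, as voxel positions spread over size and as the Nyquist limit shape / 2 / size passed to SimData. A hedged numerical illustration (the values reuse the BrainWeb FOV and the "3T" resolution from above):

    import torch

    size = torch.tensor([0.181, 0.217, 0.181])   # physical phantom size [m]
    shape = torch.tensor([120, 144, 120])        # voxel grid shape

    voxel_size = size / shape        # physical edge length of one voxel [m]
    nyquist = shape / 2 / size       # highest resolvable spatial frequency [1/m]
    print(voxel_size, nyquist)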
MRzeroCore/phantom/voxel_grid_phantom.py CHANGED
@@ -1,4 +1,5 @@
  from __future__ import annotations
+ from typing import Literal
  from scipy import io
  import numpy as np
  import torch
@@ -42,13 +43,6 @@ class VoxelGridPhantom:
      As it is bandwidth limited, we assume that there is no signal above the
      Nyquist frequency. This leads to the usage of sinc-shaped voxels.

-     This phantom has two FOVs: ``base_fov`` encodes the physical size in meters
-     and is set on load. ``rel_fov`` is initially 1 and is changed by some
-     operations like reducing the phantom to a few slices. This allows to either
-     use only ``rel_fov`` in the simulation so that the sequence can still
-     assume an FOV of 1, or to use ``base_fov * rel_fov`` and use SI units in
-     the sequence definition.
-
      Attributes
      ----------
      PD : torch.Tensor
@@ -67,10 +61,8 @@ class VoxelGridPhantom:
          (coil_count, sx, sy, sz) tensor of RF coil profiles
      coil_sens : torch.Tensor
          (coil_count, sx, sy, sz) tensor of coil sensitivities
-     base_fov : torch.Tensor
-         Base size of the original loaded data, in meters.
-     rel_fov : torch.Tensor
-         Actual phantom size relative to ``base_fov``.
+     size : torch.Tensor
+         Size of the data, in meters.
      """

      def __init__(
@@ -83,8 +75,7 @@ class VoxelGridPhantom:
          B0: torch.Tensor,
          B1: torch.Tensor,
          coil_sens: torch.Tensor,
-         base_fov: torch.Tensor,
-         rel_fov: torch.Tensor,
+         size: torch.Tensor,
      ) -> None:
          """Set the phantom attributes to the provided parameters.

@@ -99,30 +90,31 @@ class VoxelGridPhantom:
          self.B0 = B0
          self.B1 = B1
          self.coil_sens = coil_sens
-         self.base_fov = base_fov
-         self.rel_fov = rel_fov
+         self.size = size

      def build(self, PD_threshold: float = 1e-6,
-               use_SI_FoV: bool = False, voxel_shape="sinc") -> SimData:
+               voxel_shape: Literal["sinc", "box", "point"] = "sinc"
+               ) -> SimData:
          """Build a :class:`SimData` instance for simulation.

          Arguments
          ---------
-         PD_threshold: float
+         PD_threshold : float
              All voxels with a proton density below this value are ignored.
-         use_SI_FoV: bool
-             If set to ``True``, the built :class:`SimData` will use its
-             physical size in meters. If set to ``False``, the ``rel_fov`` is
-             used, which means a sequence FOV of 1 is assumed.
          """
          mask = self.PD > PD_threshold

-         fov = (self.base_fov * self.rel_fov) if use_SI_FoV else (self.rel_fov)
          shape = torch.tensor(mask.shape)
          pos_x, pos_y, pos_z = torch.meshgrid(
-             fov[0] * torch.fft.fftshift(torch.fft.fftfreq(int(shape[0]), device=self.PD.device)),
-             fov[1] * torch.fft.fftshift(torch.fft.fftfreq(int(shape[1]), device=self.PD.device)),
-             fov[2] * torch.fft.fftshift(torch.fft.fftfreq(int(shape[2]), device=self.PD.device)),
+             self.size[0] *
+             torch.fft.fftshift(torch.fft.fftfreq(
+                 int(shape[0]), device=self.PD.device)),
+             self.size[1] *
+             torch.fft.fftshift(torch.fft.fftfreq(
+                 int(shape[1]), device=self.PD.device)),
+             self.size[2] *
+             torch.fft.fftshift(torch.fft.fftfreq(
+                 int(shape[2]), device=self.PD.device)),
          )

          voxel_pos = torch.stack([
@@ -132,11 +124,11 @@ class VoxelGridPhantom:
          ], dim=1)

          if voxel_shape == "box":
-             dephasing_func = lambda t, n: sinc(t, 0.5 / n)
+             def dephasing_func(t, n): return sinc(t, 0.5 / n)
          elif voxel_shape == "sinc":
-             dephasing_func = lambda t, n: sigmoid(t, n)
+             def dephasing_func(t, n): return sigmoid(t, n)
          elif voxel_shape == "point":
-             dephasing_func = lambda t, _: identity(t)
+             def dephasing_func(t, _): return identity(t)
          else:
              raise ValueError(f"Unsupported voxel shape '{voxel_shape}'")

@@ -149,11 +141,12 @@ class VoxelGridPhantom:
              self.B0[mask],
              self.B1[:, mask],
              self.coil_sens[:, mask],
-             self.base_fov * self.rel_fov,  # Always SI, only used for diffusion
+             self.size,
              voxel_pos,
-             torch.tensor(shape, device=self.PD.device) / 2 / fov,
+             torch.tensor(shape, device=self.PD.device) / 2 / self.size,
              dephasing_func,
-             recover_func=lambda d: recover(mask, self.base_fov, self.rel_fov, d)
+             recover_func=lambda d: recover(
+                 mask, self.base_fov, self.rel_fov, d)
          )

      @classmethod
@@ -182,11 +175,14 @@ class VoxelGridPhantom:
          B0 -= (B0 * weight).sum()
          B1 /= (B1 * weight).sum()

+         try:
+             size = torch.tensor(data['FOV'])
+         except KeyError:
+             size = torch.tensor([0.192, 0.192, 0.192])
+
          return cls(
              PD, T1, T2, T2dash, D, B0, B1[None, ...],
-             coil_sens=torch.ones(1, *PD.shape),
-             base_fov=torch.tensor([0.192, 0.192, 0.192]),
-             rel_fov=torch.ones(3)
+             torch.ones(1, *PD.shape), size,
          )

      @classmethod
@@ -255,8 +251,7 @@ class VoxelGridPhantom:
              data[..., 3],  # B0
              data[..., 4][None, ...],  # B1
              coil_sens=torch.ones(1, *data.shape[:-1]),
-             base_fov=torch.tensor([0.2, 0.2, 0.008]),
-             rel_fov=torch.ones(3)
+             size=torch.tensor([0.2, 0.2, 0.008]),
          )

      def slices(self, slices: list[int]) -> VoxelGridPhantom:
@@ -274,9 +269,6 @@ class VoxelGridPhantom:
          """
          assert 0 <= any([slices]) < self.PD.shape[2]

-         # fov = self.rel_fov.clone()
-         # fov[2] *= len(slices) / self.PD.shape[2]
-
          def select(tensor: torch.Tensor):
              return tensor[..., slices].view(
                  *list(self.PD.shape[:2]), len(slices)
@@ -291,8 +283,7 @@ class VoxelGridPhantom:
              select(self.B0),
              select(self.B1).unsqueeze(0),
              select(self.coil_sens).unsqueeze(0),
-             self.base_fov.clone(),
-             self.rel_fov.clone(),
+             self.size.clone(),
          )

      def scale_fft(self, x: int, y: int, z: int) -> VoxelGridPhantom:
@@ -331,8 +322,7 @@ class VoxelGridPhantom:
              scale(self.B0),
              scale(self.B1.squeeze()).unsqueeze(0),
              scale(self.coil_sens.squeeze()).unsqueeze(0),
-             self.base_fov.clone(),
-             self.rel_fov.clone(),
+             self.size.clone(),
          )

      def interpolate(self, x: int, y: int, z: int) -> VoxelGridPhantom:
@@ -379,17 +369,13 @@ class VoxelGridPhantom:
              resample(self.B0),
              resample_multicoil(self.B1),
              resample_multicoil(self.coil_sens),
-             self.base_fov.clone(),
-             self.rel_fov.clone(),
+             self.size.clone(),
          )

      def plot(self) -> None:
          """Print and plot all data stored in this phantom."""
          print("VoxelGridPhantom")
-         print(
-             f"FOV: base * rel = {self.base_fov} * {self.rel_fov} "
-             f"= {self.base_fov * self.rel_fov}"
-         )
+         print(f"size = {self.size}")
          # Center slice
          s = self.PD.shape[2] // 2
          # Warn if we only print a part of all data
@@ -434,39 +420,37 @@ class VoxelGridPhantom:
          plt.imshow(self.coil_sens[0, :, :, s].T.cpu(), vmin=0, origin="lower")
          plt.colorbar()
          plt.show()
-
+
      def plot3D(self, data2print: int = 0) -> None:
          """Print and plot all slices of one selected data stored in this phantom."""
          print("VoxelGridPhantom")
-         print(
-             f"FOV: base * rel = {self.base_fov} * {self.rel_fov} "
-             f"= {self.base_fov * self.rel_fov}"
-         )
+         print(f"size = {self.size}")
          print()
-
+
          label = ['PD', 'T1', 'T2', "T2'", "D", "B0", "B1", "coil sens"]
-
+
          tensors = [
-             self.PD, self.T1, self.T2, self.T2dash, self.D, self.B0,
+             self.PD, self.T1, self.T2, self.T2dash, self.D, self.B0,
              self.B1.squeeze(0), self.coil_sens
          ]
-
+
          # Warn if we only print a part of all data
          print(f"Plotting {label[data2print]}")
-
+
          tensor = tensors[data2print].squeeze(0)
-
-         util.plot3D(tensor,figsize=(20, 5))
+
+         util.plot3D(tensor, figsize=(20, 5))
          plt.title(label[data2print])
          plt.show()


- def recover(mask, base_fov, rel_fov, sim_data: SimData) -> VoxelGridPhantom:
+ def recover(mask, sim_data: SimData) -> VoxelGridPhantom:
      """Provided to :class:`SimData` to reverse the ``build()``"""
      def to_full(sparse):
          assert sparse.ndim < 3
          if sparse.ndim == 2:
-             full = torch.zeros([sparse.shape[0], *mask.shape])
+             full = torch.zeros(
+                 [sparse.shape[0], *mask.shape], dtype=sparse.dtype)
              full[:, mask] = sparse.cpu()
          else:
              full = torch.zeros(mask.shape)
@@ -482,8 +466,7 @@ def recover(mask, base_fov, rel_fov, sim_data: SimData) -> VoxelGridPhantom:
          to_full(sim_data.B0),
          to_full(sim_data.B1),
          to_full(sim_data.coil_sens),
-         base_fov,
-         rel_fov
+         sim_data.size
      )

472