mrzerocore 0.2.12__cp37-abi3-win_amd64.whl → 0.3.1__cp37-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- MRzeroCore/__init__.py +8 -5
- MRzeroCore/_prepass.pyd +0 -0
- MRzeroCore/phantom/brainweb/__init__.py +5 -5
- MRzeroCore/phantom/custom_voxel_phantom.py +4 -3
- MRzeroCore/phantom/sim_data.py +11 -2
- MRzeroCore/phantom/voxel_grid_phantom.py +63 -41
- MRzeroCore/sequence.py +1 -1
- MRzeroCore/simulation/isochromat_sim.py +37 -13
- MRzeroCore/simulation/main_pass.py +68 -18
- MRzeroCore/simulation/pre_pass.py +10 -1
- MRzeroCore/util.py +31 -3
- {MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/METADATA +7 -7
- {MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/RECORD +15 -15
- {MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/WHEEL +1 -1
- {MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/license_files/LICENSE +0 -0
MRzeroCore/__init__.py
CHANGED
@@ -1,3 +1,11 @@
+import numpy
+if not hasattr(numpy, "int"):
+    numpy.int = int
+if not hasattr(numpy, "float"):
+    numpy.float = float
+if not hasattr(numpy, "complex"):
+    numpy.complex = complex
+
 from .sequence import PulseUsage, Pulse, Repetition, Sequence, chain
 from .phantom.voxel_grid_phantom import VoxelGridPhantom
 from .phantom.custom_voxel_phantom import CustomVoxelPhantom
@@ -9,8 +17,3 @@ from .simulation.main_pass import execute_graph
 from .reconstruction import reco_adjoint
 from .pulseq.exporter import pulseq_write_cartesian
 from . import util
-
-# Currently not exposed directly as it is not required by typical use cases
-# and also not documented. Used internally by Sequence.from_seq_file.
-# Might re-expose later as it contains sequence plotting functionality
-# from .pulseq.pulseq_loader import PulseqFile
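The new import-time shim restores the `numpy.int`, `numpy.float` and `numpy.complex` aliases, which NumPy deprecated in 1.20 and removed in 1.24, so downstream code that still references them keeps working on current NumPy. A minimal standalone sketch of the same idea, independent of this package:

    import numpy

    # The removed aliases were plain synonyms for the builtins, so restoring
    # them as builtins is safe on both old and new NumPy versions.
    for _name, _builtin in (("int", int), ("float", float), ("complex", complex)):
        if not hasattr(numpy, _name):
            setattr(numpy, _name, _builtin)

    assert numpy.int is int  # alias is available again (or was never removed)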
MRzeroCore/_prepass.pyd
CHANGED
Binary file
MRzeroCore/phantom/brainweb/__init__.py
CHANGED
@@ -85,13 +85,13 @@ def generate_brainweb_phantoms(
     Raw tissue segmentation data is provided by the BrainWeb Database:
     http://www.bic.mni.mcgill.ca/brainweb/
 
-    All tissue data etc. are stored in
-    consistent configurations and reproducible results, available
-    stored in this file as well. They specify which field strength
-    which tissues to include,
+    All tissue data etc. are stored in [brainweb_data.json](https://github.com/MRsources/MRzero-Core/blob/main/python/MRzeroCore/phantom/brainweb/brainweb_data.json).
+    To ensure consistent configurations and reproducible results, available
+    configs are stored in this file as well. They specify which field strength
+    to use, which tissues to include, and the downsampling and noise levels.
 
     The emitted files are compressed numpy files, which can be loaded with
-
+    ``np.load(file_name)``. They contain the following arrays:
 
     - `PD_map`: Proton Density [a.u.]
     - `T1_map`: T1 relaxation time [s]
MRzeroCore/phantom/custom_voxel_phantom.py
CHANGED
@@ -52,7 +52,7 @@ class CustomVoxelPhantom:
     Attributes
     ----------
     voxel_pos : torch.Tensor
-        (voxel_count, 3) tensor
+        (voxel_count, 3) tensor of voxel positions in SI units [m]
     PD : torch.Tensor
         1D tensor containing the Proton Density of all voxels
     T1 : torch.Tensor
@@ -128,6 +128,7 @@ class CustomVoxelPhantom:
         """Build a :class:`SimData` instance for simulation."""
         # TODO: until the dephasing func fix is here, this only works on the
         # device self.voxel_size happens to be on
+        size = self.voxel_pos.max(0).values - self.voxel_pos.min(0).values
 
         return SimData(
             self.PD,
@@ -138,7 +139,7 @@ class CustomVoxelPhantom:
             self.B0,
             self.B1[None, :],
             torch.ones(1, self.PD.numel()),
-
+            size,
             self.voxel_pos,
             torch.tensor([float('inf'), float('inf'), float('inf')]),
             build_dephasing_func(self.voxel_shape, self.voxel_size),
@@ -212,7 +213,7 @@ class CustomVoxelPhantom:
         D = torch.fft.fftshift(torch.fft.ifft2(D_kspace))
 
         maps = [PD, T1, T2, T2dash, D]
-        titles = ["
+        titles = ["PD", "T1", "T2", "T2'", "D"]
 
         print("CustomVoxelPhantom")
        print(f"Voxel shape: {self.voxel_shape}")
MRzeroCore/phantom/sim_data.py
CHANGED
@@ -65,6 +65,8 @@ class SimData:
         nyquist: torch.Tensor,
         dephasing_func: Callable[[torch.Tensor, torch.Tensor], torch.Tensor],
         recover_func: Callable[[SimData], Any] | None = None,
+        phantom_motion=None,
+        voxel_motion=None
     ) -> None:
         """Create a :class:`SimData` instance based on the given tensors.
 
@@ -100,6 +102,9 @@ class SimData:
         self.dephasing_func = dephasing_func
         self.recover_func = recover_func
 
+        self.phantom_motion = phantom_motion
+        self.voxel_motion = voxel_motion
+
     def cuda(self) -> SimData:
         """Move the simulation data to the default CUDA device.
 
@@ -119,7 +124,9 @@ class SimData:
             self.voxel_pos.cuda(),
             self.nyquist.cuda(),
             self.dephasing_func,
-            self.recover_func
+            self.recover_func,
+            self.phantom_motion,
+            self.voxel_motion
         )
 
     def cpu(self) -> SimData:
@@ -141,7 +148,9 @@ class SimData:
             self.voxel_pos.cpu(),
             self.nyquist.cpu(),
             self.dephasing_func,
-            self.recover_func
+            self.recover_func,
+            self.phantom_motion,
+            self.voxel_motion
         )
 
     @property
MRzeroCore/phantom/voxel_grid_phantom.py
CHANGED
@@ -1,10 +1,12 @@
 from __future__ import annotations
 from typing import Literal
+from warnings import warn
 from scipy import io
 import numpy as np
 import torch
 import matplotlib.pyplot as plt
 from .sim_data import SimData
+from ..util import imshow
 
 
 def sigmoid(trajectory: torch.Tensor, nyquist: torch.Tensor) -> torch.Tensor:
@@ -36,6 +38,25 @@ def identity(trajectory: torch.Tensor) -> torch.Tensor:
     return torch.ones_like(trajectory[:, 0])
 
 
+def generate_B0_B1(PD):
+    # Generate a somewhat plausible B0 and B1 map.
+    # Visually fitted to look similar to the numerical_brain_cropped
+    x_pos, y_pos, z_pos = torch.meshgrid(
+        torch.linspace(-1, 1, PD.shape[0]),
+        torch.linspace(-1, 1, PD.shape[1]),
+        torch.linspace(-1, 1, PD.shape[2]),
+        indexing="ij"
+    )
+    B1 = torch.exp(-(0.4*x_pos**2 + 0.2*y_pos**2 + 0.3*z_pos**2))
+    dist2 = (0.4*x_pos**2 + 0.2*(y_pos - 0.7)**2 + 0.3*z_pos**2)
+    B0 = 7 / (0.05 + dist2) - 45 / (0.3 + dist2)
+    # Normalize such that the weighted average is 0 or 1
+    weight = PD / PD.sum()
+    B0 -= (B0 * weight).sum()
+    B1 /= (B1 * weight).sum()
+    return B0, B1
+
+
 class VoxelGridPhantom:
     """Class for using typical phantoms like those provided by BrainWeb.
 
@@ -82,15 +103,15 @@ class VoxelGridPhantom:
         This function does no cloning nor contain any other funcionality. You
         probably want to use :meth:`brainweb` to load a phantom instead.
         """
-        self.PD = PD
-        self.T1 = T1
-        self.T2 = T2
-        self.T2dash = T2dash
-        self.D = D
-        self.B0 = B0
-        self.B1 = B1
-        self.coil_sens = coil_sens
-        self.size = size
+        self.PD = torch.as_tensor(PD, dtype=torch.float32)
+        self.T1 = torch.as_tensor(T1, dtype=torch.float32)
+        self.T2 = torch.as_tensor(T2, dtype=torch.float32)
+        self.T2dash = torch.as_tensor(T2dash, dtype=torch.float32)
+        self.D = torch.as_tensor(D, dtype=torch.float32)
+        self.B0 = torch.as_tensor(B0, dtype=torch.float32)
+        self.B1 = torch.as_tensor(B1, dtype=torch.float32)
+        self.coil_sens = torch.as_tensor(coil_sens, dtype=torch.float32)
+        self.size = torch.as_tensor(size, dtype=torch.float32)
 
     def build(self, PD_threshold: float = 1e-6,
               voxel_shape: Literal["sinc", "box", "point"] = "sinc"
@@ -115,6 +136,7 @@ class VoxelGridPhantom:
             self.size[2] *
             torch.fft.fftshift(torch.fft.fftfreq(
                 int(shape[2]), device=self.PD.device)),
+            indexing="ij"
         )
 
         voxel_pos = torch.stack([
@@ -143,13 +165,18 @@ class VoxelGridPhantom:
            self.coil_sens[:, mask],
            self.size,
            voxel_pos,
-            torch.
+            torch.as_tensor(shape, device=self.PD.device) / 2 / self.size,
            dephasing_func,
            recover_func=lambda data: recover(mask, data)
        )
-
+
     @classmethod
     def brainweb(cls, file_name: str) -> VoxelGridPhantom:
+        warn("brainweb() will be removed in a future version, use load() instead", DeprecationWarning)
+        return cls.load(file_name)
+
+    @classmethod
+    def load(cls, file_name: str) -> VoxelGridPhantom:
         """Load a phantom from data produced by `generate_maps.py`."""
         with np.load(file_name) as data:
             T1 = torch.tensor(data['T1_map'])
@@ -158,28 +185,21 @@ class VoxelGridPhantom:
             PD = torch.tensor(data['PD_map'])
             D = torch.tensor(data['D_map'])
             try:
-
+                B0 = torch.tensor(data['B0_map'])
+                B1 = torch.tensor(data['B1_map'])
+            except KeyError:
+                B0, B1 = generate_B0_B1(PD)
+            try:
+                size = torch.tensor(data['FOV'], dtype=torch.float)
             except KeyError:
                 size = torch.tensor([0.192, 0.192, 0.192])
-
-
-
-
-                torch.linspace(-1, 1, PD.shape[0]),
-                torch.linspace(-1, 1, PD.shape[1]),
-                torch.linspace(-1, 1, PD.shape[2]),
-                indexing="ij"
-            )
-        B1 = torch.exp(-(0.4*x_pos**2 + 0.2*y_pos**2 + 0.3*z_pos**2))
-        dist2 = (0.4*x_pos**2 + 0.2*(y_pos - 0.7)**2 + 0.3*z_pos**2)
-        B0 = 7 / (0.05 + dist2) - 45 / (0.3 + dist2)
-        # Normalize such that the weighted average is 0 or 1
-        weight = PD / PD.sum()
-        B0 -= (B0 * weight).sum()
-        B1 /= (B1 * weight).sum()
+
+        if B1.ndim == 3:
+            # Add coil-dimension
+            B1 = B1[None, ...]
 
         return cls(
-            PD, T1, T2, T2dash, D, B0, B1
+            PD, T1, T2, T2dash, D, B0, B1,
             torch.ones(1, *PD.shape), size,
         )
 
@@ -189,6 +209,7 @@ class VoxelGridPhantom:
         file_name: str,
         T2dash: float | torch.Tensor = 0.03,
         D: float | torch.Tensor = 1.0,
+        size = [0.2, 0.2, 8e-3]
     ) -> VoxelGridPhantom:
         """Load a :class:`VoxelGridPhantom` from a .mat file.
 
@@ -225,15 +246,15 @@ class VoxelGridPhantom:
         """
         data = _load_tensor_from_mat(file_name)
 
-        # TODO: Better handling of data not included in .mat
         if data.ndim < 2 or data.shape[-1] != 5:
             raise Exception(
                 f"Expected a tensor with shape [..., 5], "
                 f"but got {list(data.shape)}"
             )
 
-
-
+        if data.ndim == 3:
+            # Expand to 3D: [x, y, i] -> [x, y, z, i]
+            data = data.unsqueeze(2)
 
         if isinstance(T2dash, float):
             T2dash = torch.full_like(data[..., 0], T2dash)
@@ -249,7 +270,7 @@ class VoxelGridPhantom:
            data[..., 3], # B0
            data[..., 4][None, ...], # B1
            coil_sens=torch.ones(1, *data.shape[:-1]),
-            size=torch.
+            size=torch.as_tensor(size),
        )
 
     def slices(self, slices: list[int]) -> VoxelGridPhantom:
@@ -387,35 +408,36 @@ class VoxelGridPhantom:
         plt.figure(figsize=(12, 10))
         plt.subplot(331)
         plt.title("PD")
-
+
+        imshow(self.PD, vmin=0)
         plt.colorbar()
         plt.subplot(332)
         plt.title("T1")
-
+        imshow(self.T1, vmin=0)
         plt.colorbar()
         plt.subplot(333)
         plt.title("T2")
-
+        imshow(self.T2, vmin=0)
         plt.colorbar()
         plt.subplot(334)
         plt.title("T2'")
-
+        imshow(self.T2dash, vmin=0)
         plt.colorbar()
         plt.subplot(335)
         plt.title("D")
-
+        imshow(self.D, vmin=0)
         plt.colorbar()
         plt.subplot(337)
         plt.title("B0")
-
+        imshow(self.B0)
         plt.colorbar()
         plt.subplot(338)
         plt.title("B1")
-
+        imshow(self.B1[0, ...])
         plt.colorbar()
         plt.subplot(339)
         plt.title("coil sens")
-
+        imshow(self.coil_sens[0, ...], vmin=0)
         plt.colorbar()
         plt.show()
 
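With this release, loading a phantom goes through the new `load()` classmethod; `brainweb()` only forwards to it with a `DeprecationWarning`, and archives without `B0_map`/`B1_map`/`FOV` entries fall back to `generate_B0_B1()` and a 0.192 m field of view. A hedged usage sketch (the file name is a placeholder for any archive produced by the brainweb module, and the final call assumes the plotting method shown above is named `plot()`):

    import MRzeroCore as mr0

    phantom = mr0.VoxelGridPhantom.load("subject05_3T.npz")  # placeholder file name
    data = phantom.build()   # SimData for the simulation passes
    phantom.plot()           # maps are now drawn via util.imshow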
MRzeroCore/sequence.py
CHANGED
@@ -390,7 +390,7 @@ class Sequence(list):
         >>> contrast_reco = reco(signal[mask], kspace[mask])
         """
         return torch.cat(
-            [rep.adc_usage[rep.adc_usage
+            [rep.adc_usage[rep.adc_usage > 0] == contrast for rep in self]
         )
 
     def get_contrasts(self) -> list[int]:
MRzeroCore/simulation/isochromat_sim.py
CHANGED
@@ -16,6 +16,7 @@ Note that this still uses box shaped voxels, which is generally discouraged.
 """
 
 from __future__ import annotations
+from typing import Literal
 import torch
 from numpy import pi
 
@@ -26,9 +27,14 @@ from ..phantom.sim_data import SimData
 def isochromat_sim(seq: Sequence, data: SimData, spin_count: int,
                    perfect_spoiling=False,
                    print_progress: bool = True,
+                   spin_dist: Literal["r2", "rand"] = "rand",
+                   r2_seed = None
                    ) -> torch.Tensor:
-    """
-
+    """Simulate ``seq`` on ``data`` with ``spin_count`` spins per voxel.
+
+    The intra-voxel spin distribution is randomized, except if
+    `spin_dist = "r2"` and a fixed `r2_seed` are chosen. For a deterministic
+    distribution of spins, call `torch.manual_seed()` before this function.
 
     Parameters
     ----------
@@ -40,7 +46,16 @@ def isochromat_sim(seq: Sequence, data: SimData, spin_count: int,
         Number of spins used for simulation
     perfect_spoiling: bool
         If ``True``, the transversal magnetization is set to zero on excitation
-
+    print_progress: bool
+        If ``True``, the currently simulated repetition is printed
+    spin_dist: "r2" | "rand"
+        Use either a golden-ratio pseudo-random blue-noise like or
+        a white-noise like intra-voxel distribution of spins
+    r2_seed: None | torch.Tensor
+        The seed and position of the first spin for the blue-noise like spin
+        distribution. If ``None``, a random position is chosen. Expects a
+        tensor with 3 floats in the range of ``[0, 1]``
+
     Returns
     -------
     torch.Tensor
@@ -54,16 +69,25 @@ def isochromat_sim(seq: Sequence, data: SimData, spin_count: int,
     # Fallback voxel size
     voxel_size = torch.tensor([0.1, 0.1, 0.1], device=data.device)
 
-
-
-
-
-
-
-
-
-
-
+    if spin_dist == "rand":
+        spin_pos = torch.rand(spin_count, 3)
+    elif spin_dist == "r2":
+        if r2_seed is None:
+            r2_seed = torch.rand(3)
+
+        # 3 dimensional R2 sequence for intravoxel spin distribution
+        g = 1.22074408460575947536  # 3D
+        # g = 1.32471795724474602596  # 2D
+        a = 1.0 / torch.tensor([g**1, g**2, g**3], device=data.device)
+        indices = torch.arange(spin_count, device=data.device)
+        spin_pos = torch.stack([
+            (r2_seed[0] + a[0] * indices) % 1,
+            (r2_seed[1] + a[1] * indices) % 1,
+            (r2_seed[2] + a[2] * indices) % 1
+        ])
+    else:
+        raise ValueError("unexpected spin_dist", spin_dist)
+
     # spin_pos = torch.rand_like(spin_pos)  # Use white noise
     spin_pos = 2 * pi * (spin_pos - 0.5) * voxel_size.unsqueeze(1)
 
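The new `spin_dist="r2"` option draws the intra-voxel spin positions from an R-sequence, a quasi-random ("blue-noise like") lattice built from the generalized golden ratio, instead of uniform white noise; with a fixed `r2_seed` this makes the distribution reproducible. A standalone sketch of the same construction (function name and output shape are illustrative, not part of the package API):

    import torch

    def r2_spin_positions(spin_count, seed=None):
        """Quasi-random points in [0, 1)^3, as used by spin_dist="r2" above."""
        if seed is None:
            seed = torch.rand(3)
        g = 1.22074408460575947536          # root of g**4 = g + 1 (3D case)
        a = 1.0 / torch.tensor([g, g**2, g**3])
        n = torch.arange(spin_count)
        # Each coordinate advances by an irrational step and wraps into [0, 1)
        return torch.stack([(seed[d] + a[d] * n) % 1 for d in range(3)], dim=1)

    pos = r2_spin_positions(1000)           # shape (1000, 3), evenly covering the unit cube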
MRzeroCore/simulation/main_pass.py
CHANGED
@@ -12,12 +12,25 @@ import numpy as np
 # has can extract all information it wants during simulation.
 
 
+def rigid_motion(voxel_pos, motion_func):
+    """Shape of returned tensor: events x voxels x 3"""
+    def voxel_motion(time):
+        # rot: events x 3 x 3, offset: events x 3
+        rot, offset = motion_func(time)
+        rot = rot.to(device=voxel_pos.device)
+        offset = offset.to(device=voxel_pos.device)
+        return torch.einsum("vi, eij -> evj", voxel_pos, rot) + offset[:, None, :]
+
+    return voxel_motion
+
+
 def execute_graph(graph: Graph,
                   seq: Sequence,
                   data: SimData,
                   min_emitted_signal: float = 1e-2,
                   min_latent_signal: float = 1e-2,
                   print_progress: bool = True,
+                  return_mag_p: int | bool | None = None,
                   return_mag_z: int | bool | None = None,
                   ) -> torch.Tensor | list:
     """Calculate the signal of the sequence by executing the phase graph.
@@ -50,6 +63,14 @@ def execute_graph(graph: Graph,
         The longitudinal magnetisation of the specified or all repetition(s).
 
     """
+    # This is a function that maps time to voxel positions.
+    # If it is defined, motion is simulated, otherwise the static data.voxel_pos is used
+    t0 = 0
+    voxel_pos_func = data.voxel_motion
+    if voxel_pos_func is None and data.phantom_motion is not None:
+        voxel_pos_func = rigid_motion(data.voxel_pos, data.phantom_motion)
+
+
     if seq.normalized_grads:
         grad_scale = 1 / data.size
     else:
@@ -71,6 +92,7 @@ def execute_graph(graph: Graph,
     # Calculate kt_vec ourselves for autograd
     graph[0][0].kt_vec = torch.zeros(4, device=data.device)
 
+    mag_p = []
     mag_z = []
     for i, (dists, rep) in enumerate(zip(graph[1:], seq)):
         if print_progress:
@@ -85,7 +107,7 @@ def execute_graph(graph: Graph,
             B1 = data.B1.sum(0)
         else:
             assert shim_array.shape[0] == data.B1.shape[0]
-            shim = shim_array[:, 0] * torch.exp(1j * shim_array[:, 1])
+            shim = shim_array[:, 0] * torch.exp(-1j * shim_array[:, 1])
             B1 = (data.B1 * shim[:, None]).sum(0)
 
         angle = angle * B1.abs()
@@ -118,7 +140,8 @@ def execute_graph(graph: Graph,
             raise ValueError(f"Unknown transform {ancestor[0]}")
 
         # shape: events x coils
-
+        adc = rep.adc_usage > 0
+        rep_sig = torch.zeros(adc.sum(), coil_count,
                               dtype=torch.cfloat, device=data.device)
 
         # shape: events x 4
@@ -134,6 +157,18 @@ def execute_graph(graph: Graph,
         # Use the same adc phase for all coils
         adc_rot = torch.exp(1j * rep.adc_phase).unsqueeze(1)
 
+        # Calculate the additional phase carried of voxels because of motion
+        motion_phase = 0
+        if voxel_pos_func is not None:
+            time = t0 + torch.cat([torch.zeros(1, device=data.device), trajectory[:, 3]])
+            # Shape: events x voxels x 3
+            voxel_traj = voxel_pos_func((time[:-1] + time[1:]) / 2) - data.voxel_pos[None, :, :]
+            # Shape: events x voxels
+            motion_phase = torch.einsum("evi, ei -> ev", voxel_traj, rep.gradm * grad_scale[None, :]).cumsum(0)
+        t0 += total_time
+
+        mag_p_rep = []
+        mag_p.append(mag_p_rep)
         mag_z_rep = []
         mag_z.append(mag_z_rep)
         for dist in dists:
@@ -148,6 +183,8 @@ def execute_graph(graph: Graph,
                 continue # skip dists for which no ancestors were simulated
 
             dist.mag = sum([calc_mag(ancestor) for ancestor in ancestors])
+            if dist.dist_type == '+' and return_mag_p in [i, True]:
+                mag_p_rep.append(dist.mag)
            if dist.dist_type in ['z0', 'z'] and return_mag_z in [i, True]:
                mag_z_rep.append(dist.mag)
 
@@ -169,7 +206,9 @@ def execute_graph(graph: Graph,
            k1[0, :] = dist.kt_vec[:3]
            k1[1:, :] = k2[:-1, :]
            # Integrate over each event to get b factor (lin. interp. grad)
-
+            # Gradients are in rotations / meter, but we need rad / meter,
+            # as integrating over exp(-ikr) assumes that kr is a phase in rad
+            b = 1/3 * (2 * torch.pi)**2 * dt * (k1**2 + k1*k2 + k2**2).sum(1)
            # shape: events x voxels
            diffusion = torch.exp(-1e-9 * data.D * torch.cumsum(b, 0)[:, None])
 
@@ -186,21 +225,27 @@ def execute_graph(graph: Graph,
            # just by switching 2pi * (pos @ grad) to 2pi * pos @ grad
 
            if dist.dist_type == '+' and dist.emitted_signal >= min_emitted_signal:
-
-
-
+                adc_dist_traj = dist_traj[adc, :]
+                if isinstance(motion_phase, torch.Tensor):
+                    adc_motion_phase = motion_phase[adc, :]
+                else:
+                    adc_motion_phase = motion_phase
+
+                T2 = torch.exp(-trajectory[adc, 3:] / torch.abs(data.T2))
+                T2dash = torch.exp(-torch.abs(adc_dist_traj[:, 3:]) / torch.abs(data.T2dash))
                rot = torch.exp(2j * np.pi * (
-                    (
-                    (
+                    (adc_dist_traj[:, 3:] * data.B0) +
+                    (adc_dist_traj[:, :3] @ data.voxel_pos.T) +
+                    adc_motion_phase
                ))
                dephasing = data.dephasing_func(
-
+                    adc_dist_traj[:, :3], data.nyquist)[:, None]
 
                # shape: events x voxels
                transverse_mag = (
                    # Add event dimension
                    1.41421356237 * dist.mag.unsqueeze(0)
-                    * rot * T2 * T2dash * diffusion * dephasing
+                    * rot * T2 * T2dash * diffusion[adc, :] * dephasing
                )
 
                # (events x voxels) @ (voxels x coils) = (events x coils)
@@ -208,8 +253,10 @@ def execute_graph(graph: Graph,
                rep_sig += dist_signal
 
            if dist.dist_type == '+':
-                # Diffusion for whole trajectory + T2 relaxation
-                dist.mag = dist.mag * r2 * diffusion[-1, :]
+                # Diffusion for whole trajectory + T2 relaxation + final phase carried by motion
+                dist.mag = dist.mag * r2 * diffusion[-1, :]
+                if isinstance(motion_phase, torch.Tensor):
+                    dist.mag = dist.mag * torch.exp(2j * np.pi * motion_phase[-1, :])
                dist.kt_vec = dist_traj[-1]
            else: # z or z0
                k = torch.linalg.vector_norm(dist.kt_vec[:3])
@@ -218,7 +265,7 @@ def execute_graph(graph: Graph,
            if dist.dist_type == 'z0':
                dist.mag = dist.mag + 1 - r1
 
-        rep_sig *= adc_rot
+        rep_sig *= adc_rot[adc]
 
        # Remove ancestors to save memory as we don't need them anymore.
        # When running with autograd this doesn't change memory consumption
@@ -232,11 +279,14 @@ def execute_graph(graph: Graph,
     if print_progress:
         print(" - done")
 
-
-
-
-
-
+    measured = torch.cat(signal)
+
+    if return_mag_p is not None:
+        if return_mag_z is not None:
+            return measured, mag_p, mag_z
+        else:
+            return measured, mag_p
+    elif return_mag_z is not None:
         return measured, mag_z
     else:
         return measured
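`rigid_motion()` wraps a time-dependent rigid transform into the per-event voxel trajectory that `execute_graph()` consumes whenever `data.phantom_motion` (or a custom `data.voxel_motion`) is set on the `SimData`. A hedged sketch of a compatible callback; the rotation and drift amplitudes are made up purely for illustration:

    import torch

    def phantom_motion(time):
        """For a 1D tensor of time points [s], return rotation matrices
        (events x 3 x 3) and offsets (events x 3), the shapes documented
        in rigid_motion() above."""
        n = time.numel()
        angle = 0.05 * torch.sin(2 * torch.pi * 0.25 * time)  # slow in-plane wobble [rad]
        cos, sin = torch.cos(angle), torch.sin(angle)
        rot = torch.zeros(n, 3, 3)
        rot[:, 0, 0], rot[:, 0, 1] = cos, -sin
        rot[:, 1, 0], rot[:, 1, 1] = sin, cos
        rot[:, 2, 2] = 1.0
        offset = torch.zeros(n, 3)
        offset[:, 1] = 1e-3 * time                            # 1 mm/s drift along y
        return rot, offset

    # data.phantom_motion = phantom_motion  # picked up by execute_graph()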
MRzeroCore/simulation/pre_pass.py
CHANGED
@@ -1,4 +1,5 @@
 from __future__ import annotations
+from warnings import warn
 import torch
 import numpy as np
 import matplotlib.pyplot as plt
@@ -76,6 +77,9 @@ def compute_graph_ext(
         torch.cos(angle),
         torch.sin(angle/2)**2
     ], dim=1).type(torch.float32)
+
+    if any(rep.pulse.angle > 2*np.pi for rep in seq):
+        warn("Some flip angles are > 360°, inhomogeneities produced by extra rotations are ignored by the pre-pass B1 estimation")
 
     return Graph(_prepass.compute_graph(
         seq,
@@ -107,7 +111,8 @@ class Graph(list):
         y-position of a state in the scatter plot
     color : str
         Use one of ``['abs(mag)', 'phase(mag)', 'latent signal', 'signal',
-        'emitted signal']``
+        'latent signal unormalized', 'emitted signal']``
+        as the color of a state in the scatter plot
     log_color : bool
         If true, use the logarithm of the chosen property for coloring
     """
@@ -123,8 +128,12 @@ class Graph(list):
            value = state.latent_signal
        elif color == "signal":
            value = state.signal
+        elif color == "latent signal unormalized":
+            value = state.latent_signal_unormalized
        elif color == "emitted signal":
            value = state.emitted_signal
+        else:
+            raise AttributeError(f"Unknown property color={color}")
        if log_color:
            value = np.log10(np.abs(value) + 1e-7)
        return value
MRzeroCore/util.py
CHANGED
@@ -1,6 +1,6 @@
 import os
 import time
-from typing import Literal
+from typing import Literal, Union
 import torch
 import numpy as np
 
@@ -192,8 +192,7 @@ def pulseq_plot(seq: pp.Sequence,
         if is_valid:
             if getattr(block, 'adc', None) is not None:
                 adc = block.adc
-                t = adc.delay +
-                     for x in range(0, int(adc.num_samples))]
+                t = [(adc.delay + x * adc.dwell) for x in range(0, int(adc.num_samples))]
                 sp11.plot((t0 + t), np.zeros(len(t)), 'rx')
                 # >>>> Changed: store adc samples <<<<
                 t_adc = np.append(t_adc, t0 + t)
@@ -314,3 +313,32 @@ def pulseq_plot(seq: pp.Sequence,
 
     # New: return plot axes and adc time points
     return sp11, t_adc
+
+
+def imshow(data: Union[np.ndarray, torch.Tensor], *args, **kwargs):
+    """Alternative to matplotlib's `imshow`.
+
+    This function applies quadratic coil combine on 4D data and prints 3D
+    data as a grid of slices. Also prints x-axis horizontal and y vertiacl.
+
+    Assumes data to be indexed [c, x, y, z]"""
+    data = torch.as_tensor(data).detach().cpu()
+    assert 2 <= data.ndim <= 4
+
+    # Coil combine 4D data
+    if data.ndim == 4:
+        data = (data.abs()**2).sum(0)**0.5
+
+    # Shape 3D data into grid
+    if data.ndim == 3:
+        rows = int(np.floor(data.shape[2]**0.5))
+        cols = int(np.ceil(data.shape[2] / rows))
+
+        tmp = data
+        data = torch.zeros((tmp.shape[0] * cols, tmp.shape[1] * rows), dtype=tmp.dtype)
+        for i in range(tmp.shape[2]):
+            x = (i % cols)*tmp.shape[0]
+            y = ((rows * cols - i - 1) // cols)*tmp.shape[1]
+            data[x:x+tmp.shape[0], y:y+tmp.shape[1]] = tmp[:, :, i]
+
+    plt.imshow(data.T, *args, origin="lower", **kwargs)
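The new `util.imshow` wraps `plt.imshow` for the map shapes used throughout the package: 4D `[coil, x, y, z]` input is root-sum-of-squares coil combined, 3D input is tiled into a mosaic of slices, and the result is drawn with x horizontal and y vertical. A brief hedged usage sketch with placeholder data:

    import torch
    import matplotlib.pyplot as plt
    import MRzeroCore as mr0

    volume = torch.rand(64, 64, 8)   # placeholder 3D map, e.g. a phantom's PD
    mr0.util.imshow(volume, vmin=0)  # tiled into a grid of 8 slices
    plt.colorbar()
    plt.show()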
{MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/METADATA
CHANGED
@@ -1,18 +1,18 @@
-Metadata-Version: 2.
+Metadata-Version: 2.3
 Name: MRzeroCore
-Version: 0.
+Version: 0.3.1
 Classifier: Programming Language :: Rust
 Classifier: Programming Language :: Python :: Implementation :: CPython
 Classifier: Programming Language :: Python :: Implementation :: PyPy
 Classifier: License :: OSI Approved :: GNU Affero General Public License v3
-Requires-Dist: torch>=1.12
+Requires-Dist: torch >=1.12
 Requires-Dist: pypulseq
-Requires-Dist: matplotlib>=3.5
-Requires-Dist: scipy>=1.7
-Requires-Dist: requests>=2.20
+Requires-Dist: matplotlib >=3.5
+Requires-Dist: scipy >=1.7
+Requires-Dist: requests >=2.20
 Requires-Dist: scikit-image
 Requires-Dist: torchkbnufft
-Requires-Dist: pydisseqt>=0.1.4
+Requires-Dist: pydisseqt >=0.1.4
 License-File: LICENSE
 Summary: Core functionality of MRzero
 Author-email: Jonathan Endres <jonathan.endres@uk-erlangen.de>
{MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/RECORD
CHANGED
@@ -1,13 +1,13 @@
-MRzeroCore-0.
-MRzeroCore-0.
-MRzeroCore-0.
+MRzeroCore-0.3.1.dist-info/METADATA,sha256=VkmVanzGblXg8alZCu4Cb7EowUR2g2VdeDY2jDc5l0c,3805
+MRzeroCore-0.3.1.dist-info/WHEEL,sha256=p0nSOBx01acCQRB4KO5wmZww0xaSuuHl1nwKWZurtTU,94
+MRzeroCore-0.3.1.dist-info/license_files/LICENSE,sha256=rd_IFJ484uAluv8CigP2CpXg4l2GJLLKENqB6-RXPp4,35112
 MRzeroCore/phantom/brainweb/brainweb_data.json,sha256=El9J5dfEIwGi4_SoRif6ic6IVSMKh01jK0hNRbBYNqA,1794
 MRzeroCore/phantom/brainweb/brainweb_data_sources.txt,sha256=Sh6NFLU1bhdaD0pp12D_Hc70cA_UC1s7KdHcuoGTKuA,1853
 MRzeroCore/phantom/brainweb/output/.gitkeep,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-MRzeroCore/phantom/brainweb/__init__.py,sha256=
-MRzeroCore/phantom/custom_voxel_phantom.py,sha256=
-MRzeroCore/phantom/sim_data.py,sha256=
-MRzeroCore/phantom/voxel_grid_phantom.py,sha256
+MRzeroCore/phantom/brainweb/__init__.py,sha256=jrM_YqK5twOLMDLIYRtCMywmmQdyPyY5qmKb49QS5-Y,6942
+MRzeroCore/phantom/custom_voxel_phantom.py,sha256=oWWt-YXHmhG-wW3DxcrErWRA2szrjWpRaHRFl5Wi9RA,10427
+MRzeroCore/phantom/sim_data.py,sha256=IOq_3vaPfzwAxSLvfu6mIJGxphLiFHTQ8kpGUeKrsRc,7397
+MRzeroCore/phantom/voxel_grid_phantom.py,sha256=-JjxTMqrUDHBI4va_oV36pFUFqjFq9tIH7TvsfsovqY,17269
 MRzeroCore/pulseq/exporter.py,sha256=RlIHkXNVmQy-ywixIPeWeICnb9jq4_1_9O8HujBigLs,17226
 MRzeroCore/pulseq/exporter_v2.py,sha256=GVnhC6bmbxbYq4CFrBbL2nj98B_u3OVglOt50J19pkk,30554
 MRzeroCore/pulseq/helpers.py,sha256=ZEtctXy7qm6BLexcwNRDd8xPSBFNiy5dUeifOo5bYkw,8774
@@ -26,11 +26,11 @@ MRzeroCore/pulseq/pulseq_loader/pulseq_file/__init__.py,sha256=VzN3j7pirpI8zJdfv
 MRzeroCore/pulseq/pulseq_loader/spoiler.py,sha256=R6Hjfno3U-fuICjLUUxmDk_7-Z1HjZYmE55mXTWTIAI,1007
 MRzeroCore/pulseq/pulseq_loader/__init__.py,sha256=6OLwC3k9LSjl8aRin9pV4-Rb-Q3YSbToayyeKbXnUdQ,2812
 MRzeroCore/reconstruction.py,sha256=LGk5EdgQ4AkhWN-7Q81YMkIEEW0T9Q37O2Pat0d-d4o,4277
-MRzeroCore/sequence.py,sha256=
-MRzeroCore/simulation/isochromat_sim.py,sha256=
-MRzeroCore/simulation/main_pass.py,sha256
-MRzeroCore/simulation/pre_pass.py,sha256=
-MRzeroCore/util.py,sha256=
-MRzeroCore/__init__.py,sha256=
-MRzeroCore/_prepass.pyd,sha256=
-MRzeroCore-0.
+MRzeroCore/sequence.py,sha256=uQgNcOICtcdSWXhv9SqBWu-FdJaK6Q6UgkIBHfULsqI,27646
+MRzeroCore/simulation/isochromat_sim.py,sha256=jevQa38CZG_IodgU7LZMz4NKapm6iyENybaBr7dMl8w,10339
+MRzeroCore/simulation/main_pass.py,sha256=-uZCAhyHbGTPEJb6b-XAh75V7hYj6SzHMcYkWwWDGZ4,12140
+MRzeroCore/simulation/pre_pass.py,sha256=r_6XIWBEYW7YF2vM9LOR7Stu9nlcnT-CMM5tyjT7yOU,5589
+MRzeroCore/util.py,sha256=qYrw88OJQ_x0iA0rtqT1RHbDk_929YLRt4tdW3m7YIY,12494
+MRzeroCore/__init__.py,sha256=NYaEvn0xFI4EBX0PQrVnAB55VtocPcTkO0p1_P8foDI,770
+MRzeroCore/_prepass.pyd,sha256=j_RTkM-BO8PCGMKR-YaRRNl89VG_L1iAH5pbq2FegOA,412160
+MRzeroCore-0.3.1.dist-info/RECORD,,
{MRzeroCore-0.2.12.dist-info → MRzeroCore-0.3.1.dist-info}/license_files/LICENSE
File without changes