AOT-biomaps 2.1.3__py3-none-any.whl → 2.9.233__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of AOT-biomaps might be problematic; review the changes listed below for details.

Files changed (50)
  1. AOT_biomaps/AOT_Acoustic/AcousticEnums.py +64 -0
  2. AOT_biomaps/AOT_Acoustic/AcousticTools.py +221 -0
  3. AOT_biomaps/AOT_Acoustic/FocusedWave.py +244 -0
  4. AOT_biomaps/AOT_Acoustic/IrregularWave.py +66 -0
  5. AOT_biomaps/AOT_Acoustic/PlaneWave.py +43 -0
  6. AOT_biomaps/AOT_Acoustic/StructuredWave.py +392 -0
  7. AOT_biomaps/AOT_Acoustic/__init__.py +15 -0
  8. AOT_biomaps/AOT_Acoustic/_mainAcoustic.py +978 -0
  9. AOT_biomaps/AOT_Experiment/Focus.py +55 -0
  10. AOT_biomaps/AOT_Experiment/Tomography.py +505 -0
  11. AOT_biomaps/AOT_Experiment/__init__.py +9 -0
  12. AOT_biomaps/AOT_Experiment/_mainExperiment.py +532 -0
  13. AOT_biomaps/AOT_Optic/Absorber.py +24 -0
  14. AOT_biomaps/AOT_Optic/Laser.py +70 -0
  15. AOT_biomaps/AOT_Optic/OpticEnums.py +17 -0
  16. AOT_biomaps/AOT_Optic/__init__.py +10 -0
  17. AOT_biomaps/AOT_Optic/_mainOptic.py +204 -0
  18. AOT_biomaps/AOT_Recon/AOT_Optimizers/DEPIERRO.py +191 -0
  19. AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py +106 -0
  20. AOT_biomaps/AOT_Recon/AOT_Optimizers/MAPEM.py +456 -0
  21. AOT_biomaps/AOT_Recon/AOT_Optimizers/MLEM.py +333 -0
  22. AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py +221 -0
  23. AOT_biomaps/AOT_Recon/AOT_Optimizers/__init__.py +5 -0
  24. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Huber.py +90 -0
  25. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Quadratic.py +86 -0
  26. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/RelativeDifferences.py +59 -0
  27. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/__init__.py +3 -0
  28. AOT_biomaps/AOT_Recon/AlgebraicRecon.py +1023 -0
  29. AOT_biomaps/AOT_Recon/AnalyticRecon.py +154 -0
  30. AOT_biomaps/AOT_Recon/BayesianRecon.py +230 -0
  31. AOT_biomaps/AOT_Recon/DeepLearningRecon.py +35 -0
  32. AOT_biomaps/AOT_Recon/PrimalDualRecon.py +210 -0
  33. AOT_biomaps/AOT_Recon/ReconEnums.py +375 -0
  34. AOT_biomaps/AOT_Recon/ReconTools.py +273 -0
  35. AOT_biomaps/AOT_Recon/__init__.py +11 -0
  36. AOT_biomaps/AOT_Recon/_mainRecon.py +288 -0
  37. AOT_biomaps/Config.py +95 -0
  38. AOT_biomaps/Settings.py +45 -13
  39. AOT_biomaps/__init__.py +271 -18
  40. aot_biomaps-2.9.233.dist-info/METADATA +22 -0
  41. aot_biomaps-2.9.233.dist-info/RECORD +43 -0
  42. {AOT_biomaps-2.1.3.dist-info → aot_biomaps-2.9.233.dist-info}/WHEEL +1 -1
  43. AOT_biomaps/AOT_Acoustic.py +0 -1881
  44. AOT_biomaps/AOT_Experiment.py +0 -541
  45. AOT_biomaps/AOT_Optic.py +0 -219
  46. AOT_biomaps/AOT_Reconstruction.py +0 -1416
  47. AOT_biomaps/config.py +0 -54
  48. AOT_biomaps-2.1.3.dist-info/METADATA +0 -20
  49. AOT_biomaps-2.1.3.dist-info/RECORD +0 -11
  50. {AOT_biomaps-2.1.3.dist-info → aot_biomaps-2.9.233.dist-info}/top_level.txt +0 -0
@@ -1,1416 +0,0 @@
1
- import subprocess
2
- import os
3
- import numpy as np
4
- from abc import ABC, abstractmethod
5
- import enum
6
- import AOT_biomaps
7
- import matplotlib.pyplot as plt
8
- from tqdm import trange
9
- from .config import config
10
- import matplotlib.animation as animation
11
- from IPython.display import HTML
12
- import sys
13
- from datetime import datetime
14
- from tempfile import gettempdir
15
- from skimage.metrics import structural_similarity as ssim
16
- if config.get_process() == 'gpu':
17
- import torch
18
- try:
19
- from torch_scatter import scatter
20
- from torch_sparse import coalesce
21
- except ImportError:
22
- raise ImportError("torch_scatter and torch_sparse are required for GPU processing. Please install them using 'pip install torch-scatter torch-sparse' with correct link (follow instructions https://github.com/LucasDuclos/AcoustoOpticTomography/edit/main/README.md).")
23
- from numba import njit, prange
24
- import numba
25
- from skimage.metrics import structural_similarity as ssim
26
- from scipy.integrate import trapz
27
- from .AOT_Experiment import *
28
- from .AOT_Optic import *
29
- from .AOT_Acoustic import *
30
-
31
- class ReconType(enum.Enum):
32
- """
33
- Enum for different reconstruction types.
34
-
35
- Selection of reconstruction types:
36
- - Analytic: A reconstruction method based on analytical solutions.
37
- - Algebraic: A reconstruction method using algebraic techniques.
38
- - Iterative: A reconstruction method that iteratively refines the solution.
39
- - Bayesian: A reconstruction method based on Bayesian statistical approaches.
40
- - DeepLearning: A reconstruction method utilizing deep learning algorithms.
41
- """
42
-
43
- Analytic = 'analytic'
44
- """A reconstruction method based on analytical solutions."""
45
- Iterative = 'iterative'
46
- """A reconstruction method that iteratively refines the solution."""
47
- Bayesian = 'bayesian'
48
- """A reconstruction method based on Bayesian statistical approaches."""
49
- DeepLearning = 'deep_learning'
50
- """A reconstruction method utilizing deep learning algorithms."""
51
-
52
- class AnalyticType(enum.Enum):
53
- iFOURIER = 'iFOURIER'
54
- """
55
- This analytic reconstruction type uses the inverse Fourier transform to reconstruct the image.
56
- It is suitable for data that can be represented in the frequency domain.
57
- It is typically used for data that has been transformed into the frequency domain, such as in Fourier optics.
58
- It is not suitable for data that has not been transformed into the frequency domain.
59
- """
60
- iRADON = 'iRADON'
61
- """
62
- This analytic reconstruction type uses the inverse Radon transform to reconstruct the image.
63
- It is suitable for data that has been transformed into the Radon domain, such as in computed tomography (CT).
64
- It is typically used for data that has been transformed into the Radon domain, such as in CT.
65
- It is not suitable for data that has not been transformed into the Radon domain.
66
- """
67
-
68
- class IterativeType(enum.Enum):
69
- MLEM = 'MLEM'
70
- """
71
- This optimizer is the standard MLEM (for Maximum Likelihood Expectation Maximization).
72
- It is numerically implemented in the multiplicative form (as opposed to the gradient form).
73
- It truncates negative data to 0 to satisfy the positivity constraint.
74
- If subsets are used, it naturally becomes the OSEM optimizer.
75
-
76
- With transmission data, the log-converted pre-corrected data are used as in J. Nuyts et al:
77
- "Iterative reconstruction for helical CT: a simulation study", Phys. Med. Biol., vol. 43, pp. 729-737, 1998.
78
-
79
- The following options can be used (in this particular order when provided as a list):
80
- - Initial image value: Sets the uniform voxel value for the initial image.
81
- - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
82
- - Minimum image update: Sets the minimum of the image update factor under which it stays constant.
83
- (0 or a negative value means no minimum, thus allowing a 0 update).
84
- - Maximum image update: Sets the maximum of the image update factor over which it stays constant.
85
- (0 or a negative value means no maximum).
86
-
87
- This optimizer is compatible with both histogram and list-mode data.
88
- This optimizer is compatible with both emission and transmission data.
89
- """
90
- MLTR = 'MLTR'
91
- """
92
- This optimizer is a version of the MLTR algorithm implemented from equation 16 of the paper from K. Van Slambrouck and J. Nuyts:
93
- "Reconstruction scheme for accelerated maximum likelihood reconstruction: the patchwork structure",
94
- IEEE Trans. Nucl. Sci., vol. 61, pp. 173-81, 2014.
95
-
96
- An additional empiric relaxation factor has been added onto the additive update. Its value for the first and last updates
97
- can be parameterized. Its value for all updates in between is computed linearly from these first and last provided values.
98
-
99
- Subsets can be used.
100
-
101
- Options (in order when provided as a list):
102
- - Initial image value: Sets the uniform voxel value for the initial image.
103
- - Alpha ratio: Sets the ratio between exterior and interior of the cylindrical FOV alpha values (0 value means 0 inside exterior).
104
- - Initial relaxation factor: Sets the empiric multiplicative factor on the additive update used at the first update.
105
- - Final relaxation factor: Sets the empiric multiplicative factor on the additive update used at the last update.
106
- - Non-negativity constraint: 0 if no constraint or 1 to apply the constraint during the image update.
107
-
108
- This optimizer is only compatible with histogram data and transmission data.
109
- """
110
-
111
- NEGML = 'NEGML'
112
- """
113
- This optimizer is the NEGML algorithm from K. Van Slambrouck et al, IEEE TMI, Jan 2015, vol. 34, pp. 126-136.
114
-
115
- Subsets can be used. This implementation only considers the psi parameter, but not the alpha image design parameter,
116
- which is supposed to be 1 for all voxels. It implements equation 17 of the reference paper.
117
-
118
- This algorithm allows for negative image values.
119
-
120
- Options (in order when provided as a list):
121
- - Initial image value: Sets the uniform voxel value for the initial image.
122
- - Psi: Sets the psi parameter that sets the transition from Poisson to Gaussian statistics (must be positive).
123
- (If set to 0, then it is taken to infinity and implements equation 21 in the reference paper).
124
-
125
- This optimizer is only compatible with histogram data and emission data.
126
- """
127
-
128
- OSL = 'OSL'
129
- """
130
- This optimizer is the One-Step-Late algorithm from P. J. Green, IEEE TMI, Mar 1990, vol. 9, pp. 84-93.
131
-
132
- Subsets can be used as for OSEM. It accepts penalty terms that have a derivative order of at least one.
133
- Without penalty, it is strictly equivalent to the MLEM algorithm.
134
-
135
- It is numerically implemented in the multiplicative form (as opposed to the gradient form).
136
-
137
- Options (in order when provided as a list):
138
- - Initial image value: Sets the uniform voxel value for the initial image.
139
- - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
140
- - Minimum image update: Sets the minimum of the image update factor under which it stays constant (0 or a negative value
141
- means no minimum thus allowing a 0 update).
142
- - Maximum image update: Sets the maximum of the image update factor over which it stays constant (0 or a negative value means
143
- no maximum).
144
-
145
- This optimizer is compatible with both histogram and list-mode data, and with both emission and transmission data.
146
- """
147
-
148
- PPGMLEM = 'PPGML'
149
- """
150
- This optimizer is the Penalized Preconditioned Gradient algorithm from J. Nuyts et al, IEEE TNS, Feb 2002, vol. 49, pp. 56-60.
151
-
152
- It is a heuristic but effective gradient ascent algorithm for penalized maximum-likelihood reconstruction.
153
- It addresses the shortcoming of One-Step-Late when large penalty strengths can create numerical problems.
154
- Penalty terms must have a derivative order of at least two.
155
-
156
- Subsets can be used as for OSEM. Without penalty, it is equivalent to the gradient ascent form of the MLEM algorithm.
157
-
158
- Based on likelihood gradient and penalty, a multiplicative update factor is computed and its range is limited by provided parameters.
159
- Thus, negative values cannot occur and voxels cannot be trapped into 0 values, providing the first estimate is strictly positive.
160
-
161
- Options (in order when provided as a list):
162
- - Initial image value: Sets the uniform voxel value for the initial image.
163
- - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
164
- - Minimum image update: Sets the minimum of the image update factor under which it stays constant (0 or a negative value
165
- means no minimum thus allowing a 0 update).
166
- - Maximum image update: Sets the maximum of the image update factor over which it stays constant (0 or a negative value means
167
- no maximum).
168
-
169
- This optimizer is only compatible with histogram data and emission data.
170
- """
171
-
172
- AML = 'AML'
173
- """
174
- This optimizer is the AML algorithm derived from the AB-EMML of C. Byrne, Inverse Problems, 1998, vol. 14, pp. 1455-67.
175
-
176
- The bound B is taken to infinity, so only the bound A can be parameterized.
177
- This bound must be quantitative (same unit as the reconstructed image).
178
- It is provided as a single value and thus assuming a uniform bound.
179
-
180
- This algorithm allows for negative image values in case the provided bound is also negative.
181
-
182
- Subsets can be used.
183
-
184
- With a negative or null bound, this algorithm implements equation 6 of A. Rahmim et al, Phys. Med. Biol., 2012, vol. 57, pp. 733-55.
185
- If a positive bound is provided, then we suppose that the bound A is taken to minus infinity. In that case, this algorithm implements
186
- equation 22 of K. Van Slambrouck et al, IEEE TMI, Jan 2015, vol. 34, pp. 126-136.
187
-
188
- Options (in order when provided as a list):
189
- - Initial image value: Sets the uniform voxel value for the initial image.
190
- - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
191
- - Bound: Sets the bound parameter that shifts the Poisson law (quantitative, negative or null for standard AML and positive for infinite AML).
192
-
193
- This optimizer is only compatible with histogram data and emission data.
194
- """
195
-
196
- BSREM = 'BSREM'
197
- """
198
- This optimizer is the BSREM (for Block Sequential Regularized Expectation Maximization) algorithm, in development.
199
- It follows the definition of BSREM II in Ahn and Fessler 2003.
200
-
201
- This optimizer is the Block Sequential Regularized Expectation Maximization (BSREM) algorithm from S. Ahn and
202
- J. Fessler, IEEE TMI, May 2003, vol. 22, pp. 613-626. Its abbreviated name in this paper is BSREM-II.
203
-
204
- This algorithm is the only one to have proven convergence using subsets. Its implementation is entirely based
205
- on the reference paper. It may have numerical problems when a full field-of-view is used, because of the sharp
206
- sensitivity loss at the edges of the field-of-view. As it is simply based on the gradient, penalty terms must
207
- have a derivative order of at least one. Without penalty, it reduces to OSEM but where the sensitivity is not
208
- dependent on the current subset. This is a requirement of the algorithm, explaining why it starts by computing
209
- the global sensitivity before going through iterations. The algorithm is restricted to histograms.
210
-
211
- Options:
212
- - Initial image value: Sets the uniform voxel value for the initial image.
213
- - Minimum image value: Sets the minimum allowed image value (parameter 't' in the reference paper).
214
- - Maximum image value: Sets the maximum allowed image value (parameter 'U' in the reference paper).
215
- - Relaxation factor type: Type of relaxation factors (can be one of the following: 'classic').
216
-
217
- Relaxation factors of type 'classic' correspond to what was proposed in the reference paper in equation (31).
218
- This equation gives: alpha_n = alpha_0 / (gamma * iter_num + 1)
219
- The iteration number 'iter_num' is supposed to start at 0 so that for the first iteration, alpha_0 is used.
220
- This parameter can be provided using the following keyword: 'relaxation factor classic initial value'.
221
- The 'gamma' parameter can be provided using the following keyword: 'relaxation factor classic step size'.
222
-
223
- This optimizer is only compatible with histogram data and emission data.
224
- """
225
-
226
- DEPIERRO95 = 'DEPIERRO95'
227
- """
228
- This optimizer is based on the algorithm from A. De Pierro, IEEE TMI, vol. 14, pp. 132-137, 1995.
229
-
230
- This algorithm uses optimization transfer techniques to derive an exact and convergent algorithm
231
- for maximum likelihood reconstruction including a MRF penalty with different potential functions.
232
-
233
- The algorithm is convergent and is numerically robust to high penalty strength.
234
- It is strictly equivalent to MLEM without penalty, but can be unstable with extremely low penalty strength.
235
- Currently, it only implements the quadratic penalty.
236
-
237
- To be used, a MRF penalty still needs to be defined accordingly (at least to define the neighborhood).
238
- Subsets can be used as for OSEM, without proof of convergence however.
239
-
240
- The algorithm is compatible with list-mode or histogram data.
241
-
242
- Options (in order when provided as a list):
243
- - Initial image value: Sets the uniform voxel value for the initial image.
244
- - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
245
- - Minimum image update: Sets the minimum of the image update factor under which it stays constant (0 or a negative value
246
- means no minimum thus allowing a 0 update).
247
- - Maximum image update: Sets the maximum of the image update factor over which it stays constant (0 or a negative value means
248
- no maximum).
249
-
250
- This optimizer is compatible with both histogram and list-mode data, and only with emission data.
251
- """
252
-
253
- LDWB = 'LDWB'
254
- """
255
- This optimizer implements the standard Landweber algorithm for least-squares optimization.
256
-
257
- With transmission data, it uses the log-converted model to derive the update.
258
- Be aware that the relaxation parameter is not automatically set, so it often requires some
259
- trials and errors to find an optimal setting. Also, remember that this algorithm is particularly
260
- slow to converge.
261
-
262
- Options (in order when provided as a list):
263
- - Initial image value: Sets the uniform voxel value for the initial image.
264
- - Relaxation factor: Sets the relaxation factor applied to the update.
265
- - Non-negativity constraint: 0 if no constraint or 1 in order to apply the constraint during the image update.
266
-
267
- This optimizer is only compatible with histogram data, and with both emission and transmission data.
268
- """
269
-
270
- class PotentialType(enum.Enum):
271
- """The potential function actually penalizes the difference between the voxel of interest and a neighbor:
272
- p(u, v) = p(u - v)
273
-
274
- Descriptions of potential functions:
275
- - Quadratic: p(u, v) = 0.5 * (u - v)^2
276
- - Geman-McClure: p(u, v, d) = (u - v)^2 / (d^2 + (u - v)^2)
277
- - Hebert-Leahy: p(u, v, m) = log(1 + (u - v)^2 / m^2)
278
- - Green's log-cosh: p(u, v, d) = log(cosh((u - v) / d))
279
- - Huber piecewise: p(u, v, d) = d * |u - v| - 0.5 * d^2 if |u - v| > d, else 0.5 * (u - v)^2
280
- - Nuyts relative: p(u, v, g) = (u - v)^2 / (u + v + g * |u - v|)
281
- """
282
-
283
- QUADRATIC = 'QUADRATIC'
284
- """
285
- Quadratic potential:
286
- p(u, v) = 0.5 * (u - v)^2
287
-
288
- Reference: Geman and Geman, IEEE Trans. Pattern Anal. Machine Intell., vol. PAMI-6, pp. 721-741, 1984.
289
- """
290
-
291
- GEMAN_MCCLURE = 'GEMAN_MCCLURE'
292
- """
293
- Geman-McClure potential:
294
- p(u, v, d) = (u - v)^2 / (d^2 + (u - v)^2)
295
-
296
- The parameter 'd' can be set using the 'deltaGMC' keyword.
297
-
298
- Reference: Geman and McClure, Proc. Amer. Statist. Assoc., 1985.
299
- """
300
-
301
- HEBERT_LEAHY = 'HEBERT_LEAHY'
302
- """
303
- Hebert-Leahy potential:
304
- p(u, v, m) = log(1 + (u - v)^2 / m^2)
305
-
306
- The parameter 'm' can be set using the 'muHL' keyword.
307
-
308
- Reference: Hebert and Leahy, IEEE Trans. Med. Imaging, vol. 8, pp. 194-202, 1989.
309
- """
310
-
311
- GREEN_LOGCOSH = 'GREEN_LOGCOSH'
312
- """
313
- Green's log-cosh potential:
314
- p(u, v, d) = log(cosh((u - v) / d))
315
-
316
- The parameter 'd' can be set using the 'deltaLogCosh' keyword.
317
-
318
- Reference: Green, IEEE Trans. Med. Imaging, vol. 9, pp. 84-93, 1990.
319
- """
320
-
321
- HUBER_PIECEWISE = 'HUBER_PIECEWISE'
322
- """
323
- Huber piecewise potential:
324
- p(u, v, d) = d * |u - v| - 0.5 * d^2 if |u - v| > d, else 0.5 * (u - v)^2
325
-
326
- The parameter 'd' can be set using the 'deltaHuber' keyword.
327
-
328
- Reference: e.g. Mumcuoglu et al, Phys. Med. Biol., vol. 41, pp. 1777-1807, 1996.
329
- """
330
-
331
- NUYTS_RELATIVE = 'NUYTS_RELATIVE'
332
- """
333
- Nuyts relative potential:
334
- p(u, v, g) = (u - v)^2 / (u + v + g * |u - v|)
335
-
336
- The parameter 'g' can be set using the 'gammaRD' keyword.
337
-
338
- Reference: Nuyts et al, IEEE Trans. Nucl. Sci., vol. 49, pp. 56-60, 2002.
339
- """
340
-
341
- class ProcessType(enum.Enum):
342
- CASToR = 'CASToR'
343
- PYTHON = 'PYTHON'
344
-
345
- class Recon:
346
- def __init__(self, experiment, saveDir):
347
- self.reconOpticImage = None
348
- self.experiment = experiment
349
- self.saveDir = saveDir
350
-
351
- if type(self.experiment) is not AOT_biomaps.AOT_experiment.Experiment:
352
- raise TypeError(f"Experiment must be of type {AOT_biomaps.AOT_experiment.Experiment}")
353
-
354
- @abstractmethod
355
- def run(self):
356
- pass
357
-
358
- @staticmethod
359
- def load_recon(hdr_path):
360
- """
361
- Lit un fichier Interfile (.hdr) et son fichier binaire (.img) pour reconstruire une image comme le fait Vinci.
362
-
363
- Paramètres :
364
- ------------
365
- - hdr_path : chemin complet du fichier .hdr
366
-
367
- Retour :
368
- --------
369
- - image : tableau NumPy contenant l'image
370
- - header : dictionnaire contenant les métadonnées du fichier .hdr
371
- """
372
- header = {}
373
- with open(hdr_path, 'r') as f:
374
- for line in f:
375
- if ':=' in line:
376
- key, value = line.split(':=', 1) # s'assurer qu'on ne coupe que la première occurrence de ':='
377
- key = key.strip().lower().replace('!', '') # Nettoyage des caractères
378
- value = value.strip()
379
- header[key] = value
380
-
381
- # 📘 Obtenez le nom du fichier de données associé (le .img)
382
- data_file = header.get('name of data file')
383
- if data_file is None:
384
- raise ValueError(f"Impossible de trouver le fichier de données associé au fichier header {hdr_path}")
385
-
386
- img_path = os.path.join(os.path.dirname(hdr_path), data_file)
387
-
388
- # 📘 Récupérer la taille de l'image à partir des métadonnées
389
- shape = [int(header[f'matrix size [{i}]']) for i in range(1, 4) if f'matrix size [{i}]' in header]
390
- if shape and shape[-1] == 1: # Si la 3e dimension est 1, on la supprime
391
- shape = shape[:-1] # On garde (192, 240) par exemple
392
-
393
- if not shape:
394
- raise ValueError("Impossible de déterminer la forme de l'image à partir des métadonnées.")
395
-
396
- # 📘 Déterminez le type de données à utiliser
397
- data_type = header.get('number format', 'short float').lower()
398
- dtype_map = {
399
- 'short float': np.float32,
400
- 'float': np.float32,
401
- 'int16': np.int16,
402
- 'int32': np.int32,
403
- 'uint16': np.uint16,
404
- 'uint8': np.uint8
405
- }
406
- dtype = dtype_map.get(data_type)
407
- if dtype is None:
408
- raise ValueError(f"Type de données non pris en charge : {data_type}")
409
-
410
- # 📘 Ordre des octets (endianness)
411
- byte_order = header.get('imagedata byte order', 'LITTLEENDIAN').lower()
412
- endianess = '<' if 'little' in byte_order else '>'
413
-
414
- # 📘 Vérifie la taille réelle du fichier .img
415
- img_size = os.path.getsize(img_path)
416
- expected_size = np.prod(shape) * np.dtype(dtype).itemsize
417
-
418
- if img_size != expected_size:
419
- raise ValueError(f"La taille du fichier img ({img_size} octets) ne correspond pas à la taille attendue ({expected_size} octets).")
420
-
421
- # 📘 Lire les données binaires et les reformater
422
- with open(img_path, 'rb') as f:
423
- data = np.fromfile(f, dtype=endianess + np.dtype(dtype).char)
424
-
425
- image = data.reshape(shape[::-1])
426
-
427
- # 📘 Rescale l'image si nécessaire
428
- rescale_slope = float(header.get('data rescale slope', 1))
429
- rescale_offset = float(header.get('data rescale offset', 0))
430
- image = image * rescale_slope + rescale_offset
431
-
432
- return image.T
433
-
434
- class IterativeRecon(Recon):
435
- """
436
- This class implements the iterative reconstruction process.
437
- It currently does not perform any operations but serves as a template for future implementations.
438
- """
439
- def __init__(self, **kwargs):
440
- super().__init__(**kwargs)
441
- self.theta_matrix = []
442
- self.MSEArray = []
443
- self.SSIMArray = []
444
- self.opti = IterativeType(self.experiment.params.reconstruction['IerativeType'])
445
- self.numIterations = self.experiment.params.reconstruction['numIterations']
446
- self.numSubsets = self.experiment.params.reconstruction['numSubsets']
447
-
448
- if self.numIterations <= 0:
449
- raise ValueError("Number of iterations must be greater than 0.")
450
- if self.numSubsets <= 0:
451
- raise ValueError("Number of subsets must be greater than 0.")
452
- if type(self.numIterations) is not int:
453
- raise TypeError("Number of iterations must be an integer.")
454
- if type(self.numSubsets) is not int:
455
- raise TypeError("Number of subsets must be an integer.")
456
-
457
- def run(self, reconType):
458
- """
459
- This method is a placeholder for the iterative reconstruction process.
460
- It currently does not perform any operations but serves as a template for future implementations.
461
- """
462
- if(reconType == ProcessType.CASToR):
463
- self._iterativeReconCASToR()
464
- elif(reconType == ProcessType.PYTHON):
465
- self._iterativeReconPython()
466
- else:
467
- raise ValueError(f"Unknown iterative reconstruction type: {reconType}")
468
-
469
- self._MSE_calc()
470
- self._SSIM_calc()
471
-
472
- def _load_theta_matrix(self):
473
- for thetaFiles in os.path.join(self.saveDir, 'results'):
474
- if thetaFiles.endswith('.hdr'):
475
- theta = AOT_biomaps.AOT_reconstruction.load_recon(thetaFiles)
476
- self.theta_matrix.append(theta)
477
-
478
- def _iterativeReconPython(self):
479
- if self.opti.value == IterativeType.MLEM.value:
480
- self._MLEM()
481
- self.reconOpticImage = self.theta_matrix[-1] # Use the last iteration as the final reconstruction
482
-
483
- def _iterativeReconCASToR(self):
484
-
485
- # Define variables
486
- smatrix = os.path.join(self.saveDir,"system_matrix")
487
-
488
- # Check if input file exists
489
- if not os.path.isfile(f"{self.saveDir}/AOSignals.cdh"):
490
- self.experiment._saveAOsignalForCastor(self.saveDir)
491
- # Check if system matrix directory exists
492
- elif not os.path.isdir(smatrix):
493
- os.mkdir(smatrix)
494
- # check if system matrix is empty
495
- elif not os.listdir(smatrix):
496
- self.experiment._saveSMatrixForCastor(self.saveDir)
497
-
498
- # Construct the command
499
- cmd = [
500
- self.experiment.params.reconstruction['castor_executable'],
501
- "-df", f"{self.saveDir}/AOSignals.cdh",
502
- "-opti", self.opti.value,
503
- "-it", f"{self.numIterations}:{self.numSubsets}" ,
504
- "-proj", "matrix",
505
- "-dout", os.path.join(self.saveDir, 'results','recon'),
506
- "-th", f"{ os.cpu_count()}",
507
- "-vb", "5",
508
- "-proj-comp", "1",
509
- "-ignore-scanner",
510
- "-data-type", "AOT",
511
- "-ignore-corr", "cali,fdur",
512
- "-system-matrix", smatrix,
513
- ]
514
-
515
- # Print the command
516
- print(" ".join(cmd))
517
-
518
- #save the command to a script file
519
- recon_script_path = os.path.join(gettempdir(), 'recon.sh')
520
- with open(recon_script_path, 'w') as f:
521
- f.write("#!/bin/bash\n")
522
- f.write(" ".join(cmd) + "\n")
523
-
524
- sys.exit(0)
525
-
526
- # --- Run Reconstruction Script ---
527
- print(f"Running reconstruction script: {recon_script_path}")
528
- subprocess.run(["chmod", "+x", recon_script_path], check=True)
529
- subprocess.run([recon_script_path], check=True)
530
- print("Reconstruction script executed.")
531
-
532
- self._load_theta_matrix()
533
-
534
- def _MLEM(self):
535
- """
536
- This method implements the MLEM algorithm using either basic numpy operations or PyTorch for GPU acceleration.
537
- It is called by the iterative reconstruction process.
538
- """
539
- if config.get_process() == 'gpu':
540
- self.theta_matrix = IterativeRecon._MLEM_GPU(self.experiment.A_matrix, self.experiment.AOsignal, self.numIterations* self.numSubsets)
541
- else:
542
- self.theta_matrix = IterativeRecon._MLEM_CPU(self.experiment.A_matrix, self.experiment.AOsignal, self.numIterations* self.numSubsets)
543
-
544
- def _MSE_calc(self):
545
- """
546
- Calculate the Mean Squared Error (MSE) of the reconstruction.
547
-
548
- Parameters:
549
- theta_matrix: list of (z, x) ndarray, iterative reconstructions
550
- If None, uses the last theta matrix from self.theta_matrix.
551
-
552
- Returns:
553
- mse: float, Mean Squared Error of the reconstruction
554
- """
555
- self.MSEArray = []
556
- for theta in self.theta_matrix:
557
- if not isinstance(theta, np.ndarray):
558
- raise TypeError("Theta matrix must be a numpy ndarray.")
559
- self.MSEArray.append(np.mean((self.experiment.opticImage - theta) ** 2))
560
-
561
- def _SSIM_calc(self):
562
- self.SSIMArray = []
563
- for theta in self.theta_matrix:
564
- if not isinstance(theta, np.ndarray):
565
- raise TypeError("Theta matrix must be a numpy ndarray.")
566
- ssim_value = ssim(self.experiment.opticImage, theta, data_range=theta.max() - theta.min())
567
- self.SSIMArray.append(ssim_value)
568
-
569
- def plot_MSE(self, isSaving=True):
570
- """
571
- Plot the Mean Squared Error (MSE) of the reconstruction.
572
-
573
- Parameters:
574
- MSEArray: list of float, Mean Squared Error values for each iteration
575
- If None, uses the MSEArray from self.MSEArray.
576
-
577
- Returns:
578
- None
579
- """
580
- if not self.MSEArray:
581
- raise ValueError("MSEArray is empty. Please calculate MSE first.")
582
-
583
- best_idx = np.argmin(self.MSEArray)
584
-
585
- print(f"Lowest MSE = {np.min(self.MSEArray):.4f} at iteration {best_idx+1}")
586
-
587
- # Plot MSE curve
588
- plt.figure(figsize=(7, 5))
589
- plt.plot(self.MSEArray, 'r-', label="MSE curve")
590
-
591
- # Add blue dashed lines
592
- plt.axhline(np.min(self.MSEArray), color='blue', linestyle='--', label=f"Min MSE = {np.min(self.MSEArray):.4f}")
593
- plt.axvline(best_idx+1, color='blue', linestyle='--', label=f"Iteration = {best_idx+1}")
594
-
595
- plt.xlabel("Iteration")
596
- plt.ylabel("MSE")
597
- plt.title("MSE vs. Iteration")
598
- plt.legend()
599
- plt.grid(True)
600
- plt.tight_layout()
601
-
602
- if isSaving:
603
- now = datetime.now()
604
- date_str = now.strftime("%Y_%d_%m_%y")
605
- SavingFolder = os.path.join(self.saveDir, 'results', f'MSE_plot{date_str}.png')
606
- plt.savefig(SavingFolder, dpi=300)
607
- print(f"MSE plot saved to {SavingFolder}")
608
-
609
- plt.show()
610
-
611
- def plot_MSE_bestRecon(self, isSaving=True):
612
-
613
- if not self.MSEArray:
614
- raise ValueError("MSEArray is empty. Please calculate MSE first.")
615
-
616
- best_idx = np.argmin(self.MSEArray)
617
- best_recon = self.theta_matrix[best_idx]
618
-
619
- # ----------------- Plotting -----------------
620
- fig, axs = plt.subplots(1, 3, figsize=(15, 5)) # 1 row, 3 columns
621
-
622
- # Normalization based on LAMBDA max
623
- lambda_max = np.max(self.experiment.opticImage)
624
-
625
- # Left: Best reconstructed image (normalized)
626
- im0 = axs[0].imshow(best_recon / lambda_max,
627
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
628
- cmap='hot', aspect='equal', vmin=0, vmax=1)
629
- axs[0].set_title(f"Min MSE Reconstruction\nIter {best_idx+1}, MSE={np.min(self.MSEArray):.4f}")
630
- axs[0].set_xlabel("x (mm)")
631
- axs[0].set_ylabel("z (mm)")
632
- plt.colorbar(im0, ax=axs[0])
633
-
634
- # Middle: Ground truth (normalized)
635
- im1 = axs[1].imshow(self.experiment.opticImage / lambda_max,
636
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
637
- cmap='hot', aspect='equal', vmin=0, vmax=1)
638
- axs[1].set_title(r"Ground Truth ($\lambda$)")
639
- axs[1].set_xlabel("x (mm)")
640
- axs[1].set_ylabel("z (mm)")
641
- plt.colorbar(im1, ax=axs[1])
642
-
643
- # Right: Reconstruction at iter 350
644
- lastRecon = self.theta_matrix[-1]
645
- im2 = axs[2].imshow(lastRecon / lambda_max,
646
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
647
- cmap='hot', aspect='equal', vmin=0, vmax=1)
648
- axs[2].set_title(f"Last Reconstruction\nIter {self.numIterations * self.numSubsets}, MSE={np.mean((self.experiment.opticImage - lastRecon) ** 2):.4f}")
649
- axs[2].set_xlabel("x (mm)")
650
- axs[2].set_ylabel("z (mm)")
651
- plt.colorbar(im2, ax=axs[2])
652
-
653
- plt.tight_layout()
654
- if isSaving:
655
- now = datetime.now()
656
- date_str = now.strftime("%Y_%d_%m_%y")
657
- SavingFolder = os.path.join(self.saveDir, 'results', f'comparison_MSE_BestANDLastRecon{date_str}.png')
658
- plt.savefig(SavingFolder, dpi=300)
659
- print(f"MSE plot saved to {SavingFolder}")
660
- plt.show()
661
-
662
- def plot_theta_animation(self, vmin=None, vmax=None, duration=5000, save_path=None):
663
- """
664
- Show theta iteration animation (for Jupyter) and optionally save it as a GIF.
665
-
666
- Parameters:
667
- matrix_theta: list of (z, x) ndarray, iterative reconstructions
668
- x: 1D array, x-coordinates (in meters)
669
- z: 1D array, z-coordinates (in meters)
670
- vmin, vmax: color limits (optional)
671
- duration: duration of the animation in milliseconds
672
- save_path: path to save animation (e.g., 'theta.gif' or 'theta.mp4')
673
- """
674
- if len(self.theta_matrix) == 0 or len(self.theta_matrix) == 1:
675
- raise ValueError("No theta matrix available for animation.")
676
-
677
- frames = np.array(self.theta_matrix)
678
- num_frames = len(frames)
679
-
680
- interval = max(1, int(duration / num_frames))
681
-
682
- if vmin is None:
683
- vmin = np.min(frames)
684
- if vmax is None:
685
- vmax = np.max(frames)
686
-
687
- fig, ax = plt.subplots(figsize=(5, 5))
688
- im = ax.imshow(frames[0],
689
- extent=(self.experiment.params['Xrange'][0],self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
690
- vmin=vmin, vmax=vmax,
691
- aspect='equal', cmap='hot')
692
-
693
- title = ax.set_title("Iteration 0")
694
- ax.set_xlabel("x (mm)")
695
- ax.set_ylabel("z (mm)")
696
- plt.tight_layout()
697
-
698
- def update(frame_idx):
699
- im.set_array(frames[frame_idx])
700
- title.set_text(f"Iteration {frame_idx}")
701
- return [im, title]
702
-
703
- ani = animation.FuncAnimation(fig, update, frames=num_frames, interval=interval, blit=False)
704
-
705
- if save_path:
706
- if save_path.endswith(".gif"):
707
- ani.save(save_path, writer="pillow", fps=1000 // interval)
708
- elif save_path.endswith(".mp4"):
709
- ani.save(save_path, writer="ffmpeg", fps=1000 // interval)
710
- else:
711
- raise ValueError("Unsupported file format. Use .gif or .mp4")
712
- print(f"Animation saved to {save_path}")
713
-
714
- plt.close(fig)
715
- plt.rcParams["animation.html"] = "jshtml"
716
- return HTML(ani.to_jshtml())
717
-
718
- def plot_SSIM(self, isSaving=True):
719
-
720
- if not self.SSIMArray:
721
- raise ValueError("SSIMArray is empty. Please calculate SSIM first.")
722
-
723
- best_idx = np.argmax(self.SSIMArray)
724
-
725
- print(f"Highest SSIM = {np.max(self.SSIMArray):.4f} at iteration {best_idx+1}")
726
-
727
- # Plot SSIM curve
728
- plt.figure(figsize=(7, 5))
729
- plt.plot(self.SSIMArray, 'r-', label="SSIM curve")
730
-
731
- # Add blue dashed lines
732
- plt.axhline(np.max(self.SSIMArray), color='blue', linestyle='--', label=f"Max SSIM = {np.max(self.SSIMArray):.4f}")
733
- plt.axvline(best_idx+1, color='blue', linestyle='--', label=f"Iteration = {best_idx+1}")
734
-
735
- plt.xlabel("Iteration")
736
- plt.ylabel("SSIM")
737
- plt.title("SSIM vs. Iteration")
738
- plt.legend()
739
- plt.grid(True)
740
- plt.tight_layout()
741
-
742
- if isSaving:
743
- now = datetime.now()
744
- date_str = now.strftime("%Y_%d_%m_%y")
745
- SavingFolder = os.path.join(self.saveDir, 'results', f'SSIM_plot{date_str}.png')
746
- plt.savefig(SavingFolder, dpi=300)
747
- print(f"SSIM plot saved to {SavingFolder}")
748
-
749
- plt.show()
750
-
751
- def plot_SSIM_bestRecon(self, isSaving=True):
752
-
753
- if not self.SSIMArray:
754
- raise ValueError("SSIMArray is empty. Please calculate SSIM first.")
755
-
756
- best_idx = np.argmax(self.SSIMArray)
757
- best_recon = self.theta_matrix[best_idx]
758
-
759
- # ----------------- Plotting -----------------
760
- fig, axs = plt.subplots(1, 3, figsize=(15, 5)) # 1 row, 3 columns
761
-
762
- # Normalization based on LAMBDA max
763
- lambda_max = np.max(self.experiment.opticImage)
764
-
765
- # Left: Best reconstructed image (normalized)
766
- im0 = axs[0].imshow(best_recon / lambda_max,
767
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
768
- cmap='hot', aspect='equal', vmin=0, vmax=1)
769
- axs[0].set_title(f"Max SSIM Reconstruction\nIter {best_idx+1}, SSIM={np.min(self.MSEArray):.4f}")
770
- axs[0].set_xlabel("x (mm)")
771
- axs[0].set_ylabel("z (mm)")
772
- plt.colorbar(im0, ax=axs[0])
773
-
774
- # Middle: Ground truth (normalized)
775
- im1 = axs[1].imshow(self.experiment.opticImage / lambda_max,
776
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
777
- cmap='hot', aspect='equal', vmin=0, vmax=1)
778
- axs[1].set_title(r"Ground Truth ($\lambda$)")
779
- axs[1].set_xlabel("x (mm)")
780
- axs[1].set_ylabel("z (mm)")
781
- plt.colorbar(im1, ax=axs[1])
782
-
783
- # Right: Reconstruction at iter 350
784
- lastRecon = self.theta_matrix[-1]
785
- im2 = axs[2].imshow(lastRecon / lambda_max,
786
- extent=(self.experiment.params['Xrange'][0], self.experiment.params['Xrange'][1], self.experiment.params['Zrange'][1], self.experiment.params['Zrange'][0]),
787
- cmap='hot', aspect='equal', vmin=0, vmax=1)
788
- axs[2].set_title(f"Last Reconstruction\nIter {self.numIterations * self.numSubsets}, SSIM={self.SSIMArray[-1]:.4f}")
789
- axs[2].set_xlabel("x (mm)")
790
- axs[2].set_ylabel("z (mm)")
791
- plt.colorbar(im2, ax=axs[2])
792
-
793
- plt.tight_layout()
794
- if isSaving:
795
- now = datetime.now()
796
- date_str = now.strftime("%Y_%d_%m_%y")
797
- SavingFolder = os.path.join(self.saveDir, 'results', f'comparison_SSIM_BestANDLastRecon{date_str}.png')
798
- plt.savefig(SavingFolder, dpi=300)
799
- print(f"SSIM plot saved to {SavingFolder}")
800
- plt.show()
801
-
802
- ### ALGORITHMS IMPLEMENTATION ###
803
-
804
- @staticmethod
805
- def _MLEM_CPU(A_matrix, y, numIterations):
806
- if os.cpu_count() > 1:
807
- print(f"Using {os.cpu_count()} CPU cores for MLEM.")
808
- try:
809
- recons = IterativeRecon._MLEM_CPU_multi(A_matrix, y, numIterations)
810
- if recons is None or len(recons) == 0:
811
- raise ValueError("Multi-core MLEM returned empty result.")
812
- except Exception as e:
813
- print(f"Error using multi-core MLEM: {e}. Falling back to optimized MLEM.")
814
- try:
815
- recons = IterativeRecon._MLEM_CPU_opti(A_matrix, y, numIterations)
816
- if recons is None or len(recons) == 0:
817
- raise ValueError("Optimized MLEM returned empty result.")
818
- except Exception as e:
819
- print(f"Error using optimized MLEM: {e}. Falling back to basic MLEM.")
820
- try:
821
- recons = IterativeRecon._MLEM_CPU_basic(A_matrix, y, numIterations)
822
- if recons is None or len(recons) == 0:
823
- raise ValueError("Basic MLEM returned empty result.")
824
- except Exception as e:
825
- print(f"Error using basic MLEM: {e}. All methods failed.")
826
- raise
827
- else:
828
- print("Using optimized MLEM for CPU.")
829
- try:
830
- recons = IterativeRecon._MLEM_CPU_opti(A_matrix, y, numIterations)
831
- if recons is None or len(recons) == 0:
832
- raise ValueError("Optimized MLEM returned empty result.")
833
- except Exception as e:
834
- print(f"Error using optimized MLEM: {e}. Falling back to basic MLEM.")
835
- try:
836
- recons = IterativeRecon._MLEM_CPU_basic(A_matrix, y, numIterations)
837
- if recons is None or len(recons) == 0:
838
- raise ValueError("Basic MLEM returned empty result.")
839
- except Exception as e:
840
- print(f"Error using basic MLEM: {e}. All methods failed.")
841
- raise
842
-
843
- return recons
844
-
845
- @staticmethod
846
- def _MLEM_GPU(A_matrix, y, numIterations):
847
- if not torch.cuda.is_available():
848
- print("CUDA not available, falling back to CPU.")
849
- return IterativeRecon._MLEM_CPU(A_matrix, y, numIterations)
850
-
851
- try:
852
- if torch.cuda.device_count() > 1:
853
- print(f"Using {torch.cuda.device_count()} GPUs for MLEM.")
854
- recons = IterativeRecon._MLEM_GPU_multi(A_matrix, y, numIterations)
855
- if recons is None or len(recons) == 0:
856
- raise ValueError("Multi-GPU MLEM returned empty result.")
857
- else:
858
- print("Using single GPU for MLEM.")
859
- recons = IterativeRecon._MLEM_GPU_basic(A_matrix, y, numIterations)
860
- if recons is None or len(recons) == 0:
861
- raise ValueError("Single GPU MLEM returned empty result.")
862
-
863
- except Exception as e:
864
- print(f"Error using GPU MLEM: {e}. Falling back to CPU.")
865
- try:
866
- recons = IterativeRecon._MLEM_CPU(A_matrix, y, numIterations)
867
- if recons is None or len(recons) == 0:
868
- raise ValueError("CPU MLEM returned empty result.")
869
- except Exception as e:
870
- print(f"Error using CPU MLEM: {e}. All methods failed.")
871
- raise
872
-
873
- if not isinstance(recons[0], np.ndarray):
874
- recons = [theta.cpu().numpy() for theta in recons]
875
-
876
- return recons
877
-
878
- @staticmethod
879
- def _MLEM_GPU_basic(A_matrix, y, numIteration):
880
- """
881
- This method implements the MLEM algorithm using PyTorch for GPU acceleration.
882
- Parameters:
883
- A_matrix: 4D numpy array (time, z, x, nScans)
884
- y: 2D numpy array (time, nScans)
885
- numIteration: number of iterations for the MLEM algorithm
886
- """
887
- #TODO: Only return the last theta if isSavingTheta is False
888
- A_matrix_torch = torch.tensor(A_matrix, dtype=torch.float32).cuda() # shape: (T, Z, X, N)
889
- y_torch = torch.tensor(y, dtype=torch.float32).cuda() # shape: (T, N)
890
-
891
- # Initialize variables
892
- T, Z, X, N = A_matrix.shape
893
-
894
- # flat
895
- A_flat = A_matrix_torch.permute(0, 3, 1, 2).reshape(T * N, Z * X) # shape: (T * N, Z * X)
896
- y_flat = y_torch.reshape(-1) # shape: (T * N, )
897
-
898
- # Step 1: start from a strickly positive image theta^(0)
899
- theta_0 = torch.ones((Z, X), dtype=torch.float32, device='cuda') # shape: (Z, X)
900
- matrix_theta_torch = [theta_0]
901
- # matrix_theta_from_gpu = []
902
-
903
- # Compute normalization factor: A^T * 1
904
- normalization_factor = A_matrix_torch.sum(dim=(0, 3)) # shape: (Z, X)
905
- normalization_factor_flat = normalization_factor.reshape(-1) # shape: (Z * X, )
906
-
907
- # EM iterative update
908
- for _ in trange(numIteration, desc=f"ML-EM Reshape Iteration (GPU {torch.cuda.current_device()})"):
909
-
910
- theta_p = matrix_theta_torch[-1] # shape: (Z, X)
911
-
912
- # Step 2: Forward projection of current estimate : q = A * theta + b (acc with GPU)
913
- theta_p_flat = theta_p.reshape(-1) # shape: (Z * X, )
914
- q_flat = A_flat @ theta_p_flat # shape: (T * N, )
915
-
916
- # Step 3: Current error estimate : compute ratio e = m / q
917
- e_flat = y_flat / (q_flat + torch.finfo(torch.float32).tiny) # shape: (T * N, )
918
-
919
- # Step 4: Backward projection of the error estimate : c = A.T * e (acc with GPU)
920
- c_flat = A_flat.T @ e_flat # shape: (Z * X, )
921
-
922
- # Step 5: Multiplicative update of current estimate
923
- theta_p_plus_1_flat = (theta_p_flat / (normalization_factor_flat + torch.finfo(torch.float32).tiny)) * c_flat
924
-
925
- matrix_theta_torch.append(theta_p_plus_1_flat.reshape(Z, X)) # shape: (Z, X)
926
-
927
- return matrix_theta_torch # Return the list of tensors
928
-
929
- @staticmethod
930
- def _MLEM_CPU_basic(A_matrix, y, numIteration):
931
- """
932
- This method implements the MLEM algorithm using basic numpy operations.
933
- Parameters:
934
- A_matrix: 4D numpy array (time, z, x, nScans)
935
- y: 2D numpy array (time, nScans)
936
- numIteration: number of iterations for the MLEM algorithm
937
- """
938
- #TODO: Only return the last theta if isSavingTheta is False
939
-
940
- # Initialize variables
941
- q_p = np.zeros((A_matrix.shape[0], A_matrix.shape[3])) # shape : (t, i)
942
- c_p = np.zeros((A_matrix.shape[1], A_matrix.shape[2])) # shape : (z, x)
943
-
944
- # Step 1: start from a strickly positive image theta^(0)
945
- theta_p_0 = np.ones((A_matrix.shape[1], A_matrix.shape[2])) # initial theta^(0)
946
- matrix_theta = [theta_p_0] # store theta
947
-
948
- # Compute normalization factor: A^T * 1
949
- normalization_factor = np.sum(A_matrix, axis=(0, 3)) # shape: (z, x)
950
-
951
- # EM iterative update
952
- for _ in trange(numIteration, desc="ML-EM Iteration (CPU basic)"):
953
-
954
- theta_p = matrix_theta[-1]
955
-
956
- # Step 1: Forward projection of current estimate : q = A * theta + b
957
- for _t in range(A_matrix.shape[0]):
958
- for _n in range(A_matrix.shape[3]):
959
- q_p[_t, _n] = np.sum(A_matrix[_t, :, :, _n] * theta_p)
960
-
961
- # Step 2: Current error estimate : compute ratio e = m / q
962
- e_p = y / (q_p + 1e-8) # 避免除零
963
-
964
- # Step 3: Backward projection of the error estimate : c = A.T * e
965
- for _z in range(A_matrix.shape[1]):
966
- for _x in range(A_matrix.shape[2]):
967
- c_p[_z, _x] = np.sum(A_matrix[:, _z, _x, :] * e_p)
968
-
969
- # Step 4: Multiplicative update of current estimate
970
- theta_p_plus_1 = theta_p / (normalization_factor + 1e-8) * c_p
971
-
972
- # Step 5: Store current theta
973
- matrix_theta.append(theta_p_plus_1)
974
-
975
- return matrix_theta # Return the list of numpy arrays
976
-
977
- @staticmethod
978
- def _MLEM_CPU_multi(A_matrix, y, numIteration):
979
- """
980
- This method implements the MLEM algorithm using multi-threading with Numba.
981
- Parameters:
982
- A_matrix: 4D numpy array (time, z, x, nScans)
983
- y: 2D numpy array (time, nScans)
984
- numIteration: number of iterations for the MLEM algorithm
985
- """
986
- #TODO: Only return the last theta if isSavingTheta is False
987
- numba.set_num_threads(os.cpu_count())
988
- print(f"Number of threads : {numba.config.NUMBA_DEFAULT_NUM_THREADS}")
989
- q_p = np.zeros((A_matrix.shape[0], A_matrix.shape[3])) # shape : (t, i)
990
- c_p = np.zeros((A_matrix.shape[1], A_matrix.shape[2])) # shape : (z, x)
991
-
992
- # Step 1: start from a strickly positive image theta^(0)
993
- theta_p_0 = np.ones((A_matrix.shape[1], A_matrix.shape[2]))
994
- matrix_theta = [theta_p_0]
995
-
996
- # Compute normalization factor: A^T * 1
997
- normalization_factor = np.sum(A_matrix, axis=(0, 3)) # shape: (z, x)
998
-
999
- # EM iterative update
1000
- for _ in trange(numIteration, desc="ML-EM Iteration (CPU Multithread)"):
1001
-
1002
- theta_p = matrix_theta[-1]
1003
-
1004
- # Step 1: Forward projection of current estimate : q = A * theta + b (acc with njit)
1005
- IterativeRecon._forward_projection(A_matrix, theta_p, q_p)
1006
-
1007
- # Step 2: Current error estimate : compute ratio e = m / q
1008
- e_p = y / (q_p + 1e-8)
1009
-
1010
- # Step 3: Backward projection of the error estimate : c = A.T * e (acc with njit)
1011
- IterativeRecon._backward_projection(A_matrix, e_p, c_p)
1012
-
1013
- # Step 4: Multiplicative update of current estimate
1014
- theta_p_plus_1 = theta_p / (normalization_factor + 1e-8) * c_p
1015
-
1016
- # Step 5: Store current theta
1017
- matrix_theta.append(theta_p_plus_1)
1018
-
1019
- @staticmethod
1020
- def _MLEM_CPU_opti(A_matrix, y, numIteration):
1021
- """
1022
- This method implements the MLEM algorithm using optimized numpy operations.
1023
- Parameters:
1024
- A_matrix: 4D numpy array (time, z, x, nScans)
1025
- y: 2D numpy array (time, nScans)
1026
- numIteration: number of iterations for the MLEM algorithm
1027
- """
1028
- #TODO: Only return the last theta if isSavingTheta is False
1029
- # Initialize variables
1030
- T, Z, X, N = A_matrix.shape
1031
-
1032
- A_flat = A_matrix.astype(np.float32).transpose(0, 3, 1, 2).reshape(T * N, Z * X) # shape: (T * N, Z * X)
1033
- y_flat = y.astype(np.float32).reshape(-1) # shape: (T * N, )
1034
-
1035
- # Step 1: start from a strickly positive image theta^(0)
1036
- theta_0 = np.ones((Z, X), dtype=np.float32) # shape: (Z, X)
1037
- matrix_theta = [theta_0]
1038
-
1039
- # Compute normalization factor: A^T * 1
1040
- normalization_factor = np.sum(A_matrix, axis=(0, 3)).astype(np.float32) # shape: (Z, X)
1041
- normalization_factor_flat = normalization_factor.reshape(-1)
1042
-
1043
- # EM iterative update
1044
- for p in trange(numIteration, desc="ML-EM Reshape Iteration"):
1045
-
1046
- theta_p = matrix_theta[-1]
1047
-
1048
- # Step 2: Forward projection of current estimate : q = A * theta + b (acc with njit)
1049
- theta_p_flat = theta_p.reshape(-1) # shape: (Z * X, )
1050
- q_flat = A_flat @ theta_p_flat # shape: (T * N)
1051
-
1052
- # Step 3: Current error estimate : compute ratio e = m / q
1053
- e_flat = y_flat / (q_flat + np.finfo(np.float32).tiny) # shape: (T * N, )
1054
- # np.float32(1e-8)
1055
-
1056
- # Step 4: Backward projection of the error estimate : c = A.T * e (acc with njit)
1057
- c_flat = A_flat.T @ e_flat # shape: (Z * X, )
1058
-
1059
- # Step 5: Multiplicative update of current estimate
1060
- theta_p_plus_1_flat = theta_p_flat / (normalization_factor_flat + np.finfo(np.float32).tiny) * c_flat
1061
-
1062
-
1063
- # Step 5: Store current theta
1064
- matrix_theta.append(theta_p_plus_1_flat.reshape(Z, X))
1065
-
1066
    @staticmethod
    def _MLEM_GPU_multi(A_matrix, y, numIteration):
        """
        Placeholder for a multi-GPU MLEM implementation.

        Parameters:
            A_matrix: 4D numpy array (time, z, x, nScans)
            y: 2D numpy array (time, nScans)
            numIteration: number of iterations for the MLEM algorithm

        Returns:
            None — not implemented yet. The caller (_MLEM_GPU) treats the
            None result as an empty reconstruction and falls back to the
            single-GPU / CPU implementations.
        """
        #TODO: implement multi-threading with GPU
        #TODO: Only return the last theta if isSavingTheta is False
        pass
1078
-
1079
- @staticmethod
1080
- @njit(parallel=True)
1081
- def _forward_projection(A_matrix, theta_p, q_p):
1082
- t_dim, z_dim, x_dim, i_dim = A_matrix.shape
1083
- for _t in prange(t_dim):
1084
- for _n in range(i_dim):
1085
- total = 0.0
1086
- for _z in range(z_dim):
1087
- for _x in range(x_dim):
1088
- total += A_matrix[_t, _z, _x, _n] * theta_p[_z, _x]
1089
- q_p[_t, _n] = total
1090
-
1091
- @staticmethod
1092
- @njit(parallel=True)
1093
- def _backward_projection(A_matrix, e_p, c_p):
1094
- t_dim, z_dim, x_dim, n_dim = A_matrix.shape
1095
- for _z in prange(z_dim):
1096
- for _x in range(x_dim):
1097
- total = 0.0
1098
- for _t in range(t_dim):
1099
- for _n in range(n_dim):
1100
- total += A_matrix[_t, _z, _x, _n] * e_p[_t, _n]
1101
- c_p[_z, _x] = total
1102
-
1103
class AnalyticRecon(Recon):
    """
    Analytic (non-iterative) reconstruction.

    NOTE(review): several methods in this class have mismatched call
    signatures and look unfinished — see the per-method notes below. As
    written, run(ProcessType.PYTHON) raises TypeError before any
    reconstruction happens.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.MSE = []        # mean squared error vs. the reference image (scalar once computed)
        self.SSIM = []       # structural similarity vs. the reference image (scalar once computed)
        self.I_recon = None  # reconstructed image, set by the recon methods
        # NOTE(review): other classes in this file read `self.experiment.opticImage`
        # (lowercase) — confirm which attribute name the experiment object really exposes.
        self.I_ref = self.experiment.OpticImage.phantom

    def run(self, reconType):
        """
        Run the analytic reconstruction, then compute MSE and SSIM.

        Parameters:
            reconType: ProcessType selecting the backend (CASToR or PYTHON).

        Raises:
            NotImplementedError: for ProcessType.CASToR.
            ValueError: for any unknown reconType.
        """
        if(reconType == ProcessType.CASToR):
            raise NotImplementedError("CASToR analytic reconstruction is not implemented yet.")
        elif(reconType == ProcessType.PYTHON):
            # NOTE(review): _analyticReconPython requires an `analyticType`
            # argument but none is passed here — this call raises TypeError.
            self._analyticReconPython()
        else:
            raise ValueError(f"Unknown analytic reconstruction type: {reconType}")
        self._getMSE()
        self._getSSIM()

    def _analyticReconPython(self, analyticType):
        """
        Dispatch to the requested analytic reconstruction algorithm.

        Parameters:
            analyticType: AnalyticType selecting the algorithm
                (iFOURIER or iRADON).

        Raises:
            ValueError: for any unknown analyticType.
        """
        if analyticType == AnalyticType.iFOURIER:
            # NOTE(review): _iFourierRecon requires `s_reconstructed` but is
            # called without it — this call raises TypeError.
            self._iFourierRecon()
        elif analyticType == AnalyticType.iRADON:
            self._iRadonRecon()
        else:
            raise ValueError(f"Unknown analytic type: {analyticType}")

    def _iFourierRecon(self, s_reconstructed):
        """
        Image reconstruction via the inverse 2-D Fourier transform.

        Parameters:
            s_reconstructed: reconstructed signal in the frequency domain.

        Sets self.I_recon to the real part of the spatial-domain image.
        """
        # Apply the inverse Fourier transform
        image_reconstructed = np.fft.ifft2(s_reconstructed)

        # Keep only the real part of the reconstructed image
        self.I_recon = np.real(image_reconstructed)


    def _iRadonRecon(self):
        """
        Image reconstruction using an inverse-Radon-style accumulation over
        the acoustic-field angles and spatial frequencies.

        Returns:
            The accumulated (complex) reconstructed image.

        NOTE(review): this method returns I_rec but never assigns
        self.I_recon, so _getMSE/_getSSIM still fail afterwards. It also
        reads `self.f_s_max`, which is never set in this class — confirm
        where that attribute is supposed to come from.
        """
        # Initialize the reconstructed image
        I_rec = np.zeros((len(self.experiment.OpticImage.x), len(self.experiment.OpticImage.z)), dtype=complex)

        # Fourier transform of the signal along the time axis
        s_tilde = np.fft.fft(self.experiment.AOsignal, axis=0)

        # Extract the angles and spatial frequencies of the acoustic fields
        theta = [acoustic_field.angle for acoustic_field in self.experiment.AcousticFields]
        f_s = [acoustic_field.f_s for acoustic_field in self.experiment.AcousticFields]

        # Rotated coordinates and accumulation of the integrals
        for i, th in enumerate(theta):
            x_prime = self.experiment.OpticImage.x[:, np.newaxis] * np.cos(th) - self.experiment.OpticImage.z[np.newaxis, :] * np.sin(th)
            z_prime = self.experiment.OpticImage.z[np.newaxis, :] * np.cos(th) + self.experiment.OpticImage.x[:, np.newaxis] * np.sin(th)

            # First integral: real part
            for j, fs in enumerate(f_s):
                integrand = s_tilde[i, j] * np.exp(2j * np.pi * (x_prime * fs + z_prime * fs))
                integral = trapz(integrand * fs, fs)
                I_rec += 2 * np.real(integral)

        # Second integral: central part
        for i, th in enumerate(theta):
            x_prime = self.experiment.OpticImage.x[:, np.newaxis] * np.cos(th) - self.experiment.OpticImage.z[np.newaxis, :] * np.sin(th)
            z_prime = self.experiment.OpticImage.z[np.newaxis, :] * np.cos(th) + self.experiment.OpticImage.x[:, np.newaxis] * np.sin(th)

            # Keep only spatial frequencies at or below f_s_max
            filtered_f_s = np.array(f_s)[np.array(f_s) <= self.f_s_max]
            integrand = s_tilde[i, np.array(f_s) == 0] * np.exp(2j * np.pi * z_prime * filtered_f_s)
            integral = trapz(integrand * filtered_f_s, filtered_f_s)
            I_rec += integral

        return I_rec

    def _getMSE(self):
        """
        Compute the mean squared error (MSE) between the reconstructed image
        and the reference image, storing the scalar in self.MSE.

        Raises:
            ValueError: if no reconstruction has been run yet.
        """
        if self.I_recon is None:
            raise ValueError("L'image reconstruite n'est pas disponible. Veuillez exécuter la reconstruction avant de calculer le MSE.")
        self.MSE = np.mean((self.I_recon - self.I_ref) ** 2)

    def _getSSIM(self):
        """
        Compute the structural similarity index (SSIM) between the
        reconstructed image and the reference image, storing the scalar in
        self.SSIM.

        Raises:
            ValueError: if no reconstruction has been run yet.
        """
        if self.I_recon is None:
            raise ValueError("L'image reconstruite n'est pas disponible. Veuillez exécuter la reconstruction avant de calculer le SSIM.")
        self.SSIM = ssim(self.I_recon, self.I_ref, data_range=self.I_ref.max() - self.I_ref.min())
1213
-
1214
class BayesianRecon(Recon):
    """
    This class implements the Bayesian reconstruction process.
    It currently does not perform any operations but serves as a template for future implementations.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.theta_matrix = []  # iterative reconstructions (one entry per saved iteration)

    def run(self, reconType):
        """
        This method is a placeholder for the Bayesian reconstruction process.
        It currently does not perform any operations but serves as a template for future implementations.

        Raises:
            ValueError: for any unknown reconType.
        """
        if(reconType == ProcessType.CASToR):
            self._bayesianReconCASToR()
        elif(reconType == ProcessType.PYTHON):
            self._bayesianReconPython()
        else:
            raise ValueError(f"Unknown Bayesian reconstruction type: {reconType}")

    def _bayesianReconCASToR(self):
        # Placeholder: CASToR-based Bayesian reconstruction not implemented yet.
        pass

    def _bayesianReconPython(self):
        # Placeholder: pure-Python Bayesian reconstruction not implemented yet.
        pass

    @staticmethod
    def compute_grad_hess_huber_sparse(theta_flat, index, values, delta=0.1):
        """
        Compute the gradient and Hessian of the Huber penalty function for sparse data.
        Parameters:
            theta_flat (torch.Tensor): Flattened parameter vector.
            index (torch.Tensor): Indices of the sparse matrix in COO format.
            values (torch.Tensor): Values of the sparse matrix in COO format.
            delta (float): Threshold for the Huber penalty.
        Returns:
            grad_U (torch.Tensor): Gradient of the penalty function.
            hess_U (torch.Tensor): Hessian of the penalty function.
            U_value (float): Value of the penalty function.
        """

        # Pairwise differences between each pixel and its neighbours.
        j_idx, k_idx = index
        diff = theta_flat[j_idx] - theta_flat[k_idx]
        abs_diff = torch.abs(diff)

        # Huber penalty (potential function): quadratic below delta, linear above.
        psi_pair = torch.where(abs_diff > delta,
                               delta * abs_diff - 0.5 * delta ** 2,
                               0.5 * diff ** 2)
        psi_pair = values * psi_pair

        # Huber gradient
        grad_pair = torch.where(abs_diff > delta,
                                delta * torch.sign(diff),
                                diff)
        grad_pair = values * grad_pair

        # Huber Hessian (1 in the quadratic region, 0 in the linear region)
        hess_pair = torch.where(abs_diff > delta,
                                torch.zeros_like(diff),
                                torch.ones_like(diff))
        hess_pair = values * hess_pair

        # Accumulate per-pair terms onto each pixel j (torch_scatter).
        grad_U = scatter(grad_pair, j_idx, dim=0, dim_size=theta_flat.shape[0], reduce='sum')
        hess_U = scatter(hess_pair, j_idx, dim=0, dim_size=theta_flat.shape[0], reduce='sum')

        # Total penalty energy (each pair counted twice in psi_pair, hence 0.5)
        U_value = 0.5 * psi_pair.sum()

        return grad_U, hess_U, U_value

    @staticmethod
    def build_adjacency_sparse(Z, X,corner,face,device):
        """
        Build the 8-neighbour adjacency of a Z-by-X pixel grid as a sparse
        COO structure.

        Parameters:
            Z, X: grid dimensions.
            corner: weight for diagonal neighbours.
            face: weight for horizontal/vertical neighbours.
            device: torch device for the returned tensors.

        Returns:
            (index, values): coalesced COO indices (2, nnz) and edge weights
            (via torch_sparse.coalesce).
        """
        rows = []
        cols = []
        weights = []

        for z in range(Z):
            for x in range(X):
                j = z * X + x
                # Visit the 8 neighbours of pixel (z, x).
                for dz in [-1, 0, 1]:
                    for dx in [-1, 0, 1]:
                        if dz == 0 and dx == 0:
                            continue
                        nz, nx = z + dz, x + dx
                        if 0 <= nz < Z and 0 <= nx < X:
                            k = nz * X + nx
                            # Diagonal neighbours get `corner`, axis-aligned get `face`.
                            weight = corner if abs(dz) + abs(dx) == 2 else face
                            rows.append(j)
                            cols.append(k)
                            weights.append(weight)

        index = torch.tensor([rows, cols], device=device)
        values = torch.tensor(weights, dtype=torch.float32, device=device)
        index, values = coalesce(index, values, m=Z*X, n=Z*X)
        return index, values  # COO format: used as the sparse adjacency

    @staticmethod
    def MAP_stop(A_matrix, y, device='cuda'):
        """
        Maximum A Posteriori (MAP) estimation for Bayesian reconstruction.
        This method computes the MAP estimate of the parameters given the data.

        Runs a MAP-EM update with a sparse Huber prior for up to 500000
        iterations, logging a penalized log-likelihood every 100 iterations
        (the actual stop condition is currently commented out).

        NOTE(review): this function never returns the collected
        reconstructions (matrix_theta_from_gpu_MAPEM), so callers get None.
        """
        A_matrix_torch = torch.tensor(A_matrix, dtype=torch.float32).to(device)
        y_torch = torch.tensor(y, dtype=torch.float32).to(device)

        T, Z, X, N = A_matrix.shape
        J = Z * X  # number of pixels (currently unused below)

        # Flatten: measurements on rows, pixels on columns.
        A_flat = A_matrix_torch.permute(0, 3, 1, 2).reshape(T * N, Z * X)
        y_flat = y_torch.reshape(-1)

        theta_0 = torch.ones((Z, X), dtype=torch.float32, device=device)
        matrix_theta_torch = []
        matrix_theta_torch = [theta_0]
        matrix_theta_from_gpu_MAPEM = []
        matrix_theta_from_gpu_MAPEM = [theta_0.cpu().numpy()]

        # Sensitivity image A^T * 1, used to normalize every update.
        normalization_factor = A_matrix_torch.sum(dim=(0, 3))  # (Z, X)
        normalization_factor_flat = normalization_factor.reshape(-1)  # (Z*X,)

        # NOTE(review): build_adjacency_sparse requires corner, face and
        # device arguments; this call passes only (Z, X) and raises
        # TypeError — confirm the intended neighbour weights and device.
        adj_index, adj_values = BayesianRecon.build_adjacency_sparse(Z, X)

        beta = 0.10    # prior strength
        delta = 0.10   # Huber threshold
        previous = -np.inf
        # NOTE(review): nb_false_successive is first assigned inside the loop
        # below; if the very first logged value is not greater than -inf
        # (e.g. NaN), the increment branch raises UnboundLocalError.

        for p in trange(500000, desc="5.1 MAP-EM (Sparse Huber) + STOP condtion (penalized log-likelihood)"):
            theta_p = matrix_theta_torch[-1]
            theta_p_flat = theta_p.reshape(-1)

            # Forward projection, additive residual ratio, back-projection.
            # NOTE(review): e here is (m - q)/q, not the classic MLEM ratio m/q,
            # which matches the additive (theta + update) form used below.
            q_flat = A_flat @ theta_p_flat
            e_flat = (y_flat - q_flat) / (q_flat + torch.finfo(torch.float32).tiny)
            c_flat = A_flat.T @ e_flat

            # Gradient/Hessian/energy of the Huber prior on the current estimate.
            grad_U, hess_U, U_value = BayesianRecon.compute_grad_hess_huber_sparse(theta_p_flat, adj_index, adj_values, delta=delta)

            # Penalized update (De Pierro-style surrogate), clamped to stay >= 0.
            denom = normalization_factor_flat + theta_p_flat * beta * hess_U
            num = theta_p_flat * (c_flat - beta * grad_U)

            theta_p_plus_1_flat = theta_p_flat + num / (denom + torch.finfo(torch.float32).tiny)
            theta_p_plus_1_flat = torch.clamp(theta_p_plus_1_flat, min=0)

            theta_next = theta_p_plus_1_flat.reshape(Z, X)
            #matrix_theta_torch.append(theta_next) # save theta in GPU
            matrix_theta_torch[-1] = theta_next # do not save theta in GPU

            # Copy every iteration to host memory (p % 1 == 0 is always true).
            if p % 1 == 0:
                matrix_theta_from_gpu_MAPEM.append(theta_next.cpu().numpy())

            # === compute penalized log-likelihood (without term ln(m_i !) inside) ===
            # log-likelihood (without term ln(m_i !) inside)
            # log_likelihood = (torch.where(q_flat > 0, y_flat * torch.log(q_flat), torch.zeros_like(q_flat)) - q_flat).sum()
            # log_likelihood = (y_flat * torch.log(q_flat) - q_flat).sum()
            log_likelihood = (y_flat * ( torch.log( q_flat + torch.finfo(torch.float32).tiny ) ) - (q_flat + torch.finfo(torch.float32).tiny)).sum()

            # penalized log-likelihood
            penalized_log_likelihood = log_likelihood - beta * U_value

            # Log progress at iteration 1 and every 100 iterations.
            if p == 0 or (p+1) % 100 == 0:
                current = penalized_log_likelihood.item()

                # Count successive non-improving checks (stop condition below is disabled).
                if current<=previous:
                    nb_false_successive = nb_false_successive + 1

                else:
                    nb_false_successive = 0

                print(f"Iter {p+1}: lnL without term ln(m_i !) inside={log_likelihood.item():.8e}, Gibbs energy function U={U_value.item():.4e}, penalized lnL without term ln(m_i !) inside={penalized_log_likelihood.item():.8e}, p lnL (current {current:.8e} - previous {previous:.8e} > 0)={(current-previous>0)}, nb_false_successive={nb_false_successive}")

                #if nb_false_successive >= 25:
                #break

            previous = penalized_log_likelihood.item()
1389
-
1390
class DeepLearningRecon(Recon):
    """
    Deep-learning based reconstruction.

    Placeholder implementation: both backends are currently no-ops and serve
    as templates for future implementations.
    """
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Placeholder for the deep learning model
        self.model = None
        self.theta_matrix = []

    def run(self, reconType):
        """
        Dispatch the deep learning reconstruction to the requested backend.

        Parameters:
            reconType: ProcessType selecting the backend (CASToR or PYTHON).

        Raises:
            ValueError: for any unknown reconType.
        """
        if reconType == ProcessType.CASToR:
            self._deepLearningReconCASToR()
            return
        if reconType == ProcessType.PYTHON:
            self._deepLearningReconPython()
            return
        raise ValueError(f"Unknown deep learning reconstruction type: {reconType}")

    def _deepLearningReconCASToR(self):
        # Not implemented yet.
        pass

    def _deepLearningReconPython(self):
        # Not implemented yet.
        pass