AOT-biomaps 2.1.3__py3-none-any.whl → 2.9.233__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

This version of AOT-biomaps might be problematic.
Files changed (50):
  1. AOT_biomaps/AOT_Acoustic/AcousticEnums.py +64 -0
  2. AOT_biomaps/AOT_Acoustic/AcousticTools.py +221 -0
  3. AOT_biomaps/AOT_Acoustic/FocusedWave.py +244 -0
  4. AOT_biomaps/AOT_Acoustic/IrregularWave.py +66 -0
  5. AOT_biomaps/AOT_Acoustic/PlaneWave.py +43 -0
  6. AOT_biomaps/AOT_Acoustic/StructuredWave.py +392 -0
  7. AOT_biomaps/AOT_Acoustic/__init__.py +15 -0
  8. AOT_biomaps/AOT_Acoustic/_mainAcoustic.py +978 -0
  9. AOT_biomaps/AOT_Experiment/Focus.py +55 -0
  10. AOT_biomaps/AOT_Experiment/Tomography.py +505 -0
  11. AOT_biomaps/AOT_Experiment/__init__.py +9 -0
  12. AOT_biomaps/AOT_Experiment/_mainExperiment.py +532 -0
  13. AOT_biomaps/AOT_Optic/Absorber.py +24 -0
  14. AOT_biomaps/AOT_Optic/Laser.py +70 -0
  15. AOT_biomaps/AOT_Optic/OpticEnums.py +17 -0
  16. AOT_biomaps/AOT_Optic/__init__.py +10 -0
  17. AOT_biomaps/AOT_Optic/_mainOptic.py +204 -0
  18. AOT_biomaps/AOT_Recon/AOT_Optimizers/DEPIERRO.py +191 -0
  19. AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py +106 -0
  20. AOT_biomaps/AOT_Recon/AOT_Optimizers/MAPEM.py +456 -0
  21. AOT_biomaps/AOT_Recon/AOT_Optimizers/MLEM.py +333 -0
  22. AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py +221 -0
  23. AOT_biomaps/AOT_Recon/AOT_Optimizers/__init__.py +5 -0
  24. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Huber.py +90 -0
  25. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Quadratic.py +86 -0
  26. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/RelativeDifferences.py +59 -0
  27. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/__init__.py +3 -0
  28. AOT_biomaps/AOT_Recon/AlgebraicRecon.py +1023 -0
  29. AOT_biomaps/AOT_Recon/AnalyticRecon.py +154 -0
  30. AOT_biomaps/AOT_Recon/BayesianRecon.py +230 -0
  31. AOT_biomaps/AOT_Recon/DeepLearningRecon.py +35 -0
  32. AOT_biomaps/AOT_Recon/PrimalDualRecon.py +210 -0
  33. AOT_biomaps/AOT_Recon/ReconEnums.py +375 -0
  34. AOT_biomaps/AOT_Recon/ReconTools.py +273 -0
  35. AOT_biomaps/AOT_Recon/__init__.py +11 -0
  36. AOT_biomaps/AOT_Recon/_mainRecon.py +288 -0
  37. AOT_biomaps/Config.py +95 -0
  38. AOT_biomaps/Settings.py +45 -13
  39. AOT_biomaps/__init__.py +271 -18
  40. aot_biomaps-2.9.233.dist-info/METADATA +22 -0
  41. aot_biomaps-2.9.233.dist-info/RECORD +43 -0
  42. {AOT_biomaps-2.1.3.dist-info → aot_biomaps-2.9.233.dist-info}/WHEEL +1 -1
  43. AOT_biomaps/AOT_Acoustic.py +0 -1881
  44. AOT_biomaps/AOT_Experiment.py +0 -541
  45. AOT_biomaps/AOT_Optic.py +0 -219
  46. AOT_biomaps/AOT_Reconstruction.py +0 -1416
  47. AOT_biomaps/config.py +0 -54
  48. AOT_biomaps-2.1.3.dist-info/METADATA +0 -20
  49. AOT_biomaps-2.1.3.dist-info/RECORD +0 -11
  50. {AOT_biomaps-2.1.3.dist-info → aot_biomaps-2.9.233.dist-info}/top_level.txt +0 -0
AOT_biomaps/AOT_Recon/ReconEnums.py
@@ -0,0 +1,375 @@
+ from enum import Enum
+
+ class ReconType(Enum):
+     """
+     Enum for different reconstruction types.
+
+     Selection of reconstruction types:
+     - Analytic: A reconstruction method based on analytical solutions.
+     - Algebraic: A reconstruction method that iteratively refines the solution using algebraic techniques.
+     - Bayesian: A reconstruction method based on Bayesian statistical approaches.
+     - DeepLearning: A reconstruction method utilizing deep learning algorithms.
+     - Convex: A reconstruction method based on convex optimization.
+     """
+
+     Analytic = 'analytic'
+     """A reconstruction method based on analytical solutions."""
+     Algebraic = 'algebraic'
+     """A reconstruction method that iteratively refines the solution using algebraic techniques."""
+     Bayesian = 'bayesian'
+     """A reconstruction method based on Bayesian statistical approaches."""
+     DeepLearning = 'deep_learning'
+     """A reconstruction method utilizing deep learning algorithms."""
+     Convex = 'convex'
+     """A reconstruction method based on convex optimization."""
+
+ class AnalyticType(Enum):
+     iFOURIER = 'iFOURIER'
+     """
+     This analytic reconstruction type uses the inverse Fourier transform to reconstruct the image.
+     It is suitable only for data that have already been transformed into the frequency domain,
+     as in Fourier optics.
+     """
+     iRADON = 'iRADON'
+     """
+     This analytic reconstruction type uses the inverse Radon transform to reconstruct the image.
+     It is suitable only for data that have already been transformed into the Radon domain,
+     as in computed tomography (CT).
+     """
+
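As an illustration of what the iRADON type denotes, filtered back-projection can be sketched with scikit-image (not a dependency of this package; the phantom and angles below are placeholders):

    import numpy as np
    from skimage.transform import radon, iradon

    # Hypothetical example: build a sinogram from a phantom, then invert it.
    phantom = np.zeros((128, 128), dtype=np.float32)
    phantom[48:80, 48:80] = 1.0                       # square absorber
    angles = np.linspace(0.0, 180.0, 120, endpoint=False)
    sinogram = radon(phantom, theta=angles)           # forward Radon transform
    recon = iradon(sinogram, theta=angles, filter_name='ramp')  # filtered back-projection
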
+ class OptimizerType(Enum):
+     MLEM = 'MLEM'
+     """
+     This optimizer is the standard MLEM (Maximum Likelihood Expectation Maximization).
+     It is numerically implemented in the multiplicative form (as opposed to the gradient form).
+     It truncates negative data to 0 to satisfy the positivity constraint.
+     If subsets are used, it naturally becomes the OSEM optimizer.
+
+     With transmission data, the log-converted pre-corrected data are used as in J. Nuyts et al:
+     "Iterative reconstruction for helical CT: a simulation study", Phys. Med. Biol., vol. 43, pp. 729-737, 1998.
+
+     The following options can be used (in this particular order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
+     - Minimum image update: Sets the minimum of the image update factor under which it stays constant
+       (0 or a negative value means no minimum, thus allowing a 0 update).
+     - Maximum image update: Sets the maximum of the image update factor over which it stays constant
+       (0 or a negative value means no maximum).
+
+     This optimizer is compatible with both histogram and list-mode data.
+     This optimizer is compatible with both emission and transmission data.
+     """
+     CP_TV = 'CP_TV'
+     """
+     This optimizer implements the Chambolle-Pock primal-dual algorithm with total variation regularization.
+     It is suitable for problems where the objective function includes a total variation term.
+     It is particularly effective at preserving edges while reducing noise in the reconstructed image.
+     """
+     CP_KL = 'CP_KL'
+     """
+     This optimizer implements the Chambolle-Pock primal-dual algorithm with a Kullback-Leibler divergence term.
+     It is suitable for problems where the objective function includes a Kullback-Leibler divergence term.
+     """
+     LS = 'LS'
+     """
+     This optimizer implements the standard Landweber algorithm for least-squares optimization.
+     With transmission data, it uses the log-converted model to derive the update.
+     Be aware that the relaxation parameter is not automatically set, so finding an optimal
+     setting often requires some trial and error. Also, remember that this algorithm is
+     particularly slow to converge.
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Relaxation factor: Sets the relaxation factor applied to the update.
+     - Non-negativity constraint: 0 for no constraint, or 1 to apply the constraint during the image update.
+     This optimizer is only compatible with histogram data, and with both emission and transmission data.
+     """
+     MLTR = 'MLTR'
+     """
+     This optimizer is a version of the MLTR algorithm implemented from equation 16 of the paper from K. Van Slambrouck and J. Nuyts:
+     "Reconstruction scheme for accelerated maximum likelihood reconstruction: the patchwork structure",
+     IEEE Trans. Nucl. Sci., vol. 61, pp. 173-181, 2014.
+
+     An additional empiric relaxation factor has been added onto the additive update. Its value for the first and last updates
+     can be parameterized. Its value for all updates in between is computed linearly from these first and last provided values.
+
+     Subsets can be used.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Alpha ratio: Sets the ratio between exterior and interior alpha values of the cylindrical FOV (0 means the exterior alpha values are 0).
+     - Initial relaxation factor: Sets the empiric multiplicative factor on the additive update used at the first update.
+     - Final relaxation factor: Sets the empiric multiplicative factor on the additive update used at the last update.
+     - Non-negativity constraint: 0 for no constraint, or 1 to apply the constraint during the image update.
+
+     This optimizer is only compatible with histogram data and transmission data.
+     """
+
+     NEGML = 'NEGML'
+     """
+     This optimizer is the NEGML algorithm from K. Van Slambrouck et al, IEEE TMI, Jan 2015, vol. 34, pp. 126-136.
+
+     Subsets can be used. This implementation only considers the psi parameter, but not the alpha image design parameter,
+     which is supposed to be 1 for all voxels. It implements equation 17 of the reference paper.
+
+     This algorithm allows for negative image values.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Psi: Sets the psi parameter that sets the transition from Poisson to Gaussian statistics (must be positive).
+       (If set to 0, then it is taken to infinity and implements equation 21 of the reference paper.)
+
+     This optimizer is only compatible with histogram data and emission data.
+     """
+
+     OSL = 'OSL'
+     """
+     This optimizer is the One-Step-Late algorithm from P. J. Green, IEEE TMI, Mar 1990, vol. 9, pp. 84-93.
+
+     Subsets can be used as for OSEM. It accepts penalty terms that have a derivative order of at least one.
+     Without penalty, it is strictly equivalent to the MLEM algorithm.
+
+     It is numerically implemented in the multiplicative form (as opposed to the gradient form).
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
+     - Minimum image update: Sets the minimum of the image update factor under which it stays constant
+       (0 or a negative value means no minimum, thus allowing a 0 update).
+     - Maximum image update: Sets the maximum of the image update factor over which it stays constant
+       (0 or a negative value means no maximum).
+
+     This optimizer is compatible with both histogram and list-mode data, and with both emission and transmission data.
+     """
+
+     PPGMLEM = 'PPGML'
+     """
+     This optimizer is the Penalized Preconditioned Gradient algorithm from J. Nuyts et al, IEEE TNS, Feb 2002, vol. 49, pp. 56-60.
+
+     It is a heuristic but effective gradient ascent algorithm for penalized maximum-likelihood reconstruction.
+     It addresses the shortcoming of One-Step-Late when large penalty strengths can create numerical problems.
+     Penalty terms must have a derivative order of at least two.
+
+     Subsets can be used as for OSEM. Without penalty, it is equivalent to the gradient ascent form of the MLEM algorithm.
+
+     Based on the likelihood gradient and the penalty, a multiplicative update factor is computed and its range is limited
+     by the provided parameters. Thus, negative values cannot occur and voxels cannot be trapped at 0, provided the first
+     estimate is strictly positive.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
+     - Minimum image update: Sets the minimum of the image update factor under which it stays constant
+       (0 or a negative value means no minimum, thus allowing a 0 update).
+     - Maximum image update: Sets the maximum of the image update factor over which it stays constant
+       (0 or a negative value means no maximum).
+
+     This optimizer is only compatible with histogram data and emission data.
+     """
+
+     AML = 'AML'
+     """
+     This optimizer is the AML algorithm derived from the AB-EMML of C. Byrne, Inverse Problems, 1998, vol. 14, pp. 1455-1467.
+
+     The bound B is taken to infinity, so only the bound A can be parameterized.
+     This bound must be quantitative (same unit as the reconstructed image).
+     It is provided as a single value, thus assuming a uniform bound.
+
+     This algorithm allows for negative image values in case the provided bound is also negative.
+
+     Subsets can be used.
+
+     With a negative or null bound, this algorithm implements equation 6 of A. Rahmim et al, Phys. Med. Biol., 2012, vol. 57, pp. 733-755.
+     If a positive bound is provided, then we suppose that the bound A is taken to minus infinity. In that case, this algorithm implements
+     equation 22 of K. Van Slambrouck et al, IEEE TMI, Jan 2015, vol. 34, pp. 126-136.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
+     - Bound: Sets the bound parameter that shifts the Poisson law (quantitative, negative or null for standard AML and positive for infinite AML).
+
+     This optimizer is only compatible with histogram data and emission data.
+     """
+
+     BSREM = 'BSREM'
+     """
+     This optimizer is the Block Sequential Regularized Expectation Maximization (BSREM) algorithm, in development,
+     from S. Ahn and J. Fessler, IEEE TMI, May 2003, vol. 22, pp. 613-626. It follows the definition of BSREM II
+     in that paper.
+
+     This algorithm is the only one to have proven convergence using subsets. Its implementation is entirely based
+     on the reference paper. It may have numerical problems when a full field-of-view is used, because of the sharp
+     sensitivity loss at the edges of the field-of-view. As it is simply based on the gradient, penalty terms must
+     have a derivative order of at least one. Without penalty, it reduces to OSEM but where the sensitivity is not
+     dependent on the current subset. This is a requirement of the algorithm, explaining why it starts by computing
+     the global sensitivity before going through iterations. The algorithm is restricted to histograms.
+
+     Options:
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Minimum image value: Sets the minimum allowed image value (parameter 't' in the reference paper).
+     - Maximum image value: Sets the maximum allowed image value (parameter 'U' in the reference paper).
+     - Relaxation factor type: Type of relaxation factors (can be one of the following: 'classic').
+
+     Relaxation factors of type 'classic' correspond to what was proposed in the reference paper in equation (31).
+     This equation gives: alpha_n = alpha_0 / (gamma * iter_num + 1)
+     The iteration number 'iter_num' is supposed to start at 0 so that for the first iteration, alpha_0 is used.
+     This parameter can be provided using the following keyword: 'relaxation factor classic initial value'.
+     The 'gamma' parameter can be provided using the following keyword: 'relaxation factor classic step size'.
+
+     This optimizer is only compatible with histogram data and emission data.
+     """
+
+     DEPIERRO95 = 'DEPIERRO95'
+     """
+     This optimizer is based on the algorithm from A. De Pierro, IEEE TMI, vol. 14, pp. 132-137, 1995.
+
+     This algorithm uses optimization transfer techniques to derive an exact and convergent algorithm
+     for maximum likelihood reconstruction including an MRF penalty with different potential functions.
+
+     The algorithm is convergent and is numerically robust to high penalty strength.
+     It is strictly equivalent to MLEM without penalty, but can be unstable with extremely low penalty strength.
+     Currently, it only implements the quadratic penalty.
+
+     To be used, an MRF penalty still needs to be defined accordingly (at least to define the neighborhood).
+     Subsets can be used as for OSEM, without proof of convergence however.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Denominator threshold: Sets the threshold of the data space denominator under which the ratio is set to 1.
+     - Minimum image update: Sets the minimum of the image update factor under which it stays constant
+       (0 or a negative value means no minimum, thus allowing a 0 update).
+     - Maximum image update: Sets the maximum of the image update factor over which it stays constant
+       (0 or a negative value means no maximum).
+
+     This optimizer is compatible with both histogram and list-mode data, and only with emission data.
+     """
+
+     LDWB = 'LDWB'
+     """
+     This optimizer implements the standard Landweber algorithm for least-squares optimization.
+
+     With transmission data, it uses the log-converted model to derive the update.
+     Be aware that the relaxation parameter is not automatically set, so finding an optimal
+     setting often requires some trial and error. Also, remember that this algorithm is
+     particularly slow to converge.
+
+     Options (in order when provided as a list):
+     - Initial image value: Sets the uniform voxel value for the initial image.
+     - Relaxation factor: Sets the relaxation factor applied to the update.
+     - Non-negativity constraint: 0 for no constraint, or 1 to apply the constraint during the image update.
+
+     This optimizer is only compatible with histogram data, and with both emission and transmission data.
+     """
+
+     PGC = 'PGC'
+     """
+     This optimizer implements the PGC (Penalized Gauss-Newton Conjugate Gradient) algorithm from J. Nuyts et al, IEEE TNS, Feb 2002, vol. 49, pp. 56-60.
+     """
+
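The MLEM entry above describes a multiplicative update with a denominator threshold and update-factor clamping. Here is a minimal NumPy sketch of one such update for a linear model y ≈ A·x; it is illustrative only, not the package's actual implementation (which lives in AOT_Recon/AOT_Optimizers/MLEM.py in this diff):

    import numpy as np

    def mlem_update(A, y, x, denom_thresh=1e-6, min_update=0.0, max_update=0.0):
        # Forward-project the current estimate: q = A x.
        q = A @ x
        # Ratio of measured to modeled data; where the denominator is too
        # small, the ratio is forced to 1 (i.e. no contribution).
        ratio = np.where(q > denom_thresh, y / np.maximum(q, denom_thresh), 1.0)
        # Back-project the ratio and normalize by the sensitivity A^T 1.
        sensitivity = A.T @ np.ones_like(y)
        update = (A.T @ ratio) / np.maximum(sensitivity, denom_thresh)
        # Optional clamping of the multiplicative update factor, as in the
        # "minimum/maximum image update" options listed above.
        if min_update > 0:
            update = np.maximum(update, min_update)
        if max_update > 0:
            update = np.minimum(update, max_update)
        return x * update
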
+ class PotentialType(Enum):
+     """The potential function actually penalizes the difference between the voxel of interest and a neighbor:
+     p(u, v) = p(u - v)
+
+     Descriptions of potential functions:
+     - Quadratic: p(u, v) = 0.5 * (u - v)^2
+     - Geman-McClure: p(u, v, d) = (u - v)^2 / (d^2 + (u - v)^2)
+     - Hebert-Leahy: p(u, v, m) = log(1 + (u - v)^2 / m^2)
+     - Green's log-cosh: p(u, v, d) = log(cosh((u - v) / d))
+     - Huber piecewise: p(u, v, d) = d * |u - v| - 0.5 * d^2 if |u - v| > d, else 0.5 * (u - v)^2
+     - Nuyts relative: p(u, v, g) = (u - v)^2 / (u + v + g * |u - v|)
+     """
+
+     QUADRATIC = 'QUADRATIC'
+     """
+     Quadratic potential:
+     p(u, v) = 0.5 * (u - v)^2
+
+     Reference: Geman and Geman, IEEE Trans. Pattern Anal. Machine Intell., vol. PAMI-6, pp. 721-741, 1984.
+     """
+
+     GEMAN_MCCLURE = 'GEMAN_MCCLURE'
+     """
+     Geman-McClure potential:
+     p(u, v, d) = (u - v)^2 / (d^2 + (u - v)^2)
+
+     The parameter 'd' can be set using the 'deltaGMC' keyword.
+
+     Reference: Geman and McClure, Proc. Amer. Statist. Assoc., 1985.
+     """
+
+     HEBERT_LEAHY = 'HEBERT_LEAHY'
+     """
+     Hebert-Leahy potential:
+     p(u, v, m) = log(1 + (u - v)^2 / m^2)
+
+     The parameter 'm' can be set using the 'muHL' keyword.
+
+     Reference: Hebert and Leahy, IEEE Trans. Med. Imaging, vol. 8, pp. 194-202, 1989.
+     """
+
+     GREEN_LOGCOSH = 'GREEN_LOGCOSH'
+     """
+     Green's log-cosh potential:
+     p(u, v, d) = log(cosh((u - v) / d))
+
+     The parameter 'd' can be set using the 'deltaLogCosh' keyword.
+
+     Reference: Green, IEEE Trans. Med. Imaging, vol. 9, pp. 84-93, 1990.
+     """
+
+     HUBER_PIECEWISE = 'HUBER_PIECEWISE'
+     """
+     Huber piecewise potential:
+     p(u, v, d) = d * |u - v| - 0.5 * d^2 if |u - v| > d, else 0.5 * (u - v)^2
+
+     The parameter 'd' can be set using the 'deltaHuber' keyword.
+
+     Reference: e.g. Mumcuoglu et al, Phys. Med. Biol., vol. 41, pp. 1777-1807, 1996.
+     """
+
+     RELATIVE_DIFFERENCE = 'NUYTS_RELATIVE'
+     """
+     Nuyts relative potential:
+     p(u, v, g) = (u - v)^2 / (u + v + g * |u - v|)
+
+     The parameter 'g' can be set using the 'gammaRD' keyword.
+
+     Reference: Nuyts et al, IEEE Trans. Nucl. Sci., vol. 49, pp. 56-60, 2002.
+     """
+
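For the piecewise Huber definition above, a direct NumPy translation of p(u, v, d) could look like this (helper name illustrative):

    import numpy as np

    def huber_potential(u, v, d):
        # Quadratic near zero, linear in the tails:
        # p = 0.5*(u-v)^2 if |u-v| <= d, else d*|u-v| - 0.5*d^2
        diff = np.abs(u - v)
        return np.where(diff <= d, 0.5 * diff**2, d * diff - 0.5 * d**2)
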
+ class ProcessType(Enum):
+     CASToR = 'CASToR'
+     PYTHON = 'PYTHON'
+
+ class NoiseType(Enum):
+     """
+     Enum for different noise types used in reconstructions.
+
+     Selection of noise types:
+     - Poisson: Poisson noise, typically used for emission data.
+     - Gaussian: Gaussian noise, typically used for transmission data.
+     - None: No noise is applied.
+     """
+     POISSON = 'poisson'
+     """Poisson noise, typically used for emission data."""
+     GAUSSIAN = 'gaussian'
+     """Gaussian noise, typically used for transmission data."""
+     None_ = 'none'
+     """No noise is applied."""
+
+ class SparsingType(Enum):
+     """
+     Enum for the sparse-matrix storage formats used in reconstructions.
+
+     Selection of storage formats:
+     - CSR: Compressed Sparse Row format.
+     - COO: Coordinate (index/value pair) format.
+     """
+     CSR = 'CSR'
+     """Compressed Sparse Row storage format."""
+     COO = 'COO'
+     """Coordinate (COO) storage format."""
AOT_biomaps/AOT_Recon/ReconTools.py
@@ -0,0 +1,273 @@
+ import os
+ import torch
+ import numpy as np
+ from numba import njit, prange
+ from torch_sparse import coalesce
+ import torch.nn.functional as F
+
+ def load_recon(hdr_path):
+     """
+     Reads an Interfile header (.hdr) and its binary file (.img) to rebuild an image the same way Vinci does.
+
+     Parameters:
+     -----------
+     - hdr_path: full path to the .hdr file
+
+     Returns:
+     --------
+     - image: NumPy array containing the image, rescaled using the header's slope/offset
+     """
+     header = {}
+     with open(hdr_path, 'r') as f:
+         for line in f:
+             if ':=' in line:
+                 key, value = line.split(':=', 1)  # only split on the first occurrence of ':='
+                 key = key.strip().lower().replace('!', '')  # clean up stray characters
+                 value = value.strip()
+                 header[key] = value
+
+     # 📘 Get the name of the associated data file (the .img)
+     data_file = header.get('name of data file')
+     if data_file is None:
+         raise ValueError(f"Cannot find the data file associated with header file {hdr_path}")
+
+     img_path = os.path.join(os.path.dirname(hdr_path), data_file)
+
+     # 📘 Get the image size from the metadata
+     shape = [int(header[f'matrix size [{i}]']) for i in range(1, 4) if f'matrix size [{i}]' in header]
+     if shape and shape[-1] == 1:  # if the 3rd dimension is 1, drop it
+         shape = shape[:-1]  # keep e.g. (192, 240)
+
+     if not shape:
+         raise ValueError("Cannot determine the image shape from the metadata.")
+
+     # 📘 Determine the data type to use
+     data_type = header.get('number format', 'short float').lower()
+     dtype_map = {
+         'short float': np.float32,
+         'float': np.float32,
+         'int16': np.int16,
+         'int32': np.int32,
+         'uint16': np.uint16,
+         'uint8': np.uint8
+     }
+     dtype = dtype_map.get(data_type)
+     if dtype is None:
+         raise ValueError(f"Unsupported data type: {data_type}")
+
+     # 📘 Byte order (endianness)
+     byte_order = header.get('imagedata byte order', 'LITTLEENDIAN').lower()
+     endianess = '<' if 'little' in byte_order else '>'
+
+     # 📘 Check the actual size of the .img file
+     img_size = os.path.getsize(img_path)
+     expected_size = np.prod(shape) * np.dtype(dtype).itemsize
+
+     if img_size != expected_size:
+         raise ValueError(f"The .img file size ({img_size} bytes) does not match the expected size ({expected_size} bytes).")
+
+     # 📘 Read the binary data and reshape it
+     with open(img_path, 'rb') as f:
+         data = np.fromfile(f, dtype=endianess + np.dtype(dtype).char)
+
+     image = data.reshape(shape[::-1])
+
+     # 📘 Rescale the image if needed
+     rescale_slope = float(header.get('data rescale slope', 1))
+     rescale_offset = float(header.get('data rescale offset', 0))
+     image = image * rescale_slope + rescale_offset
+
+     return image
+
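A minimal usage sketch for load_recon (the .hdr path below is a placeholder; any Interfile .hdr/.img pair works):

    image = load_recon('/data/recon/output_it10.hdr')   # hypothetical path
    print(image.shape, image.dtype, image.max())
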
+ def mse(y_true, y_pred):
+     """
+     Computes the Mean Squared Error (MSE) between two arrays.
+     Equivalent to sklearn.metrics.mean_squared_error.
+     """
+     y_true = np.asarray(y_true)
+     y_pred = np.asarray(y_pred)
+     return np.mean((y_true - y_pred) ** 2)
+
+ def ssim(img1, img2, win_size=7, k1=0.01, k2=0.03, L=1.0):
+     """
+     Computes the SSIM between two 2D (grayscale) images.
+     Close to skimage.metrics.structural_similarity with:
+     - data_range=1.0 (images normalized between 0 and 1)
+     - multichannel=False (single channel)
+     except that a uniform window is used instead of a Gaussian one.
+
+     Args:
+         img1, img2: 2D images (numpy arrays) of the same size.
+         win_size: Size of the comparison window (must be odd).
+         k1, k2: Constants stabilizing the division (typical values: 0.01, 0.03).
+         L: Dynamic range of the pixels (1.0 for images in [0,1], 255 for [0,255]).
+     Returns:
+         Mean SSIM over the image (float between -1 and 1).
+     """
+     if img1.shape != img2.shape:
+         raise ValueError("Images must have the same size.")
+     if win_size % 2 == 0:
+         raise ValueError("win_size must be odd.")
+
+     # Constants
+     C1 = (k1 * L) ** 2
+     C2 = (k2 * L) ** 2
+
+     # Uniform window (approximation; skimage uses a Gaussian window)
+     window = np.ones((win_size, win_size)) / (win_size ** 2)
+     window = window / np.sum(window)  # normalization
+
+     # Pad the images to handle the borders
+     pad = win_size // 2
+     img1_pad = np.pad(img1, pad, mode='reflect')
+     img2_pad = np.pad(img2, pad, mode='reflect')
+
+     # Local statistics
+     mu1 = np.zeros_like(img1, dtype=np.float64)
+     mu2 = np.zeros_like(img1, dtype=np.float64)
+     sigma1_sq = np.zeros_like(img1, dtype=np.float64)
+     sigma2_sq = np.zeros_like(img1, dtype=np.float64)
+     sigma12 = np.zeros_like(img1, dtype=np.float64)
+
+     # Iterate over each pixel (could be optimized with convolutions)
+     for i in range(pad, img1_pad.shape[0] - pad):
+         for j in range(pad, img1_pad.shape[1] - pad):
+             patch1 = img1_pad[i-pad:i+pad+1, j-pad:j+pad+1]
+             patch2 = img2_pad[i-pad:i+pad+1, j-pad:j+pad+1]
+
+             mu1[i-pad, j-pad] = np.sum(patch1 * window)
+             mu2[i-pad, j-pad] = np.sum(patch2 * window)
+             sigma1_sq[i-pad, j-pad] = np.sum(window * (patch1 - mu1[i-pad, j-pad]) ** 2)
+             sigma2_sq[i-pad, j-pad] = np.sum(window * (patch2 - mu2[i-pad, j-pad]) ** 2)
+             sigma12[i-pad, j-pad] = np.sum(window * (patch1 - mu1[i-pad, j-pad]) * (patch2 - mu2[i-pad, j-pad]))
+
+     # Local SSIM map
+     ssim_map = ((2 * mu1 * mu2 + C1) * (2 * sigma12 + C2)) / (
+         (mu1**2 + mu2**2 + C1) * (sigma1_sq + sigma2_sq + C2)
+     )
+
+     return np.mean(ssim_map)
+
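A quick sanity check of the mse/ssim helpers above on synthetic images (values arbitrary):

    import numpy as np

    rng = np.random.default_rng(0)
    ref = rng.random((64, 64))                 # reference image in [0, 1]
    noisy = np.clip(ref + 0.05 * rng.standard_normal(ref.shape), 0, 1)

    print(mse(ref, ref), ssim(ref, ref))       # 0.0 and ~1.0 for identical images
    print(mse(ref, noisy), ssim(ref, noisy))   # small MSE, SSIM below 1
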
+ def calculate_memory_requirement(SMatrix, y):
+     """Calculate the memory requirement for the given matrices in GB (assuming 32-bit elements)."""
+     num_elements_SMatrix = SMatrix.size
+     num_elements_y = y.size
+     num_elements_theta = SMatrix.shape[1] * SMatrix.shape[2]  # assuming theta has shape (Z, X)
+
+     # Total memory requirement in GB: 32 bits = 4 bytes per element
+     total_memory = (num_elements_SMatrix + num_elements_y + num_elements_theta) * 32 / 8 / 1024**3
+     return total_memory
+
+ def check_gpu_memory(device_index, required_memory, show_logs=True):
+     """Check if enough memory is available on the specified GPU."""
+     free_memory, _ = torch.cuda.mem_get_info(f"cuda:{device_index}")
+     free_memory_gb = free_memory / 1024**3
+     if show_logs:
+         print(f"Free memory on GPU {device_index}: {free_memory_gb:.2f} GB, Required memory: {required_memory:.2f} GB")
+     return free_memory_gb >= required_memory
+
+ @njit(parallel=True)
+ def _forward_projection(SMatrix, theta_p, q_p):
+     # q[t, n] = sum over (z, x) of S[t, z, x, n] * theta[z, x]
+     t_dim, z_dim, x_dim, i_dim = SMatrix.shape
+     for _t in prange(t_dim):
+         for _n in range(i_dim):
+             total = 0.0
+             for _z in range(z_dim):
+                 for _x in range(x_dim):
+                     total += SMatrix[_t, _z, _x, _n] * theta_p[_z, _x]
+             q_p[_t, _n] = total
+
+ @njit(parallel=True)
+ def _backward_projection(SMatrix, e_p, c_p):
+     # c[z, x] = sum over (t, n) of S[t, z, x, n] * e[t, n]  (adjoint of the forward projection)
+     t_dim, z_dim, x_dim, n_dim = SMatrix.shape
+     for _z in prange(z_dim):
+         for _x in range(x_dim):
+             total = 0.0
+             for _t in range(t_dim):
+                 for _n in range(n_dim):
+                     total += SMatrix[_t, _z, _x, _n] * e_p[_t, _n]
+             c_p[_z, _x] = total
+
+
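For reference, these Numba kernels compute the same tensor contractions as the einsum one-liners below (more readable, but they allocate their outputs on every call):

    import numpy as np

    S = np.random.rand(4, 8, 8, 16).astype(np.float32)   # (t, z, x, n) system matrix
    theta = np.random.rand(8, 8).astype(np.float32)      # (z, x) image
    e = np.random.rand(4, 16).astype(np.float32)         # (t, n) data-space residual

    q = np.einsum('tzxn,zx->tn', S, theta)   # forward projection
    c = np.einsum('tzxn,tn->zx', S, e)       # backward projection (adjoint)
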
+ def _build_adjacency_sparse(Z, X, device, corner=(0.5 - np.sqrt(2) / 4) / np.sqrt(2), face=0.5 - np.sqrt(2) / 4, dtype=torch.float32):
+     # Build the sparse adjacency matrix of the 8-connected pixel neighborhood,
+     # with different weights for face (edge-sharing) and corner (diagonal) neighbors.
+     rows, cols, weights = [], [], []
+     for z in range(Z):
+         for x in range(X):
+             j = z * X + x
+             for dz, dx in [(-1, -1), (-1, 0), (-1, 1),
+                            (0, -1), (0, 1),
+                            (1, -1), (1, 0), (1, 1)]:
+                 nz, nx = z + dz, x + dx
+                 if 0 <= nz < Z and 0 <= nx < X:
+                     k = nz * X + nx
+                     weight = corner if abs(dz) + abs(dx) == 2 else face
+                     rows.append(j)
+                     cols.append(k)
+                     weights.append(weight)
+     index = torch.tensor([rows, cols], dtype=torch.long, device=device)
+     values = torch.tensor(weights, dtype=dtype, device=device)
+     index, values = coalesce(index, values, m=Z*X, n=Z*X)
+     return index, values
+
+
+ def power_method(P, PT, data, Z, X, n_it=10):
+     # Power iteration on PT(P(.)) to estimate the largest singular value of P.
+     x = torch.randn(Z * X, device=data.device)
+     x = x / torch.norm(x)
+     for _ in range(n_it):
+         Ax = P(x)
+         ATax = PT(Ax)
+         x = ATax / torch.norm(ATax)
+     ATax = PT(P(x))
+     return torch.sqrt(torch.dot(x, ATax))
+
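The returned value estimates the operator norm L = ||P||, which is typically what sets the Chambolle-Pock step sizes for the CP_TV / CP_KL optimizers described in ReconEnums.py (a common choice being sigma = tau = 1/L). A toy example with a dense operator (shapes illustrative):

    import torch

    A = torch.randn(32, 16)                       # toy linear operator
    P = lambda v: A @ v                           # forward
    PT = lambda w: A.T @ w                        # adjoint
    data = torch.zeros(32)                        # only used for its .device here
    L_est = power_method(P, PT, data, Z=4, X=4)   # Z*X must match A's input size
    sigma = tau = 1.0 / L_est                     # typical primal/dual step sizes
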
+ def proj_l2(p, alpha):
+     # Pixelwise projection of a vector field onto the l2 ball of radius alpha.
+     if alpha <= 0:
+         return torch.zeros_like(p)
+     norm = torch.sqrt(torch.sum(p**2, dim=0, keepdim=True) + 1e-12)
+     return p * torch.min(norm, torch.tensor(alpha, device=p.device)) / (norm + 1e-12)
+
+ def gradient(x):
+     grad_x = torch.zeros_like(x)
+     grad_y = torch.zeros_like(x)
+     grad_x[:, :-1] = x[:, 1:] - x[:, :-1]  # horizontal gradient
+     grad_y[:-1, :] = x[1:, :] - x[:-1, :]  # vertical gradient
+     return torch.stack((grad_x, grad_y), dim=0)
+
+ def div(x):
+     if x.dim() == 3:
+         x = x.unsqueeze(0)  # add a batch dimension if needed
+
+     gx = x[:, 0, :, :]  # horizontal gradient (shape: [1, H, W] or [H, W])
+     gy = x[:, 1, :, :]  # vertical gradient (shape: [1, H, W] or [H, W])
+
+     # Divergence of the horizontal gradient (gx)
+     div_x = torch.zeros_like(gx)
+     div_x[:, :, 1:] += gx[:, :, :-1]   # positive contribution (left)
+     div_x[:, :, :-1] -= gx[:, :, :-1]  # negative contribution (right)
+
+     # Divergence of the vertical gradient (gy)
+     div_y = torch.zeros_like(gy)
+     div_y[:, 1:, :] += gy[:, :-1, :]   # positive contribution (top)
+     div_y[:, :-1, :] -= gy[:, :-1, :]  # negative contribution (bottom)
+
+     return -(div_x + div_y)
+
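gradient and div above are built as negative adjoints of each other (<grad x, p> = <x, -div p>), which is what the primal-dual TV iterations rely on; a quick numerical check under that assumption:

    import torch

    torch.manual_seed(0)
    x = torch.randn(16, 16)          # primal image
    p = torch.randn(2, 16, 16)       # dual variable (one channel per gradient direction)

    lhs = torch.sum(gradient(x) * p)     # <grad x, p>
    rhs = -torch.sum(x * div(p))         # <x, -div p>
    assert torch.allclose(lhs, rhs, atol=1e-4)
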
+ def norm2sq(x):
+     return torch.sum(x**2)
+
+ def norm1(x):
+     return torch.sum(torch.abs(x))
+
+ def KL_divergence(Ax, y):
+     # Kullback-Leibler data-fidelity term (up to a constant in y)
+     return torch.sum(Ax - y * torch.log(Ax + 1e-10))
+
+ def gradient_KL(Ax, y):
+     # Gradient of the KL data-fidelity term with respect to Ax
+     return 1 - y / (Ax + 1e-10)
+
+ # Proximal operators used by the primal-dual (Chambolle-Pock) iterations
+ def prox_F_star(y, sigma, a):
+     return 0.5 * (y - torch.sqrt(y**2 + 4 * sigma * a))
+
+ def prox_G(x, tau, K):
+     return torch.clamp(x - tau * K, min=0)
+
AOT_biomaps/AOT_Recon/__init__.py
@@ -0,0 +1,11 @@
+ from ._mainRecon import *
+ from .AlgebraicRecon import *
+ from .AnalyticRecon import *
+ from .BayesianRecon import *
+ from .DeepLearningRecon import *
+ from .PrimalDualRecon import *
+ from .ReconEnums import *
+ from .ReconTools import *
+
+
+
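Since this release replaces the monolithic AOT_Reconstruction.py with the AOT_Recon subpackage, downstream imports go through the new module tree; a sketch assuming the wildcard re-exports above (enum names taken from ReconEnums.py in this diff):

    from AOT_biomaps.AOT_Recon import ReconType, OptimizerType, PotentialType

    recon_type = ReconType.Bayesian
    optimizer = OptimizerType.MLEM
    penalty = PotentialType.HUBER_PIECEWISE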