AOT-biomaps 2.9.186-py3-none-any.whl → 2.9.294-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- AOT_biomaps/AOT_Acoustic/StructuredWave.py +2 -2
- AOT_biomaps/AOT_Acoustic/_mainAcoustic.py +11 -6
- AOT_biomaps/AOT_Experiment/Tomography.py +74 -4
- AOT_biomaps/AOT_Experiment/_mainExperiment.py +95 -55
- AOT_biomaps/AOT_Recon/AOT_Optimizers/DEPIERRO.py +48 -13
- AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py +406 -13
- AOT_biomaps/AOT_Recon/AOT_Optimizers/MAPEM.py +118 -38
- AOT_biomaps/AOT_Recon/AOT_Optimizers/MLEM.py +303 -102
- AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py +443 -12
- AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/RelativeDifferences.py +10 -14
- AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_CSR.py +274 -0
- AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_SELL.py +328 -0
- AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/__init__.py +2 -0
- AOT_biomaps/AOT_Recon/AOT_biomaps_kernels.cubin +0 -0
- AOT_biomaps/AOT_Recon/AlgebraicRecon.py +243 -113
- AOT_biomaps/AOT_Recon/AnalyticRecon.py +26 -41
- AOT_biomaps/AOT_Recon/BayesianRecon.py +81 -146
- AOT_biomaps/AOT_Recon/PrimalDualRecon.py +157 -94
- AOT_biomaps/AOT_Recon/ReconEnums.py +27 -2
- AOT_biomaps/AOT_Recon/ReconTools.py +229 -12
- AOT_biomaps/AOT_Recon/__init__.py +1 -0
- AOT_biomaps/AOT_Recon/_mainRecon.py +60 -53
- AOT_biomaps/__init__.py +4 -69
- {aot_biomaps-2.9.186.dist-info → aot_biomaps-2.9.294.dist-info}/METADATA +2 -1
- aot_biomaps-2.9.294.dist-info/RECORD +47 -0
- aot_biomaps-2.9.186.dist-info/RECORD +0 -43
- {aot_biomaps-2.9.186.dist-info → aot_biomaps-2.9.294.dist-info}/WHEEL +0 -0
- {aot_biomaps-2.9.186.dist-info → aot_biomaps-2.9.294.dist-info}/top_level.txt +0 -0
AOT_biomaps/AOT_Recon/PrimalDualRecon.py

@@ -1,22 +1,32 @@
 from AOT_biomaps.AOT_Recon.AlgebraicRecon import AlgebraicRecon
-from AOT_biomaps.AOT_Recon.ReconEnums import ReconType, ProcessType
+from AOT_biomaps.AOT_Recon.ReconEnums import ReconType, ProcessType, SMatrixType
 from AOT_biomaps.AOT_Recon.AOT_Optimizers import CP_KL, CP_TV
 from AOT_biomaps.AOT_Recon.ReconEnums import OptimizerType
 
 import os
 from datetime import datetime
 import numpy as np
+import re
 
 class PrimalDualRecon(AlgebraicRecon):
     """
     This class implements the convex reconstruction process.
     It currently does not perform any operations but serves as a template for future implementations.
     """
-    def __init__(self, theta=1.0, L=None, **kwargs):
+    def __init__(self, alpha, beta, theta=1.0, L=None, k_security=0.8, use_power_method=True, auto_alpha_gamma=0.05, apply_positivity_clamp=True, tikhonov_as_gradient=False, use_laplacian=True, laplacian_beta_scale=1.0, **kwargs):
         super().__init__(**kwargs)
         self.reconType = ReconType.Convex
+        self.alpha = alpha  # TV regularization parameter (if None, alpha is auto-scaled)
+        self.beta = beta  # Tikhonov regularization parameter
         self.theta = theta  # relaxation parameter (between 1 and 2)
         self.L = L  # spectral norm of the linear operator defined by the matrices P and P^T
+        self.k_security = k_security
+        self.use_power_method = use_power_method
+        self.auto_alpha_gamma = auto_alpha_gamma  # gamma for auto alpha: alpha = gamma * data_term / tv_term
+        self.apply_positivity_clamp = apply_positivity_clamp
+        self.tikhonov_as_gradient = tikhonov_as_gradient  # if True, apply -tau*2*beta*x instead of the multiplicative prox
+        self.use_laplacian = use_laplacian  # enable Laplacian (scalar Hessian) penalty
+        self.laplacian_beta_scale = laplacian_beta_scale  # multiply beta for the Laplacian term if separate scaling is wanted
 
     def run(self, processType=ProcessType.PYTHON, withTumor=True):
         """
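
For orientation (not part of the package diff): a minimal sketch of how the enlarged constructor might be called. The import path follows the file list above; the chosen values are hypothetical and only illustrate the new knobs (alpha for TV, beta for Tikhonov/Laplacian, and the step-size safety controls). Base-class arguments such as saveDir, optimizer, or experiment would presumably be passed through **kwargs.

from AOT_biomaps.AOT_Recon.PrimalDualRecon import PrimalDualRecon

# Hypothetical values; alpha=None would trigger auto-scaling via auto_alpha_gamma.
recon = PrimalDualRecon(
    alpha=0.01,          # TV regularization weight
    beta=1e-3,           # Tikhonov regularization weight
    theta=1.0,           # relaxation parameter
    L=None,              # spectral norm; estimated on the fly when use_power_method=True
    k_security=0.8,      # safety factor applied to the step sizes
    use_power_method=True,
)
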

@@ -48,24 +58,20 @@ class PrimalDualRecon(AlgebraicRecon):
         )
         os.makedirs(results_dir, exist_ok=True)
 
-        if os.path.exists(results_dir):
+        if os.path.exists(os.path.join(results_dir, "indices.npy")):
             return (True, results_dir)
 
         return (False, results_dir)
 
-
-
-    def load(self, withTumor=True, results_date=None, optimizer=None, alpha=None, theta=None, L=None, filePath=None):
+    def load(self, withTumor=True, results_date=None, optimizer=None, filePath=None, show_logs=True):
         """
-        Load the reconstruction results and indices
+        Load the reconstruction results (reconPhantom or reconLaser) and indices as lists of 2D np arrays into self.
+        If the loaded file is a 3D array, it is split into a list of 2D arrays.
         Args:
-            withTumor
-            results_date
-            optimizer
-
-            theta (float): Theta parameter to match the saved directory. If None, uses the current theta of the instance.
-            L (float): L parameter to match the saved directory. If None, uses the current L of the instance.
-            filePath (str): Optional. If provided, loads directly from this path (overrides saveDir and results_date).
+            withTumor: If True, loads reconPhantom (with tumor), else reconLaser (without tumor).
+            results_date: Date string (format "ddmm") to specify which results to load. If None, uses the most recent date in saveDir.
+            optimizer: Optimizer name (as string or enum) to filter results. If None, uses the current optimizer of the instance.
+            filePath: Optional. If provided, loads directly from this path (overrides saveDir and results_date).
         """
         if filePath is not None:
             # Direct loading from a file
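
As a usage note (again hypothetical, building on the sketch above): with the reworked signature the per-parameter arguments are gone, so a call would look like

recon.load(withTumor=True, results_date="1503")                                    # match results_1503_<optimizer>... under saveDir
recon.load(withTumor=False, filePath="/path/to/reconLaser.npy", show_logs=False)   # load a specific file directly
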

@@ -73,122 +79,179 @@ class PrimalDualRecon(AlgebraicRecon):
             recon_path = filePath
             if not os.path.exists(recon_path):
                 raise FileNotFoundError(f"No reconstruction file found at {recon_path}.")
-
-
-
+            # Load the data
+            data = np.load(recon_path, allow_pickle=True)
+            # Split into a list of 2D arrays if it is a 3D array
+            if isinstance(data, np.ndarray) and data.ndim == 3:
+                if withTumor:
+                    self.reconPhantom = [data[i, :, :] for i in range(data.shape[0])]
+                else:
+                    self.reconLaser = [data[i, :, :] for i in range(data.shape[0])]
             else:
-   [old lines 80-88 removed; their content is not shown in the source rendering]
+                # Otherwise, assume it is already a list of 2D arrays
+                if withTumor:
+                    self.reconPhantom = data
+                else:
+                    self.reconLaser = data
+            # Try to load the indices
+            base_dir, _ = os.path.split(recon_path)
+            indices_path = os.path.join(base_dir, "indices.npy")
             if os.path.exists(indices_path):
-   [old line 90 removed; content not shown in the source rendering]
+                indices_data = np.load(indices_path, allow_pickle=True)
+                if isinstance(indices_data, np.ndarray) and indices_data.ndim == 3:
+                    self.indices = [indices_data[i, :, :] for i in range(indices_data.shape[0])]
+                else:
+                    self.indices = indices_data
             else:
                 self.indices = None
-
-
+            if show_logs:
+                print(f"Loaded reconstruction results and indices from {recon_path}")
         else:
             # Loading from the results directory
             if self.saveDir is None:
                 raise ValueError("Save directory is not specified. Please set saveDir before loading.")
-
-            # Use current optimizer if not provided
+            # Determine optimizer name for path matching
             opt_name = optimizer.value if optimizer is not None else self.optimizer.value
-
-
+            # Find the most recent results directory if no date is specified
+            dir_pattern = f'results_*_{opt_name}'
+            if opt_name == OptimizerType.CP_TV.value or opt_name == OptimizerType.CP_KL.value:
+                dir_pattern += f'_Alpha_{self.alpha}_Theta_{self.theta}_L_{self.L}'
             if results_date is None:
-                dir_pattern = f'results_*_{opt_name}_Alpha_{alpha if alpha is not None else self.alpha}_Theta_{theta if theta is not None else self.theta}_L_{L if L is not None else self.L}'
                 dirs = [d for d in os.listdir(self.saveDir) if os.path.isdir(os.path.join(self.saveDir, d)) and dir_pattern in d]
                 if not dirs:
                     raise FileNotFoundError(f"No matching results directory found for pattern '{dir_pattern}' in {self.saveDir}.")
                 dirs.sort(reverse=True)  # Most recent first
                 results_dir = os.path.join(self.saveDir, dirs[0])
             else:
-                results_dir = os.path.join(self.saveDir, f'results_{results_date}_{opt_name}   [line truncated in the source rendering]
+                results_dir = os.path.join(self.saveDir, f'results_{results_date}_{opt_name}')
+                if opt_name == OptimizerType.CP_TV.value or opt_name == OptimizerType.CP_KL.value:
+                    results_dir += f'_Alpha_{self.alpha}_Theta_{self.theta}_L_{self.L}'
             if not os.path.exists(results_dir):
                 raise FileNotFoundError(f"Directory {results_dir} does not exist.")
-
             # Load reconstruction results
             recon_key = 'reconPhantom' if withTumor else 'reconLaser'
             recon_path = os.path.join(results_dir, f'{recon_key}.npy')
             if not os.path.exists(recon_path):
                 raise FileNotFoundError(f"No reconstruction file found at {recon_path}.")
-
-
-
+            data = np.load(recon_path, allow_pickle=True)
+            # Split into a list of 2D arrays if it is a 3D array
+            if isinstance(data, np.ndarray) and data.ndim == 3:
+                if withTumor:
+                    self.reconPhantom = [data[i, :, :] for i in range(data.shape[0])]
+                else:
+                    self.reconLaser = [data[i, :, :] for i in range(data.shape[0])]
             else:
-   [old lines 125-140 removed; content not shown in the source rendering apart from a truncated "if"]
-                y=self.experiment.AOsignal_withoutTumor
+                if withTumor:
+                    self.reconPhantom = data
+                else:
+                    self.reconLaser = data
+            # Try to load saved indices (if file exists)
+            indices_path = os.path.join(results_dir, 'indices.npy')
+            if os.path.exists(indices_path):
+                indices_data = np.load(indices_path, allow_pickle=True)
+                if isinstance(indices_data, np.ndarray) and indices_data.ndim == 3:
+                    self.indices = [indices_data[i, :, :] for i in range(indices_data.shape[0])]
+                else:
+                    self.indices = indices_data
+            else:
+                self.indices = None
+            if show_logs:
+                print(f"Loaded reconstruction results and indices from {results_dir}")
 
+    def _convexReconPython(self, withTumor, show_logs=True):
         if self.optimizer == OptimizerType.CP_TV:
             if withTumor:
                 self.reconPhantom, self.indices = CP_TV(
-   [old lines 146-155, the previous argument list, removed; content not shown in the source rendering]
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
+                )
             else:
                 self.reconLaser, self.indices = CP_TV(
-   [old lines 158-167, the previous argument list, removed; content not shown in the source rendering]
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withoutTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
+                )
         elif self.optimizer == OptimizerType.CP_KL:
             if withTumor:
                 self.reconPhantom, self.indices = CP_KL(
-   [old lines 171-179, the previous argument list, removed; content not shown in the source rendering]
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
                 )
             else:
                 self.reconLaser, self.indices = CP_KL(
-   [old lines 183-191, the previous argument list, removed; content not shown in the source rendering]
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withoutTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
                 )
         else:
             raise ValueError(f"Optimizer value must be CP_TV or CP_KL, got {self.optimizer}")

AOT_biomaps/AOT_Recon/ReconEnums.py

@@ -354,8 +354,33 @@ class NoiseType(Enum):
     - None: No noise is applied.
     """
     POISSON = 'poisson'
-    """Poisson noise
+    """Poisson noise."""
     GAUSSIAN = 'gaussian'
-    """Gaussian noise
+    """Gaussian noise."""
     None_ = 'none'
     """No noise is applied."""
+
+class SMatrixType(Enum):
+    """
+    Enum for different sparsing methods used in reconstructions.
+
+    Selection of sparsing methods:
+    - Thresholding: Sparsing based on a threshold value.
+    - TopK: Sparsing by retaining the top K values.
+    - None: No sparsing is applied.
+    """
+    DENSE = 'DENSE'
+    """No sparsing is applied."""
+    CSR = 'CSR'
+    """Sparsing based on a threshold value."""
+    COO = 'COO'
+    """Sparsing by retaining the top K values."""
+    SELL = 'SELL'
+    """Sparsing using sell C sigma method.
+    Optimized variant of ELLPACK, dividing the matrix into fixed-size "chunks" of `C` rows.
+    Non-zero elements are sorted by column within each chunk to improve memory coalescing on GPUs.
+    Rows are padded with zeros to align their length to the longest row in the chunk.
+    ** Ref : Kreutzer, M., Hager, G., Wellein, G., Fehske, H., & Bishop, A. R. (2014).
+    "A Unified Sparse Matrix Data Format for Efficient General Sparse Matrix-Vector Multiply on Modern Processors".
+    ACM Transactions on Mathematical Software, 41(2), 1–24. DOI: 10.1145/2592376.
+    """
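
To make the SELL layout above concrete, here is a small self-contained NumPy sketch (not taken from the package) that builds padded per-slice storage for a toy matrix with slice height C = 2. Real SELL-C-sigma stores each slice column-major and may reorder rows within a sorting window sigma; this sketch only illustrates the per-slice zero padding described in the docstring.

import numpy as np

# Toy 4x6 matrix, slice height C = 2 (each slice groups C consecutive rows).
A = np.array([
    [0, 3, 0, 0, 1, 0],
    [2, 0, 0, 0, 0, 0],
    [0, 0, 5, 4, 0, 7],
    [0, 6, 0, 0, 0, 0],
], dtype=np.float32)
C = 2

values, colinds, slice_ptr, slice_len = [], [], [0], []
for s in range(0, A.shape[0], C):
    rows = [np.nonzero(A[r])[0] for r in range(s, s + C)]
    width = max(len(cols) for cols in rows)              # longest row in the slice
    slice_len.append(width)
    for r, cols in zip(range(s, s + C), rows):
        padded_vals = np.zeros(width, dtype=np.float32)  # rows padded with zeros
        padded_cols = np.zeros(width, dtype=np.int32)
        padded_vals[:len(cols)] = A[r, cols]
        padded_cols[:len(cols)] = cols
        values.append(padded_vals)
        colinds.append(padded_cols)
    slice_ptr.append(slice_ptr[-1] + C * width)

values, colinds = np.concatenate(values), np.concatenate(colinds)
print(slice_ptr, slice_len)   # [0, 4, 10] and [2, 3] for this toy matrix
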

AOT_biomaps/AOT_Recon/ReconTools.py

@@ -1,9 +1,11 @@
 import os
+from AOT_biomaps.AOT_Recon.AOT_SparseSMatrix import SparseSMatrix_CSR, SparseSMatrix_SELL
 import torch
 import numpy as np
+import pycuda.driver as drv
 from numba import njit, prange
 from torch_sparse import coalesce
-
+from scipy.signal.windows import hann
 
 def load_recon(hdr_path):
     """

@@ -78,7 +80,7 @@ def load_recon(hdr_path):
     rescale_offset = float(header.get('data rescale offset', 0))
     image = image * rescale_slope + rescale_offset
 
-    return image
+    return image
 
 def mse(y_true, y_pred):
     """

@@ -150,20 +152,82 @@ def ssim(img1, img2, win_size=7, k1=0.01, k2=0.03, L=1.0):
     return np.mean(ssim_map)
 
 def calculate_memory_requirement(SMatrix, y):
-    """
-   [old lines 154-156 removed; content not shown in the source rendering]
+    """
+    Compute the required memory (in GB) for:
+    - SMatrix: matrix (np.ndarray, CuPy CSR, SparseSMatrix_CSR, or SparseSMatrix_SELL)
+    - y: vector (NumPy or CuPy, float32)
+
+    Args:
+        SMatrix: Matrix object (np.ndarray, cpsparse.csr_matrix, SparseSMatrix_CSR, or SparseSMatrix_SELL)
+        y: Vector (float32)
+    """
+    total_bytes = 0
+
+    # --- 1. Memory for SMatrix ---
+
+    # 1.1. Custom Sparse Matrix (SELL/CSR)
+    if isinstance(SMatrix, (SparseSMatrix_SELL, SparseSMatrix_CSR)):
+        # We rely on the getMatrixSize method, which we fixed to track all host/GPU bytes.
+        # This is the most reliable way to estimate memory for custom GPU-backed structures.
+        try:
+            matrix_size_gb = SMatrix.getMatrixSize()
+            if isinstance(matrix_size_gb, dict) and 'error' in matrix_size_gb:
+                raise ValueError(f"SMatrix allocation error: {matrix_size_gb['error']}")
+
+            # Convert GB back to bytes (1 GB = 1024^3 bytes)
+            size_SMatrix = matrix_size_gb * (1024 ** 3)
+            total_bytes += size_SMatrix
+            print(f"SMatrix (Custom Sparse) size: {matrix_size_gb:.3f} GB")
+
+        except AttributeError:
+            raise AttributeError("Custom Sparse Matrix must implement the getMatrixSize() method.")
+
+    # 1.2. NumPy Dense Array (Standard)
+    elif isinstance(SMatrix, np.ndarray):
+        # Dense NumPy array (float32)
+        size_SMatrix = SMatrix.nbytes
+        total_bytes += size_SMatrix
+        print(f"SMatrix (NumPy Dense) size: {size_SMatrix / (1024 ** 3):.3f} GB")
+
+    # 1.3. CuPy CSR Matrix (Standard Sparse CuPy)
+    # Note: Requires CuPy to be imported, which is usually done outside this function.
+    # Assuming 'cpsparse.csr_matrix' is available in the environment if this path is taken.
+    elif 'cupy.sparse' in str(type(SMatrix)):  # Using string check for type safety outside CuPy context
+        # CuPy CSR matrix structure: data (float32), indices (int32), indptr (int32)
+        nnz = SMatrix.nnz
+        num_rows = SMatrix.shape[0]
+        size_data = nnz * 4  # float32 = 4 bytes
+        size_indices = nnz * 4  # int32 = 4 bytes
+        size_indptr = (num_rows + 1) * 4  # int32 = 4 bytes
+        size_SMatrix = size_data + size_indices + size_indptr
+        total_bytes += size_SMatrix
+        print(f"SMatrix (CuPy CSR) size: {size_SMatrix / (1024 ** 3):.3f} GB")
+
+    else:
+        raise ValueError("SMatrix must be a np.ndarray, cpsparse.csr_matrix, or a custom SparseSMatrix object (CSR/SELL).")
+
+    # --- 2. Memory for Vector y ---
+
+    # Check if y is a CuPy array or NumPy array (assuming float32 based on docstring)
+    if hasattr(y, 'nbytes'):
+        size_y = y.nbytes
+        total_bytes += size_y
+        print(f"Vector y size: {size_y / (1024 ** 3):.3f} GB")
+    else:
+        # Fallback if object doesn't expose nbytes (e.g., custom buffer), but usually array objects do.
+        raise ValueError("Vector y must be an array type exposing the .nbytes attribute.")
+
 
-    #
-
-    return total_memory
+    # --- 3. Final Result ---
+    return total_bytes / (1024 ** 3)
 
-   [old line 162 removed; content not shown in the source rendering]
+
+def check_gpu_memory(device_index, required_memory, show_logs=True):
     """Check if enough memory is available on the specified GPU."""
-    free_memory,   [line truncated in the source rendering]
+    free_memory, _ = torch.cuda.mem_get_info(f"cuda:{device_index}")
     free_memory_gb = free_memory / 1024**3
-
+    if show_logs:
+        print(f"Free memory on GPU {device_index}: {free_memory_gb:.2f} GB, Required memory: {required_memory:.2f} GB")
     return free_memory_gb >= required_memory
 
 @njit(parallel=True)
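
A minimal usage sketch (not from the package) of how calculate_memory_requirement and check_gpu_memory could be chained before launching a GPU reconstruction; the dense SMatrix and y below are hypothetical placeholders:

import numpy as np

SMatrix = np.zeros((2000, 4096), dtype=np.float32)   # hypothetical dense system matrix
y = np.zeros(2000, dtype=np.float32)                 # hypothetical measurement vector

required_gb = calculate_memory_requirement(SMatrix, y)   # prints per-component sizes, returns GB
if check_gpu_memory(device_index=0, required_memory=required_gb):
    print("Enough free memory on GPU 0, proceeding with the reconstruction.")
else:
    print("Not enough free GPU memory, consider a sparse SMatrix format (CSR/SELL).")
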

@@ -270,3 +334,156 @@ def prox_F_star(y, sigma, a):
 def prox_G(x, tau, K):
     return torch.clamp(x - tau * K, min=0)
 
+def filter_radon(f, N, filter_type, Fc):
+    """
+    Implements the filters for filtered backprojection (iRadon).
+    Inspired by the MATLAB function FilterRadon by Mamouna Bocoum.
+
+    Parameters:
+    ------------
+    f : np.ndarray
+        Vector of frequencies (e.g. f_t or f_z).
+    N : int
+        Filter size (length of f).
+    filter_type : str
+        Filter type: 'ram-lak', 'shepp-logan', 'cosine', 'hamming', 'hann'.
+    Fc : float
+        Cutoff frequency.
+
+    Returns:
+    -----------
+    FILTER : np.ndarray
+        Filter applied to the frequencies.
+    """
+    FILTER = np.abs(f)
+
+    if filter_type == 'ram-lak':
+        pass  # FILTER = |f| (already computed)
+    elif filter_type == 'shepp-logan':
+        # Avoid division by zero
+        with np.errstate(divide='ignore', invalid='ignore'):
+            FILTER = FILTER * (np.sinc(2 * f / (2 * Fc)))  # sin(2πf/(2Fc))/(2πf/(4Fc)) = sinc(2f/(2Fc))
+        FILTER[np.isnan(FILTER)] = 1.0  # For f=0
+    elif filter_type == 'cosine':
+        FILTER = FILTER * np.cos(2 * np.pi * f / (4 * Fc))
+    elif filter_type == 'hamming':
+        FILTER = FILTER * (0.54 + 0.46 * np.cos(2 * np.pi * f / Fc))
+    elif filter_type == 'hann':
+        FILTER = FILTER * (1 + np.cos(2 * np.pi * f / (4 * Fc))) / 2
+    else:
+        raise ValueError(f"Unknown filter type: {filter_type}")
+
+    # Cut off frequencies beyond Fc
+    FILTER[np.abs(f) > Fc] = 0
+    # Exponential attenuation (optional, as in the MATLAB code)
+    FILTER = FILTER * np.exp(-2 * (np.abs(f) / Fc)**10)
+
+    return FILTER
+
+def compute_TV_cpu(x, Z, X, isotropic=False):
+    """
+    Compute total variation of x (1D flattened of shape Z*X).
+    isotropic=False -> anisotropic (sum |dx| + |dy|)
+    isotropic=True  -> isotropic sqrt(dx^2 + dy^2)
+    """
+    x2d = x.reshape(Z, X)
+    dx = np.diff(x2d, axis=1)
+    dy = np.diff(x2d, axis=0)
+    if isotropic:
+        # pad to original size for consistent measure (we only need sum of norms)
+        mags = np.sqrt(dx**2 + dy**2)
+        return float(np.sum(mags))
+    else:
+        return float(np.sum(np.abs(dx)) + np.sum(np.abs(dy)))
+
+def get_apodization_vector_gpu(matrix_sparse_obj):
+    """
+    Generates a 2D (Hanning) window vector for apodizing the system matrix A
+    and transfers it to the GPU.
+    This vector must be multiplied with the columns of A (Z*X pixels).
+    """
+    Z = matrix_sparse_obj.Z
+    X = matrix_sparse_obj.X
+
+    # 1. Generate the 1D windows along the X and Z axes
+    # Strong lateral apodization (X) to target the border artifact.
+    fenetre_x = hann(X).astype(np.float32)
+
+    # Uniform window in depth (Z), since the artifact is mostly lateral.
+    fenetre_z = np.ones(Z, dtype=np.float32)
+
+    # 2. Build the 2D window matrix (Z, X)
+    fenetre_2d = np.outer(fenetre_z, fenetre_x)
+
+    # 3. Vectorize (Z*X)
+    fenetre_vectorisee = fenetre_2d.flatten()
+
+    # 4. Transfer to GPU (contiguous memory)
+    fenetre_gpu = drv.mem_alloc(fenetre_vectorisee.nbytes)
+    drv.memcpy_htod(fenetre_gpu, fenetre_vectorisee)
+
+    print(f"✅ Window vector (Z*X={Z*X}) generated and transferred to GPU.")
+
+    return fenetre_gpu
+
+def _call_axpby(axpby_kernel, out_ptr, x_ptr, y_ptr, a, b, N, stream, block):
+    grid = ((int(N) + block - 1) // block, 1, 1)
+    axpby_kernel(out_ptr, x_ptr, y_ptr,
+                 np.float32(a), np.float32(b),
+                 np.int32(N),
+                 block=(block, 1, 1), grid=grid, stream=stream)
+
+def _call_minus_axpy(minus_kernel, out_ptr, z_ptr, a, N, stream, block):
+    grid = ((int(N) + block - 1) // block, 1, 1)
+    minus_kernel(out_ptr, z_ptr, np.float32(a), np.int32(N),
+                 block=(block, 1, 1), grid=grid, stream=stream)
+
+def power_method_estimate_L__SELL(SMatrix, stream, n_it=20, block_size=256):
+    """Estimate ||A||^2 using power method (uses your projection/backprojection kernels)."""
+    TN = int(SMatrix.N * SMatrix.T)
+    ZX = int(SMatrix.Z * SMatrix.X)
+    proj = SMatrix.sparse_mod.get_function("projection_kernel__SELL")
+    back = SMatrix.sparse_mod.get_function("backprojection_kernel__SELL")
+    TN_i = np.int32(TN)
+    ZX_i = np.int32(ZX)
+    slice_h = np.int32(SMatrix.slice_height)
+    grid_rows = ((TN + block_size - 1) // block_size, 1, 1)
+    block_1D = (block_size, 1, 1)
+
+    dtype = np.float32
+    x_host = np.random.randn(ZX).astype(dtype)
+    x_host /= np.linalg.norm(x_host) + 1e-12
+    x_gpu = drv.mem_alloc(x_host.nbytes)
+    drv.memcpy_htod_async(x_gpu, x_host, stream)
+    q_gpu = drv.mem_alloc(TN * np.dtype(dtype).itemsize)
+    ATq_gpu = drv.mem_alloc(ZX * np.dtype(dtype).itemsize)
+    ATq_host = np.empty(ZX, dtype=dtype)
+
+    for _ in range(n_it):
+        proj(q_gpu, SMatrix.sell_values_gpu, SMatrix.sell_colinds_gpu, SMatrix.slice_ptr_gpu, SMatrix.slice_len_gpu,
+             x_gpu, TN_i, slice_h, block=block_1D, grid=grid_rows, stream=stream)
+        drv.memset_d32_async(ATq_gpu, 0, ZX, stream)
+        back(SMatrix.sell_values_gpu, SMatrix.sell_colinds_gpu, SMatrix.slice_ptr_gpu, SMatrix.slice_len_gpu,
+             q_gpu, ATq_gpu, TN_i, slice_h, block=block_1D, grid=grid_rows, stream=stream)
+        stream.synchronize()
+        drv.memcpy_dtoh(ATq_host, ATq_gpu)
+        norm = np.linalg.norm(ATq_host)
+        if norm < 1e-12:
+            break
+        x_host = ATq_host / norm
+        drv.memcpy_htod_async(x_gpu, x_host, stream)
+    # final Rayleigh quotient
+    proj(q_gpu, SMatrix.sell_values_gpu, SMatrix.sell_colinds_gpu, SMatrix.slice_ptr_gpu, SMatrix.slice_len_gpu,
+         x_gpu, TN_i, slice_h, block=block_1D, grid=grid_rows, stream=stream)
+    drv.memset_d32_async(ATq_gpu, 0, ZX, stream)
+    back(SMatrix.sell_values_gpu, SMatrix.sell_colinds_gpu, SMatrix.slice_ptr_gpu, SMatrix.slice_len_gpu,
+         q_gpu, ATq_gpu, TN_i, slice_h, block=block_1D, grid=grid_rows, stream=stream)
+    stream.synchronize()
+    drv.memcpy_dtoh(ATq_host, ATq_gpu)
+    L_sq = float(np.dot(x_host, ATq_host))
+    for g in (x_gpu, q_gpu, ATq_gpu):
+        try:
+            g.free()
+        except:
+            pass
+    return max(L_sq, 1e-6)