AOT-biomaps 2.9.138-py3-none-any.whl → 2.9.279-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of AOT-biomaps might be problematic.

Files changed (31)
  1. AOT_biomaps/AOT_Acoustic/AcousticTools.py +35 -115
  2. AOT_biomaps/AOT_Acoustic/StructuredWave.py +2 -2
  3. AOT_biomaps/AOT_Acoustic/_mainAcoustic.py +22 -18
  4. AOT_biomaps/AOT_Experiment/Tomography.py +74 -4
  5. AOT_biomaps/AOT_Experiment/_mainExperiment.py +102 -68
  6. AOT_biomaps/AOT_Optic/_mainOptic.py +124 -58
  7. AOT_biomaps/AOT_Recon/AOT_Optimizers/DEPIERRO.py +72 -108
  8. AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py +474 -289
  9. AOT_biomaps/AOT_Recon/AOT_Optimizers/MAPEM.py +173 -68
  10. AOT_biomaps/AOT_Recon/AOT_Optimizers/MLEM.py +360 -154
  11. AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py +150 -111
  12. AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/RelativeDifferences.py +10 -14
  13. AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_CSR.py +281 -0
  14. AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_SELL.py +328 -0
  15. AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/__init__.py +2 -0
  16. AOT_biomaps/AOT_Recon/AOT_biomaps_kernels.cubin +0 -0
  17. AOT_biomaps/AOT_Recon/AlgebraicRecon.py +359 -238
  18. AOT_biomaps/AOT_Recon/AnalyticRecon.py +29 -41
  19. AOT_biomaps/AOT_Recon/BayesianRecon.py +165 -91
  20. AOT_biomaps/AOT_Recon/DeepLearningRecon.py +4 -1
  21. AOT_biomaps/AOT_Recon/PrimalDualRecon.py +175 -31
  22. AOT_biomaps/AOT_Recon/ReconEnums.py +38 -3
  23. AOT_biomaps/AOT_Recon/ReconTools.py +184 -77
  24. AOT_biomaps/AOT_Recon/__init__.py +1 -0
  25. AOT_biomaps/AOT_Recon/_mainRecon.py +144 -74
  26. AOT_biomaps/__init__.py +4 -36
  27. {aot_biomaps-2.9.138.dist-info → aot_biomaps-2.9.279.dist-info}/METADATA +2 -1
  28. aot_biomaps-2.9.279.dist-info/RECORD +47 -0
  29. aot_biomaps-2.9.138.dist-info/RECORD +0 -43
  30. {aot_biomaps-2.9.138.dist-info → aot_biomaps-2.9.279.dist-info}/WHEEL +0 -0
  31. {aot_biomaps-2.9.138.dist-info → aot_biomaps-2.9.279.dist-info}/top_level.txt +0 -0
@@ -60,7 +60,17 @@ class OptimizerType(Enum):
     This optimizer is compatible with both histogram and list-mode data.
     This optimizer is compatible with both emission and transmission data.
     """
-    LS_TV = 'LS_TV'
+    CP_TV = 'CP_TV'
+    """
+    This optimizer implements the Chambolle-Pock algorithm for total variation regularization.
+    It is suitable for problems where the objective function includes a total variation term.
+    It is particularly effective for preserving edges while reducing noise in the reconstructed image.
+    """
+    CP_KL = 'CP_KL'
+    """
+    This optimizer implements the Kullback-Leibler divergence for regularization.
+    It is suitable for problems where the objective function includes a Kullback-Leibler divergence term.
+    """
     LS = 'LS'
     """
     This optimizer implements the standard Landweber algorithm for least-squares optimization.
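Note on the new CP_TV / CP_KL entries above: Chambolle-Pock is a primal-dual splitting method, and the package's own implementation lives in PrimalDualRecon.py (also touched in this release). The snippet below is only a minimal, self-contained sketch of one Chambolle-Pock loop with a TV penalty and a least-squares data term, using a hypothetical dense forward operator A and placeholder step sizes; it is not the package's code.

    # Illustrative Chambolle-Pock / TV sketch (not the package's PrimalDualRecon code).
    # A is a hypothetical dense forward operator of shape (M, Z*X); y the measured data.
    import torch

    def tv_grad(u):                      # forward differences, zero at the border
        gx = torch.zeros_like(u); gy = torch.zeros_like(u)
        gx[:, :-1] = u[:, 1:] - u[:, :-1]
        gy[:-1, :] = u[1:, :] - u[:-1, :]
        return torch.stack((gx, gy))

    def tv_div(p):                       # negative adjoint of tv_grad
        gx, gy = p[0], p[1]
        d = torch.zeros_like(gx)
        d[:, :-1] += gx[:, :-1]; d[:, 1:] -= gx[:, :-1]
        d[:-1, :] += gy[:-1, :]; d[1:, :] -= gy[:-1, :]
        return d

    def chambolle_pock_tv(A, y, Z, X, lam=0.1, n_iter=100):
        L = torch.linalg.matrix_norm(A, 2) + 2.83        # crude bound on ||[A; grad]||
        tau = sigma = 1.0 / L
        x = torch.zeros(Z, X); x_bar = x.clone()
        q = torch.zeros(y.shape)                         # dual variable for the data term
        p = torch.zeros(2, Z, X)                         # dual variable for the TV term
        for _ in range(n_iter):
            q = (q + sigma * (A @ x_bar.ravel() - y)) / (1 + sigma)     # prox of (0.5||.-y||^2)*
            p = p + sigma * tv_grad(x_bar)
            p = p / torch.clamp(p.pow(2).sum(0, keepdim=True).sqrt() / lam, min=1.0)  # project onto ||.||<=lam
            x_new = x + tau * (tv_div(p) - (A.T @ q).reshape(Z, X))     # primal step
            x_new = torch.clamp(x_new, min=0)                           # nonnegativity
            x_bar = 2 * x_new - x                                       # over-relaxation
            x = x_new
        return x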
@@ -344,8 +354,33 @@ class NoiseType(Enum):
     - None: No noise is applied.
     """
     POISSON = 'poisson'
-    """Poisson noise, typically used for emission data."""
+    """Poisson noise."""
     GAUSSIAN = 'gaussian'
-    """Gaussian noise, typically used for transmission data."""
+    """Gaussian noise."""
     None_ = 'none'
     """No noise is applied."""
+
+class SMatrixType(Enum):
+    """
+    Enum for different sparsing methods used in reconstructions.
+
+    Selection of sparsing methods:
+    - Thresholding: Sparsing based on a threshold value.
+    - TopK: Sparsing by retaining the top K values.
+    - None: No sparsing is applied.
+    """
+    DENSE = 'DENSE'
+    """No sparsing is applied."""
+    CSR = 'CSR'
+    """Sparsing based on a threshold value."""
+    COO = 'COO'
+    """Sparsing by retaining the top K values."""
+    SELL = 'SELL'
+    """Sparsing using sell C sigma method.
+    Optimized variant of ELLPACK, dividing the matrix into fixed-size "chunks" of `C` rows.
+    Non-zero elements are sorted by column within each chunk to improve memory coalescing on GPUs.
+    Rows are padded with zeros to align their length to the longest row in the chunk.
+    ** Ref : Kreutzer, M., Hager, G., Wellein, G., Fehske, H., & Bishop, A. R. (2014).
+    "A Unified Sparse Matrix Data Format for Efficient General Sparse Matrix-Vector Multiply on Modern Processors".
+    ACM Transactions on Mathematical Software, 41(2), 1–24. DOI: 10.1145/2592376.
+    """
@@ -1,9 +1,11 @@
 import os
+from AOT_biomaps.AOT_Recon.AOT_SparseSMatrix import SparseSMatrix_CSR, SparseSMatrix_SELL
 import torch
 import numpy as np
+import pycuda.driver as drv
 from numba import njit, prange
 from torch_sparse import coalesce
-import torch.nn.functional as F
+from scipy.signal.windows import hann
 
 def load_recon(hdr_path):
     """
@@ -78,7 +80,7 @@ def load_recon(hdr_path):
     rescale_offset = float(header.get('data rescale offset', 0))
     image = image * rescale_slope + rescale_offset
 
-    return image.T
+    return image
 
 def mse(y_true, y_pred):
     """
@@ -150,20 +152,82 @@ def ssim(img1, img2, win_size=7, k1=0.01, k2=0.03, L=1.0):
     return np.mean(ssim_map)
 
 def calculate_memory_requirement(SMatrix, y):
-    """Calculate the memory requirement for the given matrices in GB."""
-    num_elements_SMatrix = SMatrix.size
-    num_elements_y = y.size
-    num_elements_theta = SMatrix.shape[1] * SMatrix.shape[2]  # Assuming theta has shape (Z, X)
+    """
+    Computes the required memory (in GB) for:
+    - SMatrix: matrix (np.ndarray, CuPy CSR, SparseSMatrix_CSR or SparseSMatrix_SELL)
+    - y: vector (NumPy or CuPy, float32)
+
+    Args:
+        SMatrix: Matrix object (np.ndarray, cpsparse.csr_matrix, SparseSMatrix_CSR, or SparseSMatrix_SELL)
+        y: Vector (float32)
+    """
+    total_bytes = 0
+
+    # --- 1. Memory for SMatrix ---
+
+    # 1.1. Custom Sparse Matrix (SELL/CSR)
+    if isinstance(SMatrix, (SparseSMatrix_SELL, SparseSMatrix_CSR)):
+        # We rely on the getMatrixSize method, which we fixed to track all host/GPU bytes.
+        # This is the most reliable way to estimate memory for custom GPU-backed structures.
+        try:
+            matrix_size_gb = SMatrix.getMatrixSize()
+            if isinstance(matrix_size_gb, dict) and 'error' in matrix_size_gb:
+                raise ValueError(f"SMatrix allocation error: {matrix_size_gb['error']}")
+
+            # Convert GB back to bytes (1 GB = 1024^3 bytes)
+            size_SMatrix = matrix_size_gb * (1024 ** 3)
+            total_bytes += size_SMatrix
+            print(f"SMatrix (Custom Sparse) size: {matrix_size_gb:.3f} GB")
+
+        except AttributeError:
+            raise AttributeError("Custom Sparse Matrix must implement the getMatrixSize() method.")
+
+    # 1.2. NumPy Dense Array (Standard)
+    elif isinstance(SMatrix, np.ndarray):
+        # Dense NumPy array (float32)
+        size_SMatrix = SMatrix.nbytes
+        total_bytes += size_SMatrix
+        print(f"SMatrix (NumPy Dense) size: {size_SMatrix / (1024 ** 3):.3f} GB")
+
+    # 1.3. CuPy CSR Matrix (Standard Sparse CuPy)
+    # Note: Requires CuPy to be imported, which is usually done outside this function.
+    # Assuming 'cpsparse.csr_matrix' is available in the environment if this path is taken.
+    elif 'cupy.sparse' in str(type(SMatrix)):  # Using string check for type safety outside CuPy context
+        # CuPy CSR matrix structure: data (float32), indices (int32), indptr (int32)
+        nnz = SMatrix.nnz
+        num_rows = SMatrix.shape[0]
+        size_data = nnz * 4               # float32 = 4 bytes
+        size_indices = nnz * 4            # int32 = 4 bytes
+        size_indptr = (num_rows + 1) * 4  # int32 = 4 bytes
+        size_SMatrix = size_data + size_indices + size_indptr
+        total_bytes += size_SMatrix
+        print(f"SMatrix (CuPy CSR) size: {size_SMatrix / (1024 ** 3):.3f} GB")
+
+    else:
+        raise ValueError("SMatrix must be a np.ndarray, cpsparse.csr_matrix, or a custom SparseSMatrix object (CSR/SELL).")
+
+    # --- 2. Memory for Vector y ---
+
+    # Check if y is a CuPy array or NumPy array (assuming float32 based on docstring)
+    if hasattr(y, 'nbytes'):
+        size_y = y.nbytes
+        total_bytes += size_y
+        print(f"Vector y size: {size_y / (1024 ** 3):.3f} GB")
+    else:
+        # Fallback if object doesn't expose nbytes (e.g., custom buffer), but usually array objects do.
+        raise ValueError("Vector y must be an array type exposing the .nbytes attribute.")
+
 
-    # Calculate total memory requirement in GB
-    total_memory = (num_elements_SMatrix + num_elements_y + num_elements_theta) * 32 / 8 / 1024**3
-    return total_memory
+    # --- 3. Final Result ---
+    return total_bytes / (1024 ** 3)
 
-def check_gpu_memory(device_index, required_memory):
+
+def check_gpu_memory(device_index, required_memory, show_logs=True):
     """Check if enough memory is available on the specified GPU."""
-    free_memory, total_memory = torch.cuda.mem_get_info(f"cuda:{device_index}")
+    free_memory, _ = torch.cuda.mem_get_info(f"cuda:{device_index}")
     free_memory_gb = free_memory / 1024**3
-    print(f"Free memory on GPU {device_index}: {free_memory_gb:.2f} GB, Required memory: {required_memory:.2f} GB")
+    if show_logs:
+        print(f"Free memory on GPU {device_index}: {free_memory_gb:.2f} GB, Required memory: {required_memory:.2f} GB")
     return free_memory_gb >= required_memory
 
 @njit(parallel=True)
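A possible calling pattern for the two helpers above, checking the estimated footprint against free GPU memory before transferring data (shapes, device index and the dense example matrix are made up for the illustration; a CUDA-enabled PyTorch build is assumed):

    # Hypothetical usage of calculate_memory_requirement / check_gpu_memory from the diff above.
    import numpy as np

    SMatrix = np.zeros((2000, 128, 128), dtype=np.float32)   # dense example system matrix
    y = np.zeros(2000, dtype=np.float32)                     # measurement vector

    required_gb = calculate_memory_requirement(SMatrix, y)
    if not check_gpu_memory(0, required_gb, show_logs=True):
        raise MemoryError(f"GPU 0 cannot hold the required {required_gb:.2f} GB")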
@@ -210,86 +274,53 @@ def _build_adjacency_sparse(Z, X, device, corner=(0.5 - np.sqrt(2) / 4) / np.sqr
     return index, values
 
 
-def power_method(P, PT, data, Z, X, n_it=10, isGPU=False):
-    x = PT(data)
-    x = x.reshape(Z, X)
+def power_method(P, PT, data, Z, X, n_it=10):
+    x = torch.randn(Z * X, device=data.device)
+    x = x / torch.norm(x)
     for _ in range(n_it):
-        grad = gradient_gpu(x) if isGPU else gradient_cpu(x)
-        div = div_gpu(grad) if isGPU else div_cpu(grad)
-        x = PT(P(x.ravel())) - div.ravel()
-        s = torch.sqrt(torch.sum(x**2))
-        x /= s
-        x = x.reshape(Z, X)
-    return torch.sqrt(s)
+        Ax = P(x)
+        ATax = PT(Ax)
+        x = ATax / torch.norm(ATax)
+    ATax = PT(P(x))
+    return torch.sqrt(torch.dot(x, ATax))
 
 def proj_l2(p, alpha):
-    norm = torch.sqrt(torch.sum(p**2, dim=0, keepdim=True))
-    return p * alpha / torch.max(norm, torch.tensor(alpha, device=p.device))
+    if alpha <= 0:
+        return torch.zeros_like(p)
+    norm = torch.sqrt(torch.sum(p**2, dim=0, keepdim=True) + 1e-12)
+    return p * torch.min(norm, torch.tensor(alpha, device=p.device)) / (norm + 1e-12)
 
-def norm2sq(x):
-    return torch.sum(x**2)
-
-def norm1(x):
-    return torch.sum(torch.abs(x))
-
-def gradient_cpu(x):
+def gradient(x):
     grad_x = torch.zeros_like(x)
     grad_y = torch.zeros_like(x)
-
-    grad_x[:-1, :] = x[1:, :] - x[:-1, :]
-    grad_y[:, :-1] = x[:, 1:] - x[:, :-1]
-
+    grad_x[:, :-1] = x[:, 1:] - x[:, :-1]  # horizontal gradient
+    grad_y[:-1, :] = x[1:, :] - x[:-1, :]  # vertical gradient
     return torch.stack((grad_x, grad_y), dim=0)
 
-def div_cpu(x):
+def div(x):
     if x.dim() == 3:
-        x = x.unsqueeze(0)  # becomes [1, 2, H, W]
+        x = x.unsqueeze(0)  # add a batch dimension if needed
 
-    gx = x[:, 0:1, :, :]  # horizontal gradient
-    gy = x[:, 1:2, :, :]  # vertical gradient
+    gx = x[:, 0, :, :]  # horizontal gradient (shape: [1, H, W] or [H, W])
+    gy = x[:, 1, :, :]  # vertical gradient (shape: [1, H, W] or [H, W])
 
-    # Define the divergence kernels
-    kernel_x = torch.tensor([[[[1.0], [-1.0]]]], dtype=torch.float32)
-    kernel_y = torch.tensor([[[[1.0, -1.0]]]], dtype=torch.float32)
+    # Divergence of the horizontal gradient (gx)
+    div_x = torch.zeros_like(gx)
+    div_x[:, :, 1:] += gx[:, :, :-1]   # positive contribution (left)
+    div_x[:, :, :-1] -= gx[:, :, :-1]  # negative contribution (right)
 
-    # Apply the convolution
-    div_x = F.conv2d(gx, kernel_x, padding=(1, 0))
-    div_y = F.conv2d(gy, kernel_y, padding=(0, 1))
+    # Divergence of the vertical gradient (gy)
+    div_y = torch.zeros_like(gy)
+    div_y[:, 1:, :] += gy[:, :-1, :]   # positive contribution (top)
+    div_y[:, :-1, :] -= gy[:, :-1, :]  # negative contribution (bottom)
 
-    # Crop back to the same size (H, W)
-    H, W = x.shape[2:]
-    div_x = div_x[:, :, :H, :]
-    div_y = div_y[:, :, :, :W]
-
-    return -(div_x + div_y).squeeze()
-
-def gradient_gpu(x):
-    grad_x = torch.zeros_like(x)
-    grad_y = torch.zeros_like(x)
-    grad_x[:-1, :] = x[1:, :] - x[:-1, :]
-    grad_y[:, :-1] = x[:, 1:] - x[:, :-1]
-    return torch.stack((grad_x, grad_y), dim=0)
+    return -(div_x + div_y)
 
-def div_gpu(x):
-    if x.dim() == 3:
-        x = x.unsqueeze(0)  # becomes [1, 2, H, W]
-    gx = x[:, 0:1, :, :]  # horizontal gradient
-    gy = x[:, 1:2, :, :]  # vertical gradient
-
-    # Define the divergence kernels
-    kernel_x = torch.tensor([[[[1.0], [-1.0]]]], dtype=torch.float32, device=x.device)
-    kernel_y = torch.tensor([[[[1.0, -1.0]]]], dtype=torch.float32, device=x.device)
-
-    # Apply the convolution
-    div_x = F.conv2d(gx, kernel_x, padding=(1, 0))
-    div_y = F.conv2d(gy, kernel_y, padding=(0, 1))
-
-    # Crop back to the same size (H, W)
-    H, W = x.shape[2:]
-    div_x = div_x[:, :, :H, :]
-    div_y = div_y[:, :, :, :W]
+def norm2sq(x):
+    return torch.sum(x**2)
 
-    return -(div_x + div_y).squeeze()
+def norm1(x):
+    return torch.sum(torch.abs(x))
 
 def KL_divergence(Ax, y):
     return torch.sum(Ax - y * torch.log(Ax + 1e-10))
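The rewritten power_method now performs plain power iteration on PT∘P starting from a random vector, so a quick sanity check is to wrap a small random matrix as P/PT and compare the estimate with the largest singular value. This is only a test sketch around the function shown in the diff, not package code:

    # Test sketch for the new power_method (names as in the diff above).
    import torch

    torch.manual_seed(0)
    Z, X, M = 8, 8, 32
    A = torch.randn(M, Z * X)
    P = lambda v: A @ v          # forward operator
    PT = lambda w: A.T @ w       # adjoint
    data = torch.zeros(M)        # only used for its .device inside power_method

    est = power_method(P, PT, data, Z, X, n_it=50)
    ref = torch.linalg.svdvals(A)[0]
    print(f"power_method estimate: {est.item():.4f}  vs  largest singular value: {ref.item():.4f}")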
@@ -303,3 +334,79 @@ def prox_F_star(y, sigma, a):
 def prox_G(x, tau, K):
     return torch.clamp(x - tau * K, min=0)
 
+def filter_radon(f, N, filter_type, Fc):
+    """
+    Implements the filters for filtered backprojection (iRadon).
+    Inspired by the MATLAB function FilterRadon by Mamouna Bocoum.
+
+    Parameters:
+    ------------
+    f : np.ndarray
+        Frequency vector (e.g. f_t or f_z).
+    N : int
+        Filter size (length of f).
+    filter_type : str
+        Filter type: 'ram-lak', 'shepp-logan', 'cosine', 'hamming', 'hann'.
+    Fc : float
+        Cut-off frequency.
+
+    Returns:
+    -----------
+    FILTER : np.ndarray
+        Filter applied to the frequencies.
+    """
+    FILTER = np.abs(f)
+
+    if filter_type == 'ram-lak':
+        pass  # FILTER = |f| (already computed)
+    elif filter_type == 'shepp-logan':
+        # Avoid division by zero
+        with np.errstate(divide='ignore', invalid='ignore'):
+            FILTER = FILTER * (np.sinc(2 * f / (2 * Fc)))  # sin(2πf/(2Fc))/(2πf/(4Fc)) = sinc(2f/(2Fc))
+        FILTER[np.isnan(FILTER)] = 1.0  # For f = 0
+    elif filter_type == 'cosine':
+        FILTER = FILTER * np.cos(2 * np.pi * f / (4 * Fc))
+    elif filter_type == 'hamming':
+        FILTER = FILTER * (0.54 + 0.46 * np.cos(2 * np.pi * f / Fc))
+    elif filter_type == 'hann':
+        FILTER = FILTER * (1 + np.cos(2 * np.pi * f / (4 * Fc))) / 2
+    else:
+        raise ValueError(f"Unknown filter type: {filter_type}")
+
+    # Cut off frequencies beyond Fc
+    FILTER[np.abs(f) > Fc] = 0
+    # Exponential attenuation (optional, as in the MATLAB code)
+    FILTER = FILTER * np.exp(-2 * (np.abs(f) / Fc)**10)
+
+    return FILTER
+
+
+def get_apodization_vector_gpu(matrix_sparse_obj):
+    """
+    Generates a 2D (Hanning) window vector for apodizing the system
+    matrix A and transfers it to the GPU.
+    This vector must be multiplied with the columns of A (Z*X pixels).
+    """
+    Z = matrix_sparse_obj.Z
+    X = matrix_sparse_obj.X
+
+    # 1. Generate the 1D windows along the X and Z axes
+    # Strong lateral (X) apodization to target the border artifact.
+    fenetre_x = hann(X).astype(np.float32)
+
+    # Uniform window in depth (Z), since the artifact is mostly lateral.
+    fenetre_z = np.ones(Z, dtype=np.float32)
+
+    # 2. Build the 2D window matrix (Z, X)
+    fenetre_2d = np.outer(fenetre_z, fenetre_x)
+
+    # 3. Vectorize (Z*X)
+    fenetre_vectorisee = fenetre_2d.flatten()
+
+    # 4. Transfer to the GPU (contiguous memory)
+    fenetre_gpu = drv.mem_alloc(fenetre_vectorisee.nbytes)
+    drv.memcpy_htod(fenetre_gpu, fenetre_vectorisee)
+
+    print(f"✅ Window vector (Z*X={Z*X}) generated and transferred to the GPU.")
+
+    return fenetre_gpu
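To show where filter_radon might sit in a filtered-backprojection pipeline, the sketch below applies it to one projection in the frequency domain; the sampling step, cut-off frequency and data are placeholders rather than values used by the package. The apodization vector from get_apodization_vector_gpu would, per its docstring, then be multiplied column-wise into the system matrix A before reconstruction.

    # Illustrative use of filter_radon (defined in the diff above); all data are placeholders.
    import numpy as np

    N = 1024
    dt = 25e-9                          # hypothetical time step between samples
    f = np.fft.fftfreq(N, d=dt)         # frequency axis matching the projection length
    Fc = 0.8 * 0.5 / dt                 # hypothetical cut-off at 80% of Nyquist

    H = filter_radon(f, N, 'hann', Fc)

    projection = np.random.rand(N).astype(np.float32)   # stand-in for one measured line
    filtered = np.real(np.fft.ifft(np.fft.fft(projection) * H))
    print(filtered.shape)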
@@ -9,3 +9,4 @@ from .ReconTools import *
 
 
 
+