AOT-biomaps 2.9.281.tar.gz → 2.9.291.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (52)
  1. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py +14 -16
  2. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py +116 -19
  3. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_biomaps_kernels.cubin +0 -0
  4. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/PrimalDualRecon.py +94 -41
  5. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/__init__.py +11 -1
  6. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps.egg-info/PKG-INFO +1 -1
  7. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/PKG-INFO +1 -1
  8. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/setup.py +11 -1
  9. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/AcousticEnums.py +0 -0
  10. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/AcousticTools.py +0 -0
  11. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/FocusedWave.py +0 -0
  12. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/IrregularWave.py +0 -0
  13. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/PlaneWave.py +0 -0
  14. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/StructuredWave.py +0 -0
  15. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/__init__.py +0 -0
  16. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Acoustic/_mainAcoustic.py +0 -0
  17. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Experiment/Focus.py +0 -0
  18. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Experiment/Tomography.py +0 -0
  19. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Experiment/__init__.py +0 -0
  20. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Experiment/_mainExperiment.py +0 -0
  21. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Optic/Absorber.py +0 -0
  22. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Optic/Laser.py +0 -0
  23. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Optic/OpticEnums.py +0 -0
  24. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Optic/__init__.py +0 -0
  25. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Optic/_mainOptic.py +0 -0
  26. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/DEPIERRO.py +0 -0
  27. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/MAPEM.py +0 -0
  28. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/MLEM.py +0 -0
  29. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_Optimizers/__init__.py +0 -0
  30. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Huber.py +0 -0
  31. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/Quadratic.py +0 -0
  32. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/RelativeDifferences.py +0 -0
  33. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_PotentialFunctions/__init__.py +0 -0
  34. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_CSR.py +0 -0
  35. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/SparseSMatrix_SELL.py +0 -0
  36. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AOT_SparseSMatrix/__init__.py +0 -0
  37. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AlgebraicRecon.py +0 -0
  38. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/AnalyticRecon.py +0 -0
  39. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/BayesianRecon.py +0 -0
  40. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/DeepLearningRecon.py +0 -0
  41. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/ReconEnums.py +0 -0
  42. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/ReconTools.py +0 -0
  43. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/__init__.py +0 -0
  44. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/AOT_Recon/_mainRecon.py +0 -0
  45. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/Config.py +0 -0
  46. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps/Settings.py +0 -0
  47. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps.egg-info/SOURCES.txt +0 -0
  48. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps.egg-info/dependency_links.txt +0 -0
  49. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps.egg-info/requires.txt +0 -0
  50. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/AOT_biomaps.egg-info/top_level.txt +0 -0
  51. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/README.md +0 -0
  52. {aot_biomaps-2.9.281 → aot_biomaps-2.9.291}/setup.cfg +0 -0
AOT_biomaps/AOT_Recon/AOT_Optimizers/LS.py
@@ -181,13 +181,12 @@ def _LS_CG_sparseCSR_pycuda(SMatrix, y, numIterations, isSavingEachIteration, tu
     print(f"Dim X: {X}, Dim Z: {Z}, TN: {TN}, ZX: {ZX}")

     stream = drv.Stream()
-    mod = drv.module_from_file('AOT_biomaps_kernels.cubin')

     # Retrieve the kernels
-    projection_kernel = mod.get_function('projection_kernel__CSR')
-    backprojection_kernel = mod.get_function('backprojection_kernel__CSR')
-    axpby_kernel = mod.get_function("vector_axpby_kernel")
-    minus_axpy_kernel = mod.get_function("vector_minus_axpy_kernel")
+    projection_kernel = SMatrix.sparse_mod.get_function('projection_kernel__CSR')
+    backprojection_kernel = SMatrix.sparse_mod.get_function('backprojection_kernel__CSR')
+    axpby_kernel = SMatrix.sparse_mod.get_function("vector_axpby_kernel")
+    minus_axpy_kernel = SMatrix.sparse_mod.get_function("vector_minus_axpy_kernel")

     # --- Buffer allocation (raw pointers) ---
     y = y.T.flatten().astype(dtype)
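The hunk above drops the per-call drv.module_from_file() load: the compiled kernel module is now taken from SMatrix.sparse_mod, so the .cubin is loaded once and shared between solvers. A minimal sketch of that caching pattern, assuming a current CUDA context (for example via pycuda.autoinit); the class and attribute names below are illustrative, not the package's:

import pycuda.autoinit  # noqa: F401 -- creates a CUDA context
import pycuda.driver as drv

class SparseMatrixWithKernels:
    # Illustrative holder: load the .cubin once, reuse it across solvers.
    _sparse_mod = None

    @property
    def sparse_mod(self):
        if SparseMatrixWithKernels._sparse_mod is None:
            SparseMatrixWithKernels._sparse_mod = drv.module_from_file(
                "AOT_biomaps_kernels.cubin")  # compiled kernel module shipped with the package
        return SparseMatrixWithKernels._sparse_mod

# every solver then fetches kernels from the shared module, as in the new code:
# projection_kernel = SMatrix.sparse_mod.get_function('projection_kernel__CSR')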
@@ -231,7 +230,7 @@ def _LS_CG_sparseCSR_pycuda(SMatrix, y, numIterations, isSavingEachIteration, tu
     drv.memcpy_dtod(p_flat_gpu, r_flat_gpu, ZX * np.dtype(dtype).itemsize)

     # 6. rho_prev = ||r_0||^2
-    rho_prev = _dot_product_gpu(mod, r_flat_gpu, r_flat_gpu, ZX, stream)
+    rho_prev = _dot_product_gpu(SMatrix.sparse_mod, r_flat_gpu, r_flat_gpu, ZX, stream)

     # --- Iterative loop ---
     saved_theta, saved_indices = [], []
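rho_prev here is the squared norm of the initial residual. The package computes it with its own _dot_product_gpu reduction (presumably a kernel from the same module, which is an assumption); the equivalent quantity with PyCUDA's built-in reduction looks like this:

import numpy as np
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context
import pycuda.gpuarray as gpuarray

r = gpuarray.to_gpu(np.random.rand(1024).astype(np.float32))  # stand-in residual
rho_prev = float(gpuarray.dot(r, r).get())  # ||r_0||^2 via PyCUDA's reduction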
@@ -258,7 +257,7 @@ def _LS_CG_sparseCSR_pycuda(SMatrix, y, numIterations, isSavingEachIteration, tu
             block=(block_size, 1, 1), grid=((TN + block_size - 1) // block_size, 1, 1), stream=stream)

         # c. alpha = rho_prev / <p, z>
-        pAp = _dot_product_gpu(mod, p_flat_gpu, z_flat_gpu, ZX, stream)
+        pAp = _dot_product_gpu(SMatrix.sparse_mod, p_flat_gpu, z_flat_gpu, ZX, stream)

         if abs(pAp) < 1e-15: break
         alpha = rho_prev / pAp
@@ -273,7 +272,7 @@ def _LS_CG_sparseCSR_pycuda(SMatrix, y, numIterations, isSavingEachIteration, tu
             block=(block_size, 1, 1), grid=((ZX + block_size - 1) // block_size, 1, 1), stream=stream)

         # f. rho_curr = ||r||^2
-        rho_curr = _dot_product_gpu(mod, r_flat_gpu, r_flat_gpu, ZX, stream)
+        rho_curr = _dot_product_gpu(SMatrix.sparse_mod, r_flat_gpu, r_flat_gpu, ZX, stream)

         if rho_curr < tolerance: break

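Taken together, these hunks trace one conjugate-gradient pass on the normal equations: z = AᵀAp via projection and backprojection, alpha = rho_prev / ⟨p, z⟩, the residual update, and the rho_curr convergence test. A CPU NumPy reference of the same recursion, as a sketch for orientation rather than package code:

import numpy as np

def cg_normal_equations(A, y, num_iterations=100, tolerance=1e-12):
    # CPU reference (a sketch, not part of the package) for the recursion in
    # _LS_CG_sparseCSR_pycuda: CG applied to the normal equations A^T A x = A^T y.
    x = np.zeros(A.shape[1], dtype=A.dtype)
    r = A.T @ y                       # initial residual for x_0 = 0
    p = r.copy()
    rho_prev = float(r @ r)           # ||r_0||^2
    for _ in range(num_iterations):
        z = A.T @ (A @ p)             # projection followed by backprojection
        pAp = float(p @ z)            # <p, z>
        if abs(pAp) < 1e-15:
            break
        alpha = rho_prev / pAp
        x += alpha * p                # axpby update of the image
        r -= alpha * z                # minus_axpy update of the residual
        rho_curr = float(r @ r)       # ||r||^2
        if rho_curr < tolerance:
            break
        p = r + (rho_curr / rho_prev) * p
        rho_prev = rho_curr
    return x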
@@ -364,11 +363,10 @@ def _LS_CG_sparseSELL_pycuda(SMatrix, y, numIterations, isSavingEachIteration, t
     tolerance = 1e-12

     # Access the SELL parameters
-    mod = SMatrix.sparse_mod
-    projection_kernel = mod.get_function("projection_kernel__SELL")
-    backprojection_kernel = mod.get_function("backprojection_kernel__SELL")
-    axpby_kernel = mod.get_function("vector_axpby_kernel")
-    minus_axpy_kernel = mod.get_function("vector_minus_axpy_kernel")
+    projection_kernel = SMatrix.sparse_mod.get_function("projection_kernel__SELL")
+    backprojection_kernel = SMatrix.sparse_mod.get_function("backprojection_kernel__SELL")
+    axpby_kernel = SMatrix.sparse_mod.get_function("vector_axpby_kernel")
+    minus_axpy_kernel = SMatrix.sparse_mod.get_function("vector_minus_axpy_kernel")
     slice_height = np.int32(SMatrix.slice_height)
     grid_rows = ((TN + block_size - 1) // block_size, 1, 1)

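The SELL variant additionally reads SMatrix.slice_height, the number of rows packed per slice. The toy packing routine below shows what a sliced-ELLPACK layout involves under the usual SELL-C convention; it is an illustration only, and the package's actual layout may differ:

import numpy as np

def csr_to_sell(indptr, indices, data, slice_height=32):
    # Toy sliced-ELLPACK (SELL-C) packing: rows are grouped into slices of
    # slice_height, each slice is padded to its longest row, and values are
    # stored column-major inside the slice so consecutive GPU threads read
    # consecutive memory.
    n_rows = len(indptr) - 1
    n_slices = (n_rows + slice_height - 1) // slice_height
    slice_ptrs, cols, vals = [0], [], []
    for s in range(n_slices):
        r0 = s * slice_height
        lens = [indptr[r + 1] - indptr[r] if r < n_rows else 0
                for r in range(r0, r0 + slice_height)]
        for j in range(max(lens)):                    # column-major within the slice
            for i, r in enumerate(range(r0, r0 + slice_height)):
                if j < lens[i]:
                    k = indptr[r] + j
                    cols.append(int(indices[k])); vals.append(float(data[k]))
                else:
                    cols.append(0); vals.append(0.0)  # padding
        slice_ptrs.append(len(vals))
    return np.array(slice_ptrs), np.array(cols), np.array(vals)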
@@ -416,7 +414,7 @@ def _LS_CG_sparseSELL_pycuda(SMatrix, y, numIterations, isSavingEachIteration, t
     drv.memcpy_dtod(p_flat_gpu, r_flat_gpu, ZX * np.dtype(dtype).itemsize)

     # 6. rho_prev = ||r_0||^2
-    rho_prev = _dot_product_gpu(mod, r_flat_gpu, r_flat_gpu, ZX, stream)
+    rho_prev = _dot_product_gpu(SMatrix.sparse_mod, r_flat_gpu, r_flat_gpu, ZX, stream)

     # --- Iterative loop ---
     saved_theta, saved_indices = [], []
@@ -443,7 +441,7 @@ def _LS_CG_sparseSELL_pycuda(SMatrix, y, numIterations, isSavingEachIteration, t
             block=(block_size, 1, 1), grid=grid_rows, stream=stream)

         # c. alpha = rho_prev / <p, z>
-        pAp = _dot_product_gpu(mod, p_flat_gpu, z_flat_gpu, ZX, stream)
+        pAp = _dot_product_gpu(SMatrix.sparse_mod, p_flat_gpu, z_flat_gpu, ZX, stream)

         if abs(pAp) < 1e-15: break
         alpha = rho_prev / pAp
@@ -458,7 +456,7 @@ def _LS_CG_sparseSELL_pycuda(SMatrix, y, numIterations, isSavingEachIteration, t
             block=(block_size, 1, 1), grid=((ZX + block_size - 1) // block_size, 1, 1), stream=stream)

         # f. rho_curr = ||r||^2
-        rho_curr = _dot_product_gpu(mod, r_flat_gpu, r_flat_gpu, ZX, stream)
+        rho_curr = _dot_product_gpu(SMatrix.sparse_mod, r_flat_gpu, r_flat_gpu, ZX, stream)

         if rho_curr < tolerance: break

AOT_biomaps/AOT_Recon/AOT_Optimizers/PDHG.py
@@ -1,6 +1,6 @@
-from AOT_biomaps.AOT_Recon.ReconTools import power_method, gradient, div, proj_l2, prox_G, prox_F_star, _call_axpby, _call_minus_axpy, compute_TV_cpu, power_method_estimate_L__SELL
+from AOT_biomaps.AOT_Recon.ReconTools import power_method, gradient, div, proj_l2, prox_G, prox_F_star, _call_axpby, _call_minus_axpy, compute_TV_cpu, power_method_estimate_L__SELL, calculate_memory_requirement, check_gpu_memory
 from AOT_biomaps.Config import config
-from AOT_biomaps.AOT_Recon.ReconEnums import NoiseType
+from AOT_biomaps.AOT_Recon.ReconEnums import NoiseType, SMatrixType
 import torch
 from tqdm import trange
 import numpy as np
@@ -13,6 +13,103 @@ The methods can run on both CPU and GPU, with configurations set in the AOT_biom
 '''

 def CP_TV(
+        SMatrix,
+        y,
+        alpha=None,  # TV regularization parameter (if None, alpha is auto-scaled)
+        beta=1e-4,  # Tikhonov regularization parameter
+        theta=1.0,
+        numIterations=5000,
+        isSavingEachIteration=True,
+        L=None,
+        withTumor=True,
+        device=None,
+        max_saves=5000,
+        show_logs=True,
+        smatrixType=SMatrixType.SELL,
+        k_security=0.8,
+        use_power_method=True,
+        auto_alpha_gamma=0.05,  # gamma for auto alpha: alpha = gamma * data_term / tv_term
+        apply_positivity_clamp=True,
+        tikhonov_as_gradient=False,  # if True, apply -tau*2*beta*x instead of prox multiplicative
+        use_laplacian=True,  # enable Laplacian (Hessian scalar) penalty
+        laplacian_beta_scale=1.0  # multiply beta for laplacian term if you want separate scaling
+):
+    # try:
+    tumor_str = "WITH" if withTumor else "WITHOUT"
+    # Auto-select device and method
+    if device is None:
+        if torch.cuda.is_available() and check_gpu_memory(config.select_best_gpu(), calculate_memory_requirement(SMatrix, y), show_logs=show_logs):
+            device = torch.device(f"cuda:{config.select_best_gpu()}")
+            use_gpu = True
+        else:
+            device = torch.device("cpu")
+            use_gpu = False
+    else:
+        use_gpu = device.type == "cuda"
+    # Dispatch to the appropriate implementation
+    if use_gpu:
+        if smatrixType == SMatrixType.CSR:
+            raise NotImplementedError("GPU Chambolle Pock (LS-TV) with CSR not implemented.")
+        elif smatrixType == SMatrixType.SELL:
+            return CP_TV_Tikhonov_sparseCSR_pycuda(SMatrix, y, alpha, beta, theta, numIterations, isSavingEachIteration, L, tumor_str, device, max_saves, show_logs, k_security, use_power_method, auto_alpha_gamma, apply_positivity_clamp, tikhonov_as_gradient, use_laplacian, laplacian_beta_scale)
+        elif smatrixType == SMatrixType.DENSE:
+            return CP_TV_dense(SMatrix, y, alpha, theta, numIterations, isSavingEachIteration, L, tumor_str, device, max_saves, show_logs)
+        else:
+            raise ValueError("Unsupported SMatrixType for GPU Chambolle Pock (LS-TV).")
+    else:
+        raise NotImplementedError("CPU Chambolle Pock (LS-TV) not implemented.")
+
+def CP_KL(
+        SMatrix,
+        y,
+        alpha=None,  # TV regularization parameter (if None, alpha is auto-scaled)
+        beta=1e-4,  # Tikhonov regularization parameter
+        theta=1.0,
+        numIterations=5000,
+        isSavingEachIteration=True,
+        L=None,
+        withTumor=True,
+        device=None,
+        max_saves=5000,
+        show_logs=True,
+        smatrixType=SMatrixType.SELL,
+        k_security=0.8,
+        use_power_method=True,
+        auto_alpha_gamma=0.05,  # gamma for auto alpha: alpha = gamma * data_term / tv_term
+        apply_positivity_clamp=True,
+        tikhonov_as_gradient=False,  # if True, apply -tau*2*beta*x instead of prox multiplicative
+        use_laplacian=True,  # enable Laplacian (Hessian scalar) penalty
+        laplacian_beta_scale=1.0  # multiply beta for laplacian term if you want separate scaling
+):
+    # try:
+    tumor_str = "WITH" if withTumor else "WITHOUT"
+    # Auto-select device and method
+    if device is None:
+        if torch.cuda.is_available() and check_gpu_memory(config.select_best_gpu(), calculate_memory_requirement(SMatrix, y), show_logs=show_logs):
+            device = torch.device(f"cuda:{config.select_best_gpu()}")
+            use_gpu = True
+        else:
+            device = torch.device("cpu")
+            use_gpu = False
+    else:
+        use_gpu = device.type == "cuda"
+    # Dispatch to the appropriate implementation
+    if use_gpu:
+        if smatrixType == SMatrixType.CSR:
+            raise NotImplementedError("GPU Chambolle Pock (LS-KL) with CSR not implemented.")
+        elif smatrixType == SMatrixType.SELL:
+            raise NotImplementedError("GPU Chambolle Pock (LS-KL) with SELL not implemented.")
+        elif smatrixType == SMatrixType.DENSE:
+            return CP_KL(SMatrix, y, alpha, theta, numIterations, isSavingEachIteration, L, tumor_str, device, max_saves, show_logs)
+        else:
+            raise ValueError("Unsupported SMatrixType for GPU Chambolle Pock (LS-KL).")
+    else:
+        raise NotImplementedError("CPU Chambolle Pock (LS-KL) not implemented.")
+
+
+
+
+def CP_TV_dense(
         SMatrix,
         y,
         alpha=1e-1,
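The new CP_TV and CP_KL entry points are thin dispatchers: they pick a device (checking free GPU memory with check_gpu_memory), then route by SMatrixType. For CP_TV, only the SELL path (which dispatches to CP_TV_Tikhonov_sparseCSR_pycuda despite that function's CSR-flavored name) and the DENSE path are implemented; the CSR and CPU paths raise NotImplementedError. A hypothetical call, assuming smatrix and ao_signal come from an existing experiment:

import torch
from AOT_biomaps.AOT_Recon.AOT_Optimizers.PDHG import CP_TV
from AOT_biomaps.AOT_Recon.ReconEnums import SMatrixType

recon, indices = CP_TV(
    SMatrix=smatrix,               # sparse system matrix (assumed available)
    y=ao_signal,                   # measured AO signal (assumed available)
    alpha=None,                    # None -> auto-scaled: alpha = gamma * data_term / tv_term
    beta=1e-4,                     # Tikhonov weight
    numIterations=2000,
    smatrixType=SMatrixType.SELL,  # the only sparse GPU path implemented here
    device=torch.device("cuda:0"),
)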
@@ -23,6 +120,7 @@ def CP_TV(
         withTumor=True,
         device=None,
         max_saves=5000,
+        show_logs=True,
 ):
     """
     Chambolle-Pock algorithm for Total Variation (TV) regularization.
@@ -94,10 +192,10 @@ def CP_TV(
     # Description for progress bar
     tumor_str = "WITH TUMOR" if withTumor else "WITHOUT TUMOR"
     device_str = f"GPU no.{torch.cuda.current_device()}" if device.type == "cuda" else "CPU"
-    description = f"AOT-BioMaps -- Primal/Dual Reconstruction (TV) α:{alpha:.4f} L:{L:.4f} -- {tumor_str} -- {device_str}"
+    description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS-TV) α:{alpha:.4f} L:{L:.4f} -- {tumor_str} -- {device_str}"

-    # Main loop
-    for iteration in trange(numIterations, desc=description):
+    iterator = trange(numIterations, desc=description) if show_logs else range(numIterations)
+    for it in iterator:
         # Update p (TV proximal step)
         grad_x = gradient(x_tilde.reshape(Z, X))
         p = proj_l2(p + sigma * grad_x, alpha)
@@ -115,9 +213,9 @@ def CP_TV(
         x_tilde = x + theta * (x - x_old)

         # Save intermediate result if needed
-        if isSavingEachIteration and iteration in save_indices:
+        if isSavingEachIteration and it in save_indices:
             I_reconMatrix.append(x.reshape(Z, X).clone() * (norm_y / norm_A))
-            saved_indices.append(iteration)
+            saved_indices.append(it)

     # Return results
     if isSavingEachIteration:
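The loop body shown here is one Chambolle-Pock (PDHG) iteration: a dual ascent step on the TV variable p, projected onto pixelwise l2 balls of radius alpha, then a primal step on x and the over-relaxation x_tilde = x + theta * (x - x_old). Below is a self-contained torch sketch of the full iteration for min_x 0.5*||Ax - y||^2 + alpha*TV(x) with a positivity clamp; grad2d/div2d stand in for the package's gradient/div helpers (an assumption about their conventions), and convergence requires tau * sigma * ||K||^2 <= 1:

import torch

def grad2d(u):
    # Forward differences, zero at the far boundary; returns shape (2, Z, X).
    gz, gx = torch.zeros_like(u), torch.zeros_like(u)
    gz[:-1, :] = u[1:, :] - u[:-1, :]
    gx[:, :-1] = u[:, 1:] - u[:, :-1]
    return torch.stack((gz, gx))

def div2d(p):
    # Divergence, the negative adjoint of grad2d.
    d = torch.zeros_like(p[0])
    d[:-1, :] += p[0][:-1, :]; d[1:, :] -= p[0][:-1, :]
    d[:, :-1] += p[1][:, :-1]; d[:, 1:] -= p[1][:, :-1]
    return d

def cp_tv_iteration(x, x_tilde, q, p, A, y, tau, sigma, theta, alpha, Z, X):
    # One PDHG iteration for 0.5*||Ax - y||^2 + alpha*TV(x), x >= 0.
    # Dual step for the data term: prox of the conjugate of 0.5*||. - y||^2.
    q = (q + sigma * (A @ x_tilde - y)) / (1.0 + sigma)
    # Dual step for TV: project onto pixelwise l2 balls of radius alpha.
    g = p + sigma * grad2d(x_tilde.reshape(Z, X))
    p = g / torch.clamp(torch.sqrt(g[0] ** 2 + g[1] ** 2) / alpha, min=1.0)
    # Primal step with positivity clamp (K^* applied to both dual variables).
    x_new = torch.clamp(x - tau * (A.T @ q - div2d(p).reshape(-1)), min=0.0)
    # Over-relaxation.
    return x_new, x_new + theta * (x_new - x), q, p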
@@ -134,7 +232,7 @@ def CP_TV_Tikhonov_sparseCSR_pycuda(
     numIterations=2000,
     isSavingEachIteration=True,
     L=None,
-    withTumor=True,
+    tumor_str="",
     device=None,
     max_saves=2000,
     show_logs=True,
@@ -203,7 +301,7 @@ def CP_TV_Tikhonov_sparseCSR_pycuda(
     sigma = np.float32(k_security / L_op_norm)

     # prepare y and normalization
-    y = y.astype(dtype).reshape(-1)
+    y = y.T.astype(dtype).reshape(-1)
     maxy = float(np.max(np.abs(y))) if y.size > 0 else 0.0
     if maxy > 0:
         y_normed = (y / maxy).copy()
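sigma is derived from a power-method estimate of the operator norm, with k_security < 1 keeping tau * sigma * L^2 safely below the PDHG stability bound. A sketch of the estimate, analogous to the package's power_method / power_method_estimate_L__SELL helpers, whose exact signatures are assumptions here:

import numpy as np

def estimate_operator_norm(A_matvec, At_matvec, n, iterations=50, seed=0):
    # Power iteration on A^T A; the estimate converges to lambda_max(A^T A)
    # = ||A||^2, so L is its square root.
    rng = np.random.default_rng(seed)
    v = rng.standard_normal(n).astype(np.float32)
    v /= np.linalg.norm(v)
    eig = 0.0
    for _ in range(iterations):
        w = At_matvec(A_matvec(v))
        eig = float(np.linalg.norm(w))  # estimate of lambda_max(A^T A)
        if eig == 0.0:
            return 0.0
        v = w / eig
    return float(np.sqrt(eig))

# the diff then sets the dual step size as sigma = k_security / L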
@@ -286,7 +384,6 @@ def CP_TV_Tikhonov_sparseCSR_pycuda(
     step = max(1, numIterations // max_saves)
     save_indices_all = list(range(0, numIterations + 1, step))

-    tumor_str = "WITH TUMOR" if withTumor else "WITHOUT TUMOR"
     device_str = f"GPU no.{torch.cuda.current_device()}" if device.type == "cuda" else "CPU"
     if show_logs:
         if (alpha is None or alpha == 0) and (beta is None or beta == 0):
@@ -294,10 +391,13 @@ def CP_TV_Tikhonov_sparseCSR_pycuda(
             description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS) -- {tumor_str} -- {device_str}"
         if alpha is None or alpha == 0:
             print(f"Parameters: L={L_op_norm:.6e} tau={tau:.3e} sigma={sigma:.3e} beta={beta:.4e} lap_enabled={use_lap}")
-            description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS) -- {tumor_str} -- {device_str}"
+            description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS-Tikhonov) -- {tumor_str} -- {device_str}"
         elif beta is None or beta == 0:
             print(f"Parameters: L={L_op_norm:.6e} tau={tau:.3e} sigma={sigma:.3e} alpha={alpha:.4e} beta={beta:.4e} lap_enabled={use_lap}")
-            description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS) -- {tumor_str} -- {device_str}"
+            description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS-TV) -- {tumor_str} -- {device_str}"
+        else:
+            print(f"Parameters: L={L_op_norm:.6e} tau={tau:.3e} sigma={sigma:.3e} alpha={alpha:.4e} beta={beta:.4e} lap_enabled={use_lap}")
+            description = f"AOT-BioMaps -- Primal/Dual Reconstruction (LS-TV-Tikhonov) -- {tumor_str} -- {device_str}"

     I_reconMatrix = []
     saved_indices = []
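When alpha is None or 0, the parameter comments describe the auto-scaling rule alpha = gamma * data_term / tv_term: the TV weight is pinned to a fixed fraction of the current data-fidelity/TV balance. A hypothetical illustration of that rule:

import numpy as np

def auto_alpha(x_img, A, y, gamma=0.05):
    # Hypothetical illustration of the auto-scaling stated in the parameter
    # comments: alpha = gamma * data_term / tv_term at the current iterate.
    data_term = 0.5 * float(np.sum((A @ x_img.ravel() - y) ** 2))
    gz, gx = np.gradient(x_img)
    tv_term = float(np.sum(np.sqrt(gz ** 2 + gx ** 2))) + 1e-12  # avoid divide-by-zero
    return gamma * data_term / tv_term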
@@ -311,9 +411,6 @@ def CP_TV_Tikhonov_sparseCSR_pycuda(

     # main loop
     try:
-
-
-
         iterator = trange(numIterations, desc=description) if show_logs else range(numIterations)
         for it in iterator:
             # 1) dual p update (TV)
@@ -466,9 +563,10 @@ def CP_KL(
     numIterations=5000,
     isSavingEachIteration=True,
     L=None,
-    withTumor=True,
+    tumor_str="",
     device=None,
     max_saves=5000,
+    show_logs=True,
 ):
     """
     Chambolle-Pock algorithm for Kullback-Leibler (KL) divergence regularization.
@@ -527,12 +625,11 @@ def CP_KL(
     saved_indices = [0]

     # Description for progress bar
-    tumor_str = "WITH TUMOR" if withTumor else "WITHOUT TUMOR"
     device_str = f"GPU no.{torch.cuda.current_device()}" if device.type == "cuda" else "CPU"
     description = f"AOT-BioMaps -- Primal/Dual Reconstruction (KL) α:{alpha:.4f} L:{L:.4f} -- {tumor_str} -- {device_str}"

-    # Main loop
-    for iteration in trange(numIterations, desc=description):
+    iterator = trange(numIterations, desc=description) if show_logs else range(numIterations)
+    for iteration in iterator:
         # Update q (proximal step for F*)
         q = prox_F_star(q + sigma * P(x_tilde) - sigma * y_flat, sigma, y_flat)

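The q update applies prox_F_star, the proximal operator of the convex conjugate of the KL data term. For reference, the standard closed form from Chambolle and Pock (2011) is sketched below; the package's prox_F_star also receives y_flat, which suggests a variant of this formula (an assumption about its internals):

import torch

def prox_kl_dual(q_tilde, sigma, y):
    # Closed-form prox of the convex conjugate of the KL data term:
    # prox_{sigma F*}(q) = (1 + q - sqrt((q - 1)^2 + 4*sigma*y)) / 2.
    # Shown for reference only.
    return 0.5 * (1.0 + q_tilde - torch.sqrt((q_tilde - 1.0) ** 2 + 4.0 * sigma * y))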
AOT_biomaps/AOT_Recon/PrimalDualRecon.py
@@ -1,5 +1,5 @@
 from AOT_biomaps.AOT_Recon.AlgebraicRecon import AlgebraicRecon
-from AOT_biomaps.AOT_Recon.ReconEnums import ReconType, ProcessType
+from AOT_biomaps.AOT_Recon.ReconEnums import ReconType, ProcessType, SMatrixType
 from AOT_biomaps.AOT_Recon.AOT_Optimizers import CP_KL, CP_TV
 from AOT_biomaps.AOT_Recon.ReconEnums import OptimizerType

@@ -13,11 +13,20 @@ class PrimalDualRecon(AlgebraicRecon):
     This class implements the convex reconstruction process.
     It currently does not perform any operations but serves as a template for future implementations.
     """
-    def __init__(self, theta=1.0, L=None, **kwargs):
+    def __init__(self, alpha, beta, theta=1.0, L=None, k_security=0.8, use_power_method=True, auto_alpha_gamma=0.05, apply_positivity_clamp=True, tikhonov_as_gradient=False, use_laplacian=True, laplacian_beta_scale=1.0, **kwargs):
         super().__init__(**kwargs)
         self.reconType = ReconType.Convex
+        self.alpha = alpha  # TV regularization parameter (if None, alpha is auto-scaled)
+        self.beta = beta  # Tikhonov regularization parameter
         self.theta = theta  # relaxation parameter (between 1 and 2)
         self.L = L  # spectral norm of the linear operator defined by the matrices P and P^T
+        self.k_security = k_security
+        self.use_power_method = use_power_method
+        self.auto_alpha_gamma = auto_alpha_gamma  # gamma for auto alpha: alpha = gamma * data_term / tv_term
+        self.apply_positivity_clamp = apply_positivity_clamp
+        self.tikhonov_as_gradient = tikhonov_as_gradient  # if True, apply -tau*2*beta*x instead of prox multiplicative
+        self.use_laplacian = use_laplacian  # enable Laplacian (Hessian scalar) penalty
+        self.laplacian_beta_scale = laplacian_beta_scale  # multiply beta for laplacian term if you want separate scaling

     def run(self, processType=ProcessType.PYTHON, withTumor=True):
         """
@@ -149,56 +158,100 @@ class PrimalDualRecon(AlgebraicRecon):
         if show_logs:
             print(f"Loaded reconstruction results and indices from {results_dir}")

-    def _convexReconPython(self, withTumor):
+    def _convexReconPython(self, withTumor, show_logs=True):
         if self.optimizer == OptimizerType.CP_TV:
             if withTumor:
                 self.reconPhantom, self.indices = CP_TV(
-                    self.SMatrix,
-                    y=self.experiment.AOsignal_withTumor,
-                    alpha=self.alpha,
-                    theta=self.theta,
-                    numIterations=self.numIterations,
-                    isSavingEachIteration=self.isSavingEachIteration,
-                    L=self.L,
-                    withTumor=withTumor,
-                    device=None
-                )
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
+                )
             else:
                 self.reconLaser, self.indices = CP_TV(
-                    self.SMatrix,
-                    y=self.experiment.AOsignal_withoutTumor,
-                    alpha=self.alpha,
-                    theta=self.theta,
-                    numIterations=self.numIterations,
-                    isSavingEachIteration=self.isSavingEachIteration,
-                    L=self.L,
-                    withTumor=withTumor,
-                    device=None
-                )
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withoutTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
+                )
         elif self.optimizer == OptimizerType.CP_KL:
             if withTumor:
                 self.reconPhantom, self.indices = CP_KL(
-                    self.SMatrix,
-                    y=self.experiment.AOsignal_withTumor,
-                    alpha=self.alpha,
-                    theta=self.theta,
-                    numIterations=self.numIterations,
-                    isSavingEachIteration=self.isSavingEachIteration,
-                    L=self.L,
-                    withTumor=withTumor,
-                    device=None
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
                 )
             else:
                 self.reconLaser, self.indices = CP_KL(
-                    self.SMatrix,
-                    y=self.experiment.AOsignal_withoutTumor,
-                    alpha=self.alpha,
-                    theta=self.theta,
-                    numIterations=self.numIterations,
-                    isSavingEachIteration=self.isSavingEachIteration,
-                    L=self.L,
-                    withTumor=withTumor,
-                    device=None
+                    SMatrix=self.SMatrix,
+                    y=self.experiment.AOsignal_withoutTumor,
+                    alpha=self.alpha,
+                    beta=self.beta,
+                    theta=self.theta,
+                    numIterations=self.numIterations,
+                    isSavingEachIteration=self.isSavingEachIteration,
+                    L=self.L,
+                    withTumor=withTumor,
+                    device=self.device,
+                    max_saves=self.maxSaves,
+                    show_logs=show_logs,
+                    smatrixType=self.smatrixType,
+                    k_security=self.k_security,
+                    use_power_method=self.use_power_method,
+                    auto_alpha_gamma=self.auto_alpha_gamma,
+                    apply_positivity_clamp=self.apply_positivity_clamp,
+                    tikhonov_as_gradient=self.tikhonov_as_gradient,
+                    use_laplacian=self.use_laplacian,
+                    laplacian_beta_scale=self.laplacian_beta_scale
                 )
         else:
             raise ValueError(f"Optimizer value must be CP_TV or CP_KL, got {self.optimizer}")
AOT_biomaps/__init__.py
@@ -85,7 +85,7 @@ from .AOT_Recon.AOT_PotentialFunctions.RelativeDifferences import *
 from .Config import config
 from .Settings import *

-__version__ = '2.9.281'
+__version__ = '2.9.291'
 __process__ = config.get_process()

 def initialize(process=None):
@@ -135,6 +135,16 @@ def initialize(process=None):
 
 
 
+
+
+
+
+
+
+
+
+
+
 
 
 
AOT_biomaps.egg-info/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: AOT_biomaps
-Version: 2.9.281
+Version: 2.9.291
 Summary: Acousto-Optic Tomography
 Home-page: https://github.com/LucasDuclos/AcoustoOpticTomography
 Author: Lucas Duclos
PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: AOT_biomaps
-Version: 2.9.281
+Version: 2.9.291
 Summary: Acousto-Optic Tomography
 Home-page: https://github.com/LucasDuclos/AcoustoOpticTomography
 Author: Lucas Duclos
setup.py
@@ -2,7 +2,7 @@ from setuptools import setup, find_packages

 setup(
     name='AOT_biomaps',
-    version='2.9.281',
+    version='2.9.291',
     packages=find_packages(),
     include_package_data=True,

@@ -293,6 +293,16 @@ setup(
 
 
 
+
+
+
+
+
+
+
+
+
+
 
 
 