httomolibgpu 5.2.tar.gz → 5.4.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (37)
  1. {httomolibgpu-5.2/httomolibgpu.egg-info → httomolibgpu-5.4}/PKG-INFO +1 -1
  2. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cupywrapper.py +2 -0
  3. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/rescale.py +0 -1
  4. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/prep/normalize.py +2 -3
  5. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/prep/phase.py +70 -11
  6. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/prep/stripe.py +3 -3
  7. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/recon/algorithm.py +156 -7
  8. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/recon/rotation.py +13 -13
  9. {httomolibgpu-5.2 → httomolibgpu-5.4/httomolibgpu.egg-info}/PKG-INFO +1 -1
  10. {httomolibgpu-5.2 → httomolibgpu-5.4}/LICENSE +0 -0
  11. {httomolibgpu-5.2 → httomolibgpu-5.4}/MANIFEST.in +0 -0
  12. {httomolibgpu-5.2 → httomolibgpu-5.4}/README.rst +0 -0
  13. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/__init__.py +0 -0
  14. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/__init__.py +0 -0
  15. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/calc_metrics.cu +0 -0
  16. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/center_360_shifts.cu +0 -0
  17. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/generate_mask.cu +0 -0
  18. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/median_kernel.cu +0 -0
  19. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/raven_filter.cu +0 -0
  20. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/remove_nan_inf.cu +0 -0
  21. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/cuda_kernels/remove_stripe_fw.cu +0 -0
  22. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/memory_estimator_helpers.py +0 -0
  23. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/__init__.py +0 -0
  24. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/corr.py +0 -0
  25. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/denoise.py +0 -0
  26. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/morph.py +0 -0
  27. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/misc/utils.py +0 -0
  28. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/prep/__init__.py +0 -0
  29. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/prep/alignment.py +0 -0
  30. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/recon/__init__.py +0 -0
  31. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu/recon/_phase_cross_correlation.py +0 -0
  32. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu.egg-info/SOURCES.txt +0 -0
  33. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu.egg-info/dependency_links.txt +0 -0
  34. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu.egg-info/requires.txt +0 -0
  35. {httomolibgpu-5.2 → httomolibgpu-5.4}/httomolibgpu.egg-info/top_level.txt +0 -0
  36. {httomolibgpu-5.2 → httomolibgpu-5.4}/pyproject.toml +0 -0
  37. {httomolibgpu-5.2 → httomolibgpu-5.4}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 5.2
+ Version: 5.4
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause
@@ -2,6 +2,7 @@ cupy_run = False
  try:
  import cupy as cp
  import nvtx
+ from cupyx.scipy.fft import next_fast_len

  try:
  cp.cuda.Device(0).compute_capability
@@ -15,5 +16,6 @@ except ImportError as e:
  )
  from unittest.mock import Mock
  import numpy as cp
+ from scipy.fft import next_fast_len

  nvtx = Mock()
@@ -27,7 +27,6 @@ cp = cupywrapper.cp

  from typing import Literal, Optional, Tuple, Union

-
  __all__ = [
  "rescale_to_int",
  ]
@@ -34,7 +34,6 @@ else:

  from numpy import float32

-
  __all__ = ["dark_flat_field_correction", "minus_log"]

@@ -47,7 +46,7 @@ def dark_flat_field_correction(
  cutoff: float = 10.0,
  ) -> cp.ndarray:
  """
- Normalize raw projection data using the flat and dark field projections.
+ Perform dark/flat field correction of raw projection data.

  Parameters
  ----------
@@ -67,7 +66,7 @@ def dark_flat_field_correction(
  Returns
  -------
  cp.ndarray
- Normalised by dark/flat fields 3D tomographic data as a CuPy array.
+ dark/flat field corrected 3D tomographic data as a CuPy array.
  """
  _check_valid_input_normalise(data, flats, darks)

@@ -26,6 +26,7 @@ from httomolibgpu.memory_estimator_helpers import _DeviceMemStack

  cp = cupywrapper.cp
  cupy_run = cupywrapper.cupy_run
+ next_fast_len = cupywrapper.next_fast_len

  from unittest.mock import Mock

@@ -38,7 +39,7 @@ else:
  fftshift = Mock()

  from numpy import float32
- from typing import Optional, Tuple
+ from typing import Literal, Optional, Tuple
  import math

  __all__ = [
@@ -56,6 +57,10 @@ def paganin_filter(
  distance: float = 1.0,
  energy: float = 53.0,
  ratio_delta_beta: float = 250,
+ calculate_padding_value_method: Literal[
+ "next_power_of_2", "next_fast_length", "use_pad_x_y"
+ ] = "next_power_of_2",
+ pad_x_y: Optional[list] = None,
  calc_peak_gpu_mem: bool = False,
  ) -> cp.ndarray:
  """
@@ -74,6 +79,10 @@ def paganin_filter(
  Beam energy in keV.
  ratio_delta_beta : float
  The ratio of delta/beta, where delta is the phase shift and real part of the complex material refractive index and beta is the absorption.
+ calculate_padding_value_method: str
+ Method to calculate the padded size of the input data. Accepted values are 'next_power_of_2', 'next_fast_length' and 'use_pad_x_y`.
+ pad_x_y list | None:
+ Padding values in pixels horizontally and vertically. Must be None, unless `calculate_padding_value_method` is 'use_pad_x_y'.
  calc_peak_gpu_mem: bool
  Parameter to support memory estimation in HTTomo. Irrelevant to the method itself and can be ignored by user.

@@ -93,9 +102,9 @@ def paganin_filter(
  mem_stack.malloc(np.prod(tomo) * np.float32().itemsize)
  dz_orig, dy_orig, dx_orig = tomo.shape if not mem_stack else tomo

- # Perform padding to the power of 2 as FFT is O(n*log(n)) complexity
- # TODO: adding other options of padding?
- padded_tomo, pad_tup = _pad_projections_to_second_power(tomo, mem_stack)
+ padded_tomo, pad_tup = _pad_projections(
+ tomo, calculate_padding_value_method, pad_x_y, mem_stack
+ )


  dz, dy, dx = padded_tomo.shape if not mem_stack else padded_tomo
@@ -219,21 +228,59 @@ def _shift_bit_length(x: int) -> int:
  return 1 << (x - 1).bit_length()


- def _calculate_pad_size(datashape: tuple) -> list:
+ def _calculate_pad_size(
+ datashape: tuple,
+ calculate_padding_value_method: Literal[
+ "next_power_of_2", "next_fast_length", "use_pad_x_y"
+ ],
+ pad_x_y: Optional[list],
+ ) -> list:
  """Calculating the padding size

  Args:
- datashape (tuple): the shape of the 3D data
+ datashape (tuple):
+ the shape of the 3D data
+ calculate_padding_value_method: str
+ Method to calculate the padded size of the input data. Accepted values are 'next_power_of_2', 'next_fast_length' and 'use_pad_x_y`.
+ pad_x_y (int, int) | None:
+ Padding values in pixels horizontally and vertically. Must be None, unless `calculate_padding_value_method` is 'use_pad_x_y'.

  Returns:
  list: the padded dimensions
  """
+ if pad_x_y is not None and calculate_padding_value_method != "use_pad_x_y":
+ raise ValueError(
+ 'calculate_padding_value_method must be "use_pad_x_y" when pad_x_y is specified'
+ )
+ elif calculate_padding_value_method == "use_pad_x_y":
+ if pad_x_y is None:
+ raise ValueError(
+ 'pad_x_y must be provided when calculate_padding_value_method is "use_pad_x_y"'
+ )
+ elif (
+ not isinstance(pad_x_y, list)
+ or len(pad_x_y) != 2
+ or not isinstance(pad_x_y[0], int)
+ or not isinstance(pad_x_y[1], int)
+ ):
+ raise ValueError("pad_x_y must be a list of two integers")
+
+ if calculate_padding_value_method == "next_power_of_2":
+ calculate_padded_dim = lambda _, size: _shift_bit_length(size + 1)
+ elif calculate_padding_value_method == "next_fast_length":
+ calculate_padded_dim = lambda _, size: next_fast_len(size)
+ elif calculate_padding_value_method == "use_pad_x_y":
+ calculate_padded_dim = lambda dim, size: size + 2 * pad_x_y[2 - dim]
+ else:
+ raise ValueError(
+ f'Unexpected calculate_padding_value_method: "{calculate_padding_value_method}"'
+ )
  pad_list = []
  for index, element in enumerate(datashape):
  if index == 0:
  pad_width = (0, 0) # do not pad the slicing dim
  else:
- diff = _shift_bit_length(element + 1) - element
+ diff = calculate_padded_dim(index, element) - element
  if element % 2 == 0:
  pad_width_scalar = diff // 2
  pad_width = (pad_width_scalar, pad_width_scalar)
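For a sense of what the three strategies in the hunk above produce, here is a small CPU-side sketch using scipy.fft.next_fast_len, the same helper the cupywrapper falls back to when CuPy is unavailable. The 2560-pixel detector width and the pad_x_y values are hypothetical examples.

from scipy.fft import next_fast_len  # CPU stand-in for cupyx.scipy.fft.next_fast_len

detx = 2560  # hypothetical detector width in pixels

# "next_power_of_2": _shift_bit_length(detx + 1), i.e. 1 << detx.bit_length()
print(1 << detx.bit_length())   # 4096

# "next_fast_length": smallest size >= detx that factors into small primes
print(next_fast_len(detx))      # 2560 (2560 = 2**9 * 5 is already FFT-friendly)

# "use_pad_x_y" with pad_x_y=[100, 50]: the horizontal entry pads detX on both sides
print(detx + 2 * 100)           # 2760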
@@ -248,17 +295,27 @@ def _calculate_pad_size(datashape: tuple) -> list:
  return pad_list


- def _pad_projections_to_second_power(
- tomo: cp.ndarray, mem_stack: Optional[_DeviceMemStack]
+ def _pad_projections(
+ tomo: cp.ndarray,
+ calculate_padding_value_method: Literal[
+ "next_power_of_2", "next_fast_length", "use_pad_x_y"
+ ],
+ pad_x_y: Optional[list],
+ mem_stack: Optional[_DeviceMemStack],
  ) -> Tuple[cp.ndarray, Tuple[int, int]]:
  """
- Performs padding of each projection to the next power of 2.
+ Performs padding of each projection to a size optimal for FFT.
  If the shape is not even we also care of that before padding.

  Parameters
  ----------
  tomo : cp.ndarray
  3d projection data
+ calculate_padding_value_method: str
+ Method to calculate the padded size of the input data. Accepted values are 'next_power_of_2', 'next_fast_length' and 'use_pad_x_y`.
+ pad_x_y: list | None:
+ Padding values in pixels horizontally and vertically. Must be None, unless `calculate_padding_value_method` is 'use_pad_x_y'.
+

  Returns
  -------
@@ -268,7 +325,9 @@ def _pad_projections_to_second_power(
  """
  full_shape_tomo = cp.shape(tomo) if not mem_stack else tomo

- pad_list = _calculate_pad_size(full_shape_tomo)
+ pad_list = _calculate_pad_size(
+ full_shape_tomo, calculate_padding_value_method, pad_x_y
+ )

  if mem_stack:
  padded_tomo = [
@@ -845,7 +845,7 @@ def _detect_stripe(listdata, snr):
  listsorted = cp.sort(listdata)[::-1]
  xlist = cp.arange(0, numdata, 1.0)
  ndrop = cp.int16(0.25 * numdata)
- (_slope, _intercept) = _mpolyfit(
+ _slope, _intercept = _mpolyfit(
  xlist[ndrop : -ndrop - 1], listsorted[ndrop : -ndrop - 1]
  )

@@ -869,7 +869,7 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
  Remove large stripes.
  """
  drop_ratio = max(min(drop_ratio, 0.8), 0) # = cp.clip(drop_ratio, 0.0, 0.8)
- (nrow, ncol) = sinogram.shape
+ nrow, ncol = sinogram.shape
  ndrop = int(0.5 * drop_ratio * nrow)
  sinosort = cp.sort(sinogram, axis=0)
  sinosmooth = median_filter(sinosort, (1, size))
@@ -907,7 +907,7 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
  def _rs_dead(sinogram, snr, size, matindex, norm=True):
  """remove unresponsive and fluctuating stripes"""
  sinogram = cp.copy(sinogram) # Make it mutable
- (nrow, _) = sinogram.shape
+ nrow, _ = sinogram.shape
  sinosmooth = uniform_filter1d(sinogram, 10, axis=0)

  listdiff = cp.sum(cp.abs(sinogram - sinosmooth), axis=0)
@@ -40,7 +40,6 @@ else:
  from numpy import float32
  from typing import Optional, Type, Union

-
  __all__ = [
  "FBP2d_astra",
  "FBP3d_tomobar",
@@ -48,6 +47,7 @@ __all__ = [
  "SIRT3d_tomobar",
  "CGLS3d_tomobar",
  "FISTA3d_tomobar",
+ "ADMM3d_tomobar",
  ]

  input_data_axis_labels = ["angles", "detY", "detX"] # set the labels of the input data
@@ -272,9 +272,7 @@ def SIRT3d_tomobar(
  ) -> cp.ndarray:
  """
  Perform Simultaneous Iterative Recostruction Technique (SIRT) using ASTRA toolbox :cite:`van2016fast` and
- ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
- This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability to avoid host-device
- transactions for projection and backprojection.
+ ToMoBAR :cite:`kazantsev2020tomographic` wrappers. For more information see :ref:`method_SIRT3d_tomobar`.

  Parameters
  ----------
@@ -345,9 +343,7 @@ def CGLS3d_tomobar(
  ) -> cp.ndarray:
  """
  Perform Conjugate Gradient Least Squares (CGLS) using ASTRA toolbox :cite:`van2016fast` and
- ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
- This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability to avoid host-device
- transactions for projection and backprojection.
+ ToMoBAR :cite:`kazantsev2020tomographic` wrappers. For more information see :ref:`method_CGLS3d_tomobar`.

  Parameters
  ----------
@@ -418,6 +414,7 @@ def FISTA3d_tomobar(
  """
  A Fast Iterative Shrinkage-Thresholding Algorithm :cite:`beck2009fast` with various types of regularisation or
  denoising operations :cite:`kazantsev2019ccpi` (currently accepts ROF_TV and PD_TV regularisations only).
+ For more information see :ref:`method_FISTA3d_tomobar`.

  Parameters
  ----------
@@ -489,6 +486,158 @@ def FISTA3d_tomobar(
  return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")


+ ## %%%%%%%%%%%%%%%%%%%%%%% ADMM reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
+ def ADMM3d_tomobar(
+ data: cp.ndarray,
+ angles: np.ndarray,
+ center: Optional[float] = None,
+ detector_pad: Union[bool, int] = False,
+ recon_size: Optional[int] = None,
+ recon_mask_radius: float = 0.95,
+ iterations: int = 3,
+ subsets_number: int = 24,
+ initialisation: Optional[str] = "FBP",
+ ADMM_rho_const: float = 1.0,
+ ADMM_relax_par: float = 1.7,
+ regularisation_type: str = "PD_TV",
+ regularisation_parameter: float = 0.0025,
+ regularisation_iterations: int = 40,
+ regularisation_half_precision: bool = True,
+ nonnegativity: bool = False,
+ gpu_id: int = 0,
+ ) -> cp.ndarray:
+ """
+ An Alternating Direction Method of Multipliers method with various types of regularisation or
+ denoising operations :cite:`kazantsev2019ccpi` (currently accepts ROF_TV and PD_TV regularisations only).
+ For more information see :ref:`_method_ADMM3d_tomobar`.
+
+ Parameters
+ ----------
+ data : cp.ndarray
+ Projection data as a CuPy array.
+ angles : np.ndarray
+ An array of angles given in radians.
+ center : float, optional
+ The center of rotation (CoR).
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
+ recon_size : int, optional
+ The [recon_size, recon_size] shape of the reconstructed slice in pixels.
+ By default (None), the reconstructed size will be the dimension of the horizontal detector.
+ recon_mask_radius: float
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
+ iterations : int
+ The number of ADMM algorithm iterations. The recommended range is between 3 to 5 with initialisation and
+ more than 10 without. Assuming that the subsets_number is reasonably large (>12).
+ subsets_number: int
+ The number of the ordered subsets to accelerate convergence. The recommended range is between 12 to 24.
+ initialisation: str, optional
+ Initialise ADMM with the reconstructed image to reduce the number of iterations and accelerate. Choose between 'CGLS' when data
+ is noisy and undersampled, 'FBP' when data is of better quality (default) or None.
+ ADMM_rho_const: float
+ Convergence related parameter for ADMM, higher values lead to slower convergence, but too small values can destabilise the iterations.
+ Recommended range is between 0.9 and 2.0.
+ ADMM_relax_par: Relaxation parameter which can lead to acceleration of the algorithm, keep it in the range between 1.5 and 1.8 to avoid divergence. regularisation_type: str
+ A method to use for regularisation. Currently PD_TV and ROF_TV are available.
+ regularisation_parameter: float
+ The main regularisation parameter to control the amount of smoothing/noise removal. Larger values lead to stronger smoothing.
+ regularisation_iterations: int
+ The number of iterations for regularisers (aka INNER iterations).
+ regularisation_half_precision: bool
+ Perform faster regularisation computation in half-precision with a very minimal sacrifice in quality.
+ nonnegativity : bool
+ Impose nonnegativity constraint (set to True) on the reconstructed image. Default False.
+ gpu_id : int
+ A GPU device index to perform operation on.
+
+ Returns
+ -------
+ cp.ndarray
+ The ADMM reconstructed volume as a CuPy array.
+ """
+ if initialisation not in ["FBP", "CGLS", None]:
+ raise ValueError(
+ "The acceptable values for initialisation are 'FBP','CGLS' and None"
+ )
+
+ if initialisation is not None:
+ if detector_pad == True:
+ detector_pad = __estimate_detectorHoriz_padding(data.shape[2])
+
+ if detector_pad > 0:
+ # if detector_pad is not zero we need to reconstruct the image on the recon+2*detector_pad size
+ recon_size = data.shape[2] + 2 * detector_pad
+
+ if initialisation == "FBP":
+ initialisation_vol = cp.require(
+ cp.swapaxes(
+ FBP3d_tomobar(
+ data,
+ angles=angles,
+ center=center,
+ detector_pad=detector_pad,
+ recon_size=recon_size,
+ recon_mask_radius=recon_mask_radius,
+ ),
+ 0,
+ 1,
+ ),
+ requirements="C",
+ )
+ elif initialisation == "CGLS":
+ initialisation_vol = cp.require(
+ cp.swapaxes(
+ CGLS3d_tomobar(
+ data,
+ angles=angles,
+ center=center,
+ detector_pad=detector_pad,
+ recon_size=recon_size,
+ recon_mask_radius=recon_mask_radius,
+ iterations=15,
+ ),
+ 0,
+ 1,
+ ),
+ requirements="C",
+ )
+ else:
+ initialisation_vol = None
+
+ RecToolsCP = _instantiate_iterative_recon_class(
+ data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS"
+ )
+
+ _data_ = {
+ "projection_norm_data": data,
+ "OS_number": subsets_number,
+ "data_axes_labels_order": input_data_axis_labels,
+ }
+
+ _algorithm_ = {
+ "initialise": initialisation_vol,
+ "iterations": iterations,
+ "nonnegativity": nonnegativity,
+ "recon_mask_radius": recon_mask_radius,
+ "ADMM_rho_const": ADMM_rho_const,
+ "ADMM_relax_par": ADMM_relax_par,
+ }
+
+ _regularisation_ = {
+ "method": regularisation_type, # Selected regularisation method
+ "regul_param": regularisation_parameter, # Regularisation parameter
+ "iterations": regularisation_iterations, # The number of regularisation iterations
+ "half_precision": regularisation_half_precision, # enabling half-precision calculation
+ }
+
+ reconstruction = RecToolsCP.ADMM(_data_, _algorithm_, _regularisation_)
+ cp._default_memory_pool.free_all_blocks()
+ return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")
+
+
  ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
  def _instantiate_direct_recon_class(
  data: cp.ndarray,
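Note: the hunk above adds the new ADMM3d_tomobar method to the public API. A minimal usage sketch follows; the synthetic shapes, angle range and centre value are illustrative only, and the (angles, detY, detX) layout follows the module's input_data_axis_labels.

import numpy as np
import cupy as cp
from httomolibgpu.recon.algorithm import ADMM3d_tomobar

# Synthetic stand-in data in the (angles, detY, detX) layout
angles = np.linspace(0, np.pi, 180, dtype=np.float32)
projdata = cp.random.random((180, 8, 320), dtype=cp.float32)

# Defaults: FBP-initialised, 3 outer ADMM iterations, 24 ordered subsets, PD_TV regularisation
recon = ADMM3d_tomobar(projdata, angles, center=160.0)

# Noisier or undersampled data: CGLS initialisation and stronger regularisation
recon_noisy = ADMM3d_tomobar(
    projdata,
    angles,
    center=160.0,
    initialisation="CGLS",
    regularisation_parameter=0.005,
)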
@@ -171,7 +171,7 @@ def find_center_vo(


  def _search_coarse(sino, smin, smax, ratio, drop):
- (nrow, ncol) = sino.shape
+ nrow, ncol = sino.shape
  flip_sino = cp.ascontiguousarray(cp.fliplr(sino))
  comp_sino = cp.ascontiguousarray(cp.flipud(sino))

@@ -206,7 +206,7 @@ def _search_coarse(sino, smin, smax, ratio, drop):


  def _search_fine(sino, srad, step, init_cen, ratio, drop):
- (nrow, ncol) = sino.shape
+ nrow, ncol = sino.shape

  flip_sino = cp.ascontiguousarray(cp.fliplr(sino))
  comp_sino = cp.ascontiguousarray(cp.flipud(sino))
@@ -397,7 +397,7 @@ def _downsample(image, dsp_fact0, dsp_fact1):
  ---------
  image_dsp : Downsampled image.
  """
- (height, width) = image.shape
+ height, width = image.shape
  dsp_fact0 = cp.clip(cp.int16(dsp_fact0), 1, height // 2)
  dsp_fact1 = cp.clip(cp.int16(dsp_fact1), 1, width // 2)
  height_dsp = height // dsp_fact0
@@ -477,11 +477,11 @@ def find_center_360(
  else:
  _sino = data[:, ind, :]

- (nrow, ncol) = _sino.shape
+ nrow, ncol = _sino.shape
  nrow_180 = nrow // 2 + 1
  sino_top = _sino[0:nrow_180, :]
  sino_bot = cp.fliplr(_sino[-nrow_180:, :])
- (overlap, side, overlap_position) = _find_overlap(
+ overlap, side, overlap_position = _find_overlap(
  sino_top, sino_bot, win_width, side, denoise, norm, use_overlap
  )
  cor = ncol - overlap / 2
@@ -531,7 +531,7 @@ def _find_overlap(
  win_width = int(np.clip(win_width, 6, min(ncol1, ncol2) // 2))

  if side == "right":
- (list_metric, offset) = _search_overlap(
+ list_metric, offset = _search_overlap(
  mat1,
  mat2,
  win_width,
@@ -544,7 +544,7 @@ def _find_overlap(
  overlap_position += offset
  overlap = ncol1 - overlap_position + win_width // 2
  elif side == "left":
- (list_metric, offset) = _search_overlap(
+ list_metric, offset = _search_overlap(
  mat1,
  mat2,
  win_width,
@@ -557,7 +557,7 @@ def _find_overlap(
  overlap_position += offset
  overlap = overlap_position + win_width // 2
  else:
- (list_metric1, offset1) = _search_overlap(
+ list_metric1, offset1 = _search_overlap(
  mat1,
  mat2,
  win_width,
@@ -566,7 +566,7 @@ def _find_overlap(
  norm=norm,
  use_overlap=use_overlap,
  )
- (list_metric2, offset2) = _search_overlap(
+ list_metric2, offset2 = _search_overlap(
  mat1,
  mat2,
  win_width,
@@ -576,9 +576,9 @@ def _find_overlap(
  use_overlap=use_overlap,
  )

- (curvature1, overlap_position1) = _calculate_curvature(list_metric1)
+ curvature1, overlap_position1 = _calculate_curvature(list_metric1)
  overlap_position1 += offset1
- (curvature2, overlap_position2) = _calculate_curvature(list_metric2)
+ curvature2, overlap_position2 = _calculate_curvature(list_metric2)
  overlap_position2 += offset2

  if curvature1 > curvature2:
@@ -638,8 +638,8 @@ def _search_overlap(
  mat1 = cp.ascontiguousarray(mat1, dtype=cp.float32)
  mat2 = cp.ascontiguousarray(mat2, dtype=cp.float32)

- (nrow1, ncol1) = mat1.shape
- (nrow2, ncol2) = mat2.shape
+ nrow1, ncol1 = mat1.shape
+ nrow2, ncol2 = mat2.shape

  if nrow1 != nrow2:
  raise ValueError("Two images are not at the same height!!!")
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 5.2
+ Version: 5.4
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause