httomolibgpu: 4.0-py3-none-any.whl → 5.1-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
httomolibgpu/__init__.py CHANGED
@@ -1,10 +1,12 @@
+ from httomolibgpu.misc.utils import data_checker
+
  from httomolibgpu.misc.corr import median_filter, remove_outlier
  from httomolibgpu.misc.denoise import total_variation_ROF, total_variation_PD
  from httomolibgpu.misc.morph import sino_360_to_180, data_resampler
  from httomolibgpu.misc.rescale import rescale_to_int
  from httomolibgpu.prep.alignment import distortion_correction_proj_discorpy
- from httomolibgpu.prep.normalize import normalize
- from httomolibgpu.prep.phase import paganin_filter
+ from httomolibgpu.prep.normalize import dark_flat_field_correction, minus_log
+ from httomolibgpu.prep.phase import paganin_filter, paganin_filter_savu_legacy
  from httomolibgpu.prep.stripe import (
      remove_stripe_based_sorting,
      remove_stripe_ti,
httomolibgpu/memory_estimator_helpers.py ADDED
@@ -0,0 +1,24 @@
+ ALLOCATION_UNIT_SIZE = 512
+
+
+ class _DeviceMemStack:
+     def __init__(self) -> None:
+         self.allocations = []
+         self.current = 0
+         self.highwater = 0
+
+     def malloc(self, bytes):
+         self.allocations.append(bytes)
+         allocated = self._round_up(bytes)
+         self.current += allocated
+         self.highwater = max(self.current, self.highwater)
+
+     def free(self, bytes):
+         assert bytes in self.allocations
+         self.allocations.remove(bytes)
+         self.current -= self._round_up(bytes)
+         assert self.current >= 0
+
+     def _round_up(self, size):
+         size = (size + ALLOCATION_UNIT_SIZE - 1) // ALLOCATION_UNIT_SIZE
+         return size * ALLOCATION_UNIT_SIZE
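The new helper only book-keeps byte counts, rounded up to 512-byte allocation units; it never allocates GPU memory itself. A minimal sketch of how such a tracker can be driven to obtain a peak-memory figure — the import path follows the new memory_estimator_helpers module above, and the array shape is purely illustrative:

    import numpy as np
    from httomolibgpu.memory_estimator_helpers import _DeviceMemStack

    stack = _DeviceMemStack()
    shape = (1801, 8, 2560)                              # hypothetical data block
    float32_bytes = int(np.prod(shape)) * np.float32().itemsize
    complex64_bytes = int(np.prod(shape)) * np.complex64().itemsize

    stack.malloc(float32_bytes)    # input projections resident on the device
    stack.malloc(complex64_bytes)  # complex copy made for an FFT
    stack.free(float32_bytes)      # original released after the conversion

    print("peak footprint in bytes:", stack.highwater)  # includes 512-byte rounding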
httomolibgpu/misc/corr.py CHANGED
@@ -36,8 +36,6 @@ if cupy_run:
  else:
      load_cuda_module = Mock()

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "median_filter",
      "remove_outlier",
@@ -82,10 +80,6 @@ def median_filter(
      else:
          raise ValueError("The input array must be a 3D array")

-     data = data_checker(
-         data, verbosity=True, method_name="median_filter_or_remove_outlier"
-     )
-
      if kernel_size not in [3, 5, 7, 9, 11, 13]:
          raise ValueError("Please select a correct kernel size: 3, 5, 7, 9, 11, 13")

httomolibgpu/misc/denoise.py CHANGED
@@ -29,8 +29,6 @@ cupy_run = cupywrapper.cupy_run

  from unittest.mock import Mock

- from httomolibgpu.misc.supp_func import data_checker
-
  if cupy_run:
      from tomobar.regularisersCuPy import ROF_TV_cupy, PD_TV_cupy
  else:
@@ -84,8 +82,6 @@ def total_variation_ROF(
          If the input array is not float32 data type.
      """

-     data = data_checker(data, verbosity=True, method_name="total_variation_ROF")
-
      return ROF_TV_cupy(
          data,
          regularisation_parameter,
@@ -139,8 +135,6 @@ def total_variation_PD(
          If the input array is not float32 data type.
      """

-     data_checker(data, verbosity=True, method_name="total_variation_PD")
-
      methodTV = 0
      if not isotropic:
          methodTV = 1
httomolibgpu/misc/morph.py CHANGED
@@ -35,8 +35,6 @@ else:

  from typing import Literal

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "sino_360_to_180",
      "data_resampler",
@@ -68,8 +66,6 @@ def sino_360_to_180(
      if data.ndim != 3:
          raise ValueError("only 3D data is supported")

-     data = data_checker(data, verbosity=True, method_name="sino_360_to_180")
-
      dx, dy, dz = data.shape

      overlap = int(np.round(overlap))
@@ -142,8 +138,6 @@ def data_resampler(
          data = cp.expand_dims(data, 1)
          axis = 1

-     data = data_checker(data, verbosity=True, method_name="data_resampler")
-
      N, M, Z = cp.shape(data)

      if axis == 0:
httomolibgpu/misc/rescale.py CHANGED
@@ -27,8 +27,6 @@ cp = cupywrapper.cp

  from typing import Literal, Optional, Tuple, Union

- from httomolibgpu.misc.supp_func import data_checker
-

  __all__ = [
      "rescale_to_int",
@@ -80,8 +78,6 @@ def rescale_to_int(
      else:
          output_dtype = np.uint32

-     data = data_checker(data, verbosity=True, method_name="rescale_to_int")
-
      # get the min and max integer values of the output type
      output_min = cp.iinfo(output_dtype).min
      output_max = cp.iinfo(output_dtype).max
httomolibgpu/misc/utils.py ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ---------------------------------------------------------------------------
+ # Copyright 2022 Diamond Light Source Ltd.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ---------------------------------------------------------------------------
+ # Created By : Tomography Team at DLS <scientificsoftware@diamond.ac.uk>
+ # Created Date: 02/June/2025
+ # ---------------------------------------------------------------------------
+ """Various utilities for data inspection and correction"""
+
+ from httomolibgpu import cupywrapper
+ from typing import Optional
+
+ cp = cupywrapper.cp
+ cupy_run = cupywrapper.cupy_run
+
+ from unittest.mock import Mock
+
+ if cupy_run:
+     from httomolibgpu.cuda_kernels import load_cuda_module
+ else:
+     load_cuda_module = Mock()
+
+
+ __all__ = [
+     "data_checker",
+ ]
+
+
+ def data_checker(
+     data: cp.ndarray,
+     infsnans_correct: bool = True,
+     zeros_warning: bool = False,
+     data_to_method_name: Optional[str] = None,
+     verbosity: bool = True,
+ ) -> cp.ndarray:
+     """Function that performs checks on input data to ensure its validity, performs corrections and prints the warnings.
+     Currently it checks for the presence of Infs and NaNs in the data and corrects them.
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         CuPy array either float32 or uint16 data type.
+     infsnans_correct: bool
+         Perform correction of NaNs and Infs if they are present in the data.
+     zeros_warning: bool
+         Count the number of zeros in the data and produce a warning if more half of the data are zeros.
+     verbosity : bool
+         Print the warnings.
+     data_to_method_name : str, optional.
+         Method's name the output of which is tested. This is tailored for printing purposes when the method runs in HTTomo.
+
+     Returns
+     -------
+     cp.ndarray
+         Returns corrected CuPy array.
+     """
+     if data.dtype not in ["uint16", "float32"]:
+         raise ValueError(
+             "The input data of `uint16` and `float32` data types is accepted only."
+         )
+
+     if infsnans_correct:
+         data = __naninfs_check(
+             data, verbosity=verbosity, method_name=data_to_method_name
+         )
+     # TODO
+     # if zeros_warning:
+     #     __zeros_check(data, verbosity=verbosity, percentage_threshold = 50, method_name=data_to_method_name)
+
+     return data
+
+
+ def __naninfs_check(
+     data: cp.ndarray,
+     verbosity: bool = True,
+     method_name: Optional[str] = None,
+ ) -> cp.ndarray:
+     """
+     This function finds NaN's, +-Inf's in the input data and then prints the warnings and correct the data if correction is enabled.
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         Input CuPy or Numpy array either float32 or uint16 data type.
+     verbosity : bool
+         If enabled, then the printing of the warning happens when data contains infs or nans
+     method_name : str, optional.
+         Method's name for which the output data is tested.
+
+     Returns
+     -------
+     ndarray
+         Uncorrected or corrected (nans and infs converted to zeros) input array.
+     """
+     present_nans_infs_b = False
+
+     input_type = data.dtype
+     if len(data.shape) == 2:
+         dy, dx = data.shape
+         dz = 1
+     else:
+         dz, dy, dx = data.shape
+
+     present_nans_infs = cp.zeros(shape=(1)).astype(cp.uint8)
+
+     block_x = 128
+     # setting grid/block parameters
+     block_dims = (block_x, 1, 1)
+     grid_x = (dx + block_x - 1) // block_x
+     grid_y = dy
+     grid_z = dz
+     grid_dims = (grid_x, grid_y, grid_z)
+     params = (data, dz, dy, dx, present_nans_infs)
+
+     kernel_args = "remove_nan_inf<{0}>".format(
+         "float" if input_type == "float32" else "unsigned short"
+     )
+
+     module = load_cuda_module("remove_nan_inf", name_expressions=[kernel_args])
+     remove_nan_inf_kernel = module.get_function(kernel_args)
+     remove_nan_inf_kernel(grid_dims, block_dims, params)
+
+     if present_nans_infs[0].get() == 1:
+         present_nans_infs_b = True
+
+     if present_nans_infs_b:
+         if verbosity:
+             print(
+                 "Warning! Output data of the \033[31m{}\033[0m method contains Inf's or/and NaN's. Corrected to zeros.".format(
+                     method_name
+                 )
+             )
+     return data
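data_checker launches the remove_nan_inf CUDA kernel, so it needs a working CuPy/GPU setup. A hedged usage sketch against the new httomolibgpu.misc.utils module (the shape, the planted values and the method name passed for the warning are illustrative only):

    import cupy as cp
    from httomolibgpu.misc.utils import data_checker

    proj = cp.ones((180, 8, 256), dtype=cp.float32)   # illustrative block
    proj[0, 0, 0] = cp.nan                            # plant invalid values
    proj[1, 2, 3] = cp.inf

    # NaNs/Infs are corrected to zeros on the GPU; with verbosity=True a warning
    # naming the originating method is printed.
    proj = data_checker(
        proj,
        infsnans_correct=True,
        verbosity=True,
        data_to_method_name="dark_flat_field_correction",
    )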
httomolibgpu/prep/alignment.py CHANGED
@@ -35,8 +35,6 @@ else:

  from typing import Dict, List, Tuple

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "distortion_correction_proj_discorpy",
  ]
@@ -88,10 +86,6 @@ def distortion_correction_proj_discorpy(
      if len(data.shape) == 2:
          data = cp.expand_dims(data, axis=0)

-     data = data_checker(
-         data, verbosity=True, method_name="distortion_correction_proj_discorpy"
-     )
-
      # Get info from metadata txt file
      xcenter, ycenter, list_fact = _load_metadata_txt(metadata_path)

httomolibgpu/prep/normalize.py CHANGED
@@ -20,7 +20,6 @@
  # ---------------------------------------------------------------------------
  """Modules for raw projection data normalization"""

- import numpy as np
  from httomolibgpu import cupywrapper

  cp = cupywrapper.cp
@@ -34,27 +33,21 @@ else:
      mean = Mock()

  from numpy import float32
- from typing import Tuple

- from httomolibgpu.misc.supp_func import data_checker

- __all__ = ["normalize"]
+ __all__ = ["dark_flat_field_correction", "minus_log"]


- def normalize(
+ def dark_flat_field_correction(
      data: cp.ndarray,
      flats: cp.ndarray,
      darks: cp.ndarray,
      flats_multiplier: float = 1.0,
      darks_multiplier: float = 1.0,
      cutoff: float = 10.0,
-     minus_log: bool = True,
-     nonnegativity: bool = False,
-     remove_nans: bool = False,
  ) -> cp.ndarray:
      """
      Normalize raw projection data using the flat and dark field projections.
-     This is a raw CUDA kernel implementation with CuPy wrappers.

      Parameters
      ----------
@@ -70,17 +63,11 @@ def normalize(
          A multiplier to apply to darks, can work as an intensity compensation constant.
      cutoff : float
          Permitted maximum value for the normalised data.
-     minus_log : bool
-         Apply negative log to the normalised data.
-     nonnegativity : bool
-         Remove negative values in the normalised data.
-     remove_nans : bool
-         Remove NaN and Inf values in the normalised data.

-     Returns
+     Returns
      -------
      cp.ndarray
-         Normalised 3D tomographic data as a CuPy array.
+         Normalised by dark/flat fields 3D tomographic data as a CuPy array.
      """
      _check_valid_input_normalise(data, flats, darks)

@@ -101,16 +88,6 @@ def normalize(
          }
          float v = (float(data) - float(darks))/denom;
          """
-     if minus_log:
-         kernel += "v = -log(v);\n"
-         kernel_name += "_mlog"
-     if nonnegativity:
-         kernel += "if (v < 0.0f) v = 0.0f;\n"
-         kernel_name += "_nneg"
-     if remove_nans:
-         kernel += "if (isnan(v)) v = 0.0f;\n"
-         kernel += "if (isinf(v)) v = 0.0f;\n"
-         kernel_name += "_remnan"
      kernel += "if (v > cutoff) v = cutoff;\n"
      kernel += "if (v < -cutoff) v = cutoff;\n"
      kernel += "out = v;\n"
@@ -130,6 +107,24 @@ def normalize(
      return out


+ def minus_log(data: cp.ndarray) -> cp.ndarray:
+     """
+     Apply -log(data) operation
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         Data as a CuPy array.
+
+     Returns
+     -------
+     cp.ndarray
+         data after -log(data)
+     """
+
+     return -cp.log(data)
+
+
  def _check_valid_input_normalise(data, flats, darks) -> None:
      """Helper function to check the validity of inputs to normalisation functions"""
      if data.ndim != 3:
@@ -145,7 +140,3 @@ def _check_valid_input_normalise(data, flats, darks) -> None:
          flats = flats[cp.newaxis, :, :]
      if darks.ndim == 2:
          darks = darks[cp.newaxis, :, :]
-
-     data_checker(data, verbosity=True, method_name="normalize_data")
-     data_checker(flats, verbosity=True, method_name="normalize_flats")
-     data_checker(darks, verbosity=True, method_name="normalize_darks")
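With normalize gone, the 5.1 API splits normalisation into two explicit calls: dark_flat_field_correction followed by minus_log. A minimal sketch of the replacement pipeline, assuming CuPy arrays for projections, flats and darks (shapes are illustrative only):

    import cupy as cp
    from httomolibgpu import dark_flat_field_correction, minus_log

    projs = cp.random.random((180, 8, 256)).astype(cp.float32)  # illustrative
    flats = cp.ones((20, 8, 256), dtype=cp.float32)
    darks = cp.zeros((20, 8, 256), dtype=cp.float32)

    # Step 1: flat/dark-field normalisation (cutoff clamps the normalised values).
    corrected = dark_flat_field_correction(projs, flats, darks, cutoff=10.0)

    # Step 2: the negative log, previously folded into normalize(minus_log=True).
    absorption = minus_log(corrected)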
httomolibgpu/prep/phase.py CHANGED
@@ -22,6 +22,7 @@

  import numpy as np
  from httomolibgpu import cupywrapper
+ from httomolibgpu.memory_estimator_helpers import _DeviceMemStack

  cp = cupywrapper.cp
  cupy_run = cupywrapper.cupy_run
@@ -30,19 +31,19 @@ from unittest.mock import Mock

  if cupy_run:
      from cupyx.scipy.fft import fft2, ifft2, fftshift
+     from cupyx.scipy.fftpack import get_fft_plan
  else:
      fft2 = Mock()
      ifft2 = Mock()
      fftshift = Mock()

  from numpy import float32
- from typing import Tuple
+ from typing import Optional, Tuple
  import math

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "paganin_filter",
+     "paganin_filter_savu_legacy",
  ]


@@ -55,6 +56,7 @@ def paganin_filter(
      distance: float = 1.0,
      energy: float = 53.0,
      ratio_delta_beta: float = 250,
+     calc_peak_gpu_mem: bool = False,
  ) -> cp.ndarray:
      """
      Perform single-material phase retrieval from flats/darks corrected tomographic measurements. For more detailed information, see :ref:`phase_contrast_module`.
@@ -72,32 +74,50 @@ def paganin_filter(
          Beam energy in keV.
      ratio_delta_beta : float
          The ratio of delta/beta, where delta is the phase shift and real part of the complex material refractive index and beta is the absorption.
+     calc_peak_gpu_mem: bool
+         Parameter to support memory estimation in HTTomo. Irrelevant to the method itself and can be ignored by user.

      Returns
      -------
      cp.ndarray
          The 3D array of Paganin phase-filtered projection images.
      """
+     mem_stack = _DeviceMemStack() if calc_peak_gpu_mem else None
      # Check the input data is valid
-     if tomo.ndim != 3:
+     if not mem_stack and tomo.ndim != 3:
          raise ValueError(
              f"Invalid number of dimensions in data: {tomo.ndim},"
              " please provide a stack of 2D projections."
          )
-
-     tomo = data_checker(tomo, verbosity=True, method_name="paganin_filter")
-
-     dz_orig, dy_orig, dx_orig = tomo.shape
+     if mem_stack:
+         mem_stack.malloc(np.prod(tomo) * np.float32().itemsize)
+     dz_orig, dy_orig, dx_orig = tomo.shape if not mem_stack else tomo

      # Perform padding to the power of 2 as FFT is O(n*log(n)) complexity
      # TODO: adding other options of padding?
-     padded_tomo, pad_tup = _pad_projections_to_second_power(tomo)
+     padded_tomo, pad_tup = _pad_projections_to_second_power(tomo, mem_stack)

-     dz, dy, dx = padded_tomo.shape
+     dz, dy, dx = padded_tomo.shape if not mem_stack else padded_tomo

      # 3D FFT of tomo data
-     padded_tomo = cp.asarray(padded_tomo, dtype=cp.complex64)
-     fft_tomo = fft2(padded_tomo, axes=(-2, -1), overwrite_x=True)
+     if mem_stack:
+         mem_stack.malloc(np.prod(padded_tomo) * np.complex64().itemsize)
+         mem_stack.free(np.prod(padded_tomo) * np.float32().itemsize)
+         fft_input = cp.empty(padded_tomo, dtype=cp.complex64)
+     else:
+         padded_tomo = cp.asarray(padded_tomo, dtype=cp.complex64)
+         fft_input = padded_tomo
+
+     fft_plan = get_fft_plan(fft_input, axes=(-2, -1))
+     if mem_stack:
+         mem_stack.malloc(fft_plan.work_area.mem.size)
+         mem_stack.free(fft_plan.work_area.mem.size)
+     else:
+         with fft_plan:
+             fft_tomo = fft2(padded_tomo, axes=(-2, -1), overwrite_x=True)
+         del padded_tomo
+     del fft_input
+     del fft_plan

      # calculate alpha constant
      alpha = _calculate_alpha(energy, distance / 1e-6, ratio_delta_beta)
@@ -106,18 +126,56 @@ def paganin_filter(
      indx = _reciprocal_coord(pixel_size, dy)
      indy = _reciprocal_coord(pixel_size, dx)

-     # Build Lorentzian-type filter
-     phase_filter = fftshift(
-         1.0 / (1.0 + alpha * (cp.add.outer(cp.square(indx), cp.square(indy))))
-     )
+     if mem_stack:
+         mem_stack.malloc(indx.size * indx.dtype.itemsize)  # cp.asarray(indx)
+         mem_stack.malloc(indx.size * indx.dtype.itemsize)  # cp.square
+         mem_stack.free(indx.size * indx.dtype.itemsize)  # cp.asarray(indx)
+         mem_stack.malloc(indy.size * indy.dtype.itemsize)  # cp.asarray(indy)
+         mem_stack.malloc(indy.size * indy.dtype.itemsize)  # cp.square
+         mem_stack.free(indy.size * indy.dtype.itemsize)  # cp.asarray(indy)
+
+         mem_stack.malloc(indx.size * indy.size * indx.dtype.itemsize)  # cp.add.outer
+         mem_stack.free(indx.size * indx.dtype.itemsize)  # cp.square
+         mem_stack.free(indy.size * indy.dtype.itemsize)  # cp.square
+         mem_stack.malloc(indx.size * indy.size * indx.dtype.itemsize)  # phase_filter
+         mem_stack.free(indx.size * indy.size * indx.dtype.itemsize)  # cp.add.outer
+         mem_stack.free(indx.size * indy.size * indx.dtype.itemsize)  # phase_filter
+
+     else:
+         # Build Lorentzian-type filter
+         phase_filter = fftshift(
+             1.0
+             / (
+                 1.0
+                 + alpha
+                 * (
+                     cp.add.outer(
+                         cp.square(cp.asarray(indx)), cp.square(cp.asarray(indy))
+                     )
+                 )
+             )
+         )

-     phase_filter = phase_filter / phase_filter.max()  # normalisation
+         phase_filter = phase_filter / phase_filter.max()  # normalisation

-     # Filter projections
-     fft_tomo *= phase_filter
+         # Filter projections
+         fft_tomo *= phase_filter
+         del phase_filter

      # Apply filter and take inverse FFT
-     ifft_filtered_tomo = ifft2(fft_tomo, axes=(-2, -1), overwrite_x=True).real
+     ifft_input = (
+         fft_tomo if not mem_stack else cp.empty(padded_tomo, dtype=cp.complex64)
+     )
+     ifft_plan = get_fft_plan(ifft_input, axes=(-2, -1))
+     if mem_stack:
+         mem_stack.malloc(ifft_plan.work_area.mem.size)
+         mem_stack.free(ifft_plan.work_area.mem.size)
+     else:
+         with ifft_plan:
+             ifft_filtered_tomo = ifft2(fft_tomo, axes=(-2, -1), overwrite_x=True).real
+         del fft_tomo
+     del ifft_plan
+     del ifft_input

      # slicing indices for cropping
      slc_indices = (
@@ -126,8 +184,19 @@ def paganin_filter(
          slice(pad_tup[2][0], pad_tup[2][0] + dx_orig, 1),
      )

+     if mem_stack:
+         mem_stack.malloc(np.prod(tomo) * np.float32().itemsize)  # astype(cp.float32)
+         mem_stack.free(
+             np.prod(padded_tomo) * np.complex64().itemsize
+         )  # ifft_filtered_tomo
+         mem_stack.malloc(
+             np.prod(tomo) * np.float32().itemsize
+         )  # return _log_kernel(tomo)
+         return mem_stack.highwater
+
      # crop the padded filtered data:
      tomo = ifft_filtered_tomo[slc_indices].astype(cp.float32)
+     del ifft_filtered_tomo

      # taking the negative log
      _log_kernel = cp.ElementwiseKernel(
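The hunk above shows that with calc_peak_gpu_mem=True the function replays its allocations on a _DeviceMemStack and returns mem_stack.highwater instead of filtered data; the np.prod(tomo) and tuple-unpacking lines suggest that in this mode tomo is a (dz, dy, dx) shape rather than an array. A hedged sketch of that estimation mode under this reading (the shape is illustrative, and FFT-plan sizing still requires a GPU):

    from httomolibgpu import paganin_filter

    # Estimation mode: no projection data is processed; only the peak device
    # footprint (in bytes) of a run over this block shape is reported.
    peak_bytes = paganin_filter(
        (1801, 8, 2560),          # hypothetical (angles, slices, detector_x) block
        pixel_size=1.28,
        distance=1.0,
        energy=53.0,
        ratio_delta_beta=250,
        calc_peak_gpu_mem=True,
    )
    print(f"estimated peak GPU memory: {peak_bytes / 1024**3:.2f} GiB")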
@@ -180,7 +249,7 @@ def _calculate_pad_size(datashape: tuple) -> list:


  def _pad_projections_to_second_power(
-     tomo: cp.ndarray,
+     tomo: cp.ndarray, mem_stack: Optional[_DeviceMemStack]
  ) -> Tuple[cp.ndarray, Tuple[int, int]]:
      """
      Performs padding of each projection to the next power of 2.
@@ -197,11 +266,17 @@
          ndarray: padded 3d projection data
          tuple: a tuple with padding dimensions
      """
-     full_shape_tomo = cp.shape(tomo)
+     full_shape_tomo = cp.shape(tomo) if not mem_stack else tomo

      pad_list = _calculate_pad_size(full_shape_tomo)

-     padded_tomo = cp.pad(tomo, tuple(pad_list), "edge")
+     if mem_stack:
+         padded_tomo = [
+             sh + pad[0] + pad[1] for sh, pad in zip(full_shape_tomo, pad_list)
+         ]
+         mem_stack.malloc(np.prod(padded_tomo) * np.float32().itemsize)
+     else:
+         padded_tomo = cp.pad(tomo, tuple(pad_list), "edge")

      return padded_tomo, tuple(pad_list)

@@ -212,7 +287,7 @@ def _wavelength_micron(energy: float) -> float:
      return 2 * math.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy


- def _reciprocal_coord(pixel_size: float, num_grid: int) -> cp.ndarray:
+ def _reciprocal_coord(pixel_size: float, num_grid: int) -> np.ndarray:
      """
      Calculate reciprocal grid coordinates for a given pixel size
      and discretization.
@@ -230,6 +305,50 @@ def _reciprocal_coord(pixel_size: float, num_grid: int) -> cp.ndarray:
          Grid coordinates.
      """
      n = num_grid - 1
-     rc = cp.arange(-n, num_grid, 2, dtype=cp.float32)
+     rc = np.arange(-n, num_grid, 2, dtype=cp.float32)
      rc *= 2 * math.pi / (n * pixel_size)
      return rc
+
+
+ def paganin_filter_savu_legacy(
+     tomo: cp.ndarray,
+     pixel_size: float = 1.28,
+     distance: float = 1.0,
+     energy: float = 53.0,
+     ratio_delta_beta: float = 250,
+     calc_peak_gpu_mem: bool = False,
+ ) -> cp.ndarray:
+     """
+     Perform single-material phase retrieval from flats/darks corrected tomographic measurements. For more detailed information, see :ref:`phase_contrast_module`.
+     Also see :cite:`Paganin02` and :cite:`paganin2020boosting` for references. The ratio_delta_beta parameter here follows implementation in Savu software.
+     The module will be retired in future in favour of paganin_filter. One can rescale parameter ratio_delta_beta / 4 to achieve the same effect in paganin_filter.
+
+     Parameters
+     ----------
+     tomo : cp.ndarray
+         3D array of f/d corrected tomographic projections.
+     pixel_size : float
+         Detector pixel size (resolution) in micron units.
+     distance : float
+         Propagation distance of the wavefront from sample to detector in metre units.
+     energy : float
+         Beam energy in keV.
+     ratio_delta_beta : float
+         The ratio of delta/beta, where delta is the phase shift and real part of the complex material refractive index and beta is the absorption.
+     calc_peak_gpu_mem: bool
+         Parameter to support memory estimation in HTTomo. Irrelevant to the method itself and can be ignored by user.
+
+     Returns
+     -------
+     cp.ndarray
+         The 3D array of Paganin phase-filtered projection images.
+     """
+
+     return paganin_filter(
+         tomo,
+         pixel_size,
+         distance,
+         energy,
+         ratio_delta_beta / 4,
+         calc_peak_gpu_mem=calc_peak_gpu_mem,
+     )
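Because paganin_filter_savu_legacy simply forwards to paganin_filter with the ratio divided by four, migrating off the legacy call is a one-parameter change. A hedged sketch of the equivalence (illustrative shape, working CuPy/GPU assumed):

    import cupy as cp
    from httomolibgpu import paganin_filter, paganin_filter_savu_legacy

    projs = cp.random.random((180, 8, 256)).astype(cp.float32)  # illustrative

    legacy = paganin_filter_savu_legacy(projs, ratio_delta_beta=250)
    modern = paganin_filter(projs, ratio_delta_beta=250 / 4)

    # Same filter strength by construction, so the outputs should match closely.
    assert cp.allclose(legacy, modern)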