httomolibgpu 3.0.tar.gz → 3.1.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (35)
  1. {httomolibgpu-3.0/httomolibgpu.egg-info → httomolibgpu-3.1.1}/PKG-INFO +1 -2
  2. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/__init__.py +3 -1
  3. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/denoise.py +30 -19
  4. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/supp_func.py +1 -1
  5. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/prep/phase.py +55 -218
  6. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/prep/stripe.py +7 -3
  7. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/recon/algorithm.py +186 -41
  8. {httomolibgpu-3.0 → httomolibgpu-3.1.1/httomolibgpu.egg-info}/PKG-INFO +1 -2
  9. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu.egg-info/SOURCES.txt +0 -1
  10. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu.egg-info/requires.txt +0 -1
  11. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/pyproject.toml +0 -1
  12. httomolibgpu-3.0/httomolibgpu/cuda_kernels/paganin_filter_gen.cu +0 -37
  13. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/LICENSE +0 -0
  14. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/MANIFEST.in +0 -0
  15. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/README.rst +0 -0
  16. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/__init__.py +0 -0
  17. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/calc_metrics.cu +0 -0
  18. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/center_360_shifts.cu +0 -0
  19. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/generate_mask.cu +0 -0
  20. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/median_kernel.cu +0 -0
  21. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/raven_filter.cu +0 -0
  22. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cuda_kernels/remove_nan_inf.cu +0 -0
  23. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/cupywrapper.py +0 -0
  24. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/__init__.py +0 -0
  25. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/corr.py +0 -0
  26. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/morph.py +0 -0
  27. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/misc/rescale.py +0 -0
  28. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/prep/__init__.py +0 -0
  29. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/prep/alignment.py +0 -0
  30. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/prep/normalize.py +0 -0
  31. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/recon/__init__.py +0 -0
  32. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu/recon/rotation.py +0 -0
  33. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu.egg-info/dependency_links.txt +0 -0
  34. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/httomolibgpu.egg-info/top_level.txt +0 -0
  35. {httomolibgpu-3.0 → httomolibgpu-3.1.1}/setup.cfg +0 -0
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 3.0
+ Version: 3.1.1
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause
@@ -19,7 +19,6 @@ Requires-Dist: scipy
  Requires-Dist: pillow
  Requires-Dist: scikit-image
  Requires-Dist: tomobar
- Requires-Dist: ccpi-regularisation-cupy
  Provides-Extra: dev
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
@@ -4,11 +4,12 @@ from httomolibgpu.misc.morph import sino_360_to_180, data_resampler
  from httomolibgpu.misc.rescale import rescale_to_int
  from httomolibgpu.prep.alignment import distortion_correction_proj_discorpy
  from httomolibgpu.prep.normalize import normalize
- from httomolibgpu.prep.phase import paganin_filter_savu, paganin_filter_tomopy
+ from httomolibgpu.prep.phase import paganin_filter_tomopy
  from httomolibgpu.prep.stripe import (
  remove_stripe_based_sorting,
  remove_stripe_ti,
  remove_all_stripe,
+ raven_filter,
  )

  from httomolibgpu.recon.algorithm import (
@@ -17,6 +18,7 @@ from httomolibgpu.recon.algorithm import (
  LPRec3d_tomobar,
  SIRT3d_tomobar,
  CGLS3d_tomobar,
+ FISTA3d_tomobar,
  )

  from httomolibgpu.recon.rotation import find_center_vo, find_center_360, find_center_pc
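Note: with the re-exports above, the names new in 3.1.1 can be imported straight from the package root. A minimal sketch, assuming httomolibgpu 3.1.1 is installed with a working CuPy/CUDA stack:

    from httomolibgpu import raven_filter, FISTA3d_tomobar

    print(raven_filter.__module__)     # httomolibgpu.prep.stripe
    print(FISTA3d_tomobar.__module__)  # httomolibgpu.recon.algorithm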
@@ -21,7 +21,6 @@
  """Module for data denoising. For more detailed information see :ref:`data_denoising_module`."""

  import numpy as np
- from typing import Union, Optional

  from httomolibgpu import cupywrapper

@@ -33,7 +32,7 @@ from unittest.mock import Mock
  from httomolibgpu.misc.supp_func import data_checker

  if cupy_run:
- from ccpi.filters.regularisersCuPy import ROF_TV, PD_TV
+ from tomobar.regularisersCuPy import ROF_TV_cupy, PD_TV_cupy
  else:
  ROF_TV = Mock()
  PD_TV = Mock()
@@ -47,10 +46,11 @@ __all__ = [

  def total_variation_ROF(
  data: cp.ndarray,
- regularisation_parameter: Optional[float] = 1e-05,
- iterations: Optional[int] = 3000,
- time_marching_parameter: Optional[float] = 0.001,
- gpu_id: Optional[int] = 0,
+ regularisation_parameter: float = 1e-05,
+ iterations: int = 3000,
+ time_marching_parameter: float = 0.001,
+ gpu_id: int = 0,
+ half_precision: bool = False,
  ) -> cp.ndarray:
  """
  Total Variation using Rudin-Osher-Fatemi (ROF) :cite:`rudin1992nonlinear` explicit iteration scheme to perform edge-preserving image denoising.
@@ -62,14 +62,16 @@ def total_variation_ROF(
  ----------
  data : cp.ndarray
  Input CuPy 3D array of float32 data type.
- regularisation_parameter : float, optional
+ regularisation_parameter : float
  Regularisation parameter to control the level of smoothing. Defaults to 1e-05.
- iterations : int, optional
+ iterations : int
  The number of iterations. Defaults to 3000.
- time_marching_parameter : float, optional
+ time_marching_parameter : float
  Time marching parameter, needs to be small to ensure convergence. Defaults to 0.001.
- gpu_id : int, optional
+ gpu_id : int
  GPU device index to perform processing on. Defaults to 0.
+ half_precision : bool
+ Perform faster computation in half-precision with a very minimal sacrifice in quality. Defaults to False.

  Returns
  -------
@@ -84,19 +86,25 @@ def total_variation_ROF(

  data = data_checker(data, verbosity=True, method_name="total_variation_ROF")

- return ROF_TV(
- data, regularisation_parameter, iterations, time_marching_parameter, gpu_id
+ return ROF_TV_cupy(
+ data,
+ regularisation_parameter,
+ iterations,
+ time_marching_parameter,
+ gpu_id,
+ half_precision,
  )


  def total_variation_PD(
  data: cp.ndarray,
- regularisation_parameter: Optional[float] = 1e-05,
- iterations: Optional[int] = 1000,
- isotropic: Optional[bool] = True,
- nonnegativity: Optional[bool] = False,
- lipschitz_const: Optional[float] = 8.0,
- gpu_id: Optional[int] = 0,
+ regularisation_parameter: float = 1e-05,
+ iterations: int = 1000,
+ isotropic: bool = True,
+ nonnegativity: bool = False,
+ lipschitz_const: float = 8.0,
+ gpu_id: int = 0,
+ half_precision: bool = False,
  ) -> cp.ndarray:
  """
  Primal Dual algorithm for non-smooth convex Total Variation functional :cite:`chan1999nonlinear`. See more in :ref:`method_total_variation_PD`.
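Note: total_variation_ROF now forwards the new half_precision flag to tomobar's ROF_TV_cupy. A usage sketch; the array shape and parameter values below are illustrative assumptions, not values taken from the package:

    import cupy as cp
    from httomolibgpu.misc.denoise import total_variation_ROF

    noisy_volume = cp.random.random((8, 256, 256)).astype(cp.float32)  # illustrative 3D data
    denoised = total_variation_ROF(
        noisy_volume,
        regularisation_parameter=1e-05,
        iterations=3000,
        time_marching_parameter=0.001,
        gpu_id=0,
        half_precision=True,  # new in 3.1.1: faster compute at a very small cost in quality
    )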
@@ -117,6 +125,8 @@ def total_variation_PD(
  Lipschitz constant to control convergence. Defaults to 8.
  gpu_id : int
  GPU device index to perform processing on. Defaults to 0.
+ half_precision : bool
+ Perform faster computation in half-precision with a very minimal sacrifice in quality. Defaults to False.

  Returns
  -------
@@ -139,7 +149,7 @@ def total_variation_PD(
  if nonnegativity:
  nonneg = 1

- return PD_TV(
+ return PD_TV_cupy(
  data,
  regularisation_parameter,
  iterations,
@@ -147,4 +157,5 @@ def total_variation_PD(
  nonneg,
  lipschitz_const,
  gpu_id,
+ half_precision,
  )
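Note: the same half_precision flag is appended to the PD_TV_cupy call, so an analogous sketch for total_variation_PD would be (again with illustrative data and defaults copied from the signature shown above):

    import cupy as cp
    from httomolibgpu.misc.denoise import total_variation_PD

    noisy_volume = cp.random.random((8, 256, 256)).astype(cp.float32)
    denoised = total_variation_PD(
        noisy_volume,
        regularisation_parameter=1e-05,
        iterations=1000,
        isotropic=True,
        nonnegativity=False,
        lipschitz_const=8.0,
        gpu_id=0,
        half_precision=True,
    )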
@@ -159,7 +159,7 @@ def data_checker(
  data: cp.ndarray,
  verbosity: bool = True,
  method_name: Optional[str] = None,
- ) -> bool:
+ ) -> cp.ndarray:
  """
  Function that performs the variety of checks on input data, in some cases also correct the data and prints warnings.
  Currently it checks for: the presence of infs and nans in data.
@@ -29,10 +29,8 @@ cupy_run = cupywrapper.cupy_run
  from unittest.mock import Mock

  if cupy_run:
- from httomolibgpu.cuda_kernels import load_cuda_module
  from cupyx.scipy.fft import fft2, ifft2, fftshift
  else:
- load_cuda_module = Mock()
  fft2 = Mock()
  ifft2 = Mock()
  fftshift = Mock()
@@ -44,225 +42,10 @@ import math
  from httomolibgpu.misc.supp_func import data_checker

  __all__ = [
- "paganin_filter_savu",
  "paganin_filter_tomopy",
  ]


- ## %%%%%%%%%%%%%%%%%%%%%%% paganin_filter %%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
- #: CuPy implementation of Paganin filter from Savu
- def paganin_filter_savu(
- data: cp.ndarray,
- ratio: float = 250.0,
- energy: float = 53.0,
- distance: float = 1.0,
- resolution: float = 1.28,
- pad_y: int = 100,
- pad_x: int = 100,
- pad_method: str = "edge",
- increment: float = 0.0,
- ) -> cp.ndarray:
- """
- Apply Paganin filter (for denoising or contrast enhancement) to
- projections.
-
- Parameters
- ----------
- data : cp.ndarray
- The stack of projections to filter.
-
- ratio : float, optional
- Ratio of delta/beta.
-
- energy : float, optional
- Beam energy in keV.
-
- distance : float, optional
- Distance from sample to detector in metres.
-
- resolution : float, optional
- Pixel size in microns.
-
- pad_y : int, optional
- Pad the top and bottom of projections.
-
- pad_x : int, optional
- Pad the left and right of projections.
-
- pad_method : str, optional
- Numpy pad method to use.
-
- increment : float, optional
- Increment all values by this amount before taking the log.
-
- Returns
- -------
- cp.ndarray
- The stack of filtered projections.
- """
- # Check the input data is valid
- if data.ndim != 3:
- raise ValueError(
- f"Invalid number of dimensions in data: {data.ndim},"
- " please provide a stack of 2D projections."
- )
-
- data = data_checker(data, verbosity=True, method_name="paganin_filter_savu")
-
- # Setup various values for the filter
- _, height, width = data.shape
- micron = 1e-6
- keV = 1000.0
- energy *= keV
- resolution *= micron
- wavelength = (1240.0 / energy) * 1e-9
-
- height1 = height + 2 * pad_y
- width1 = width + 2 * pad_x
-
- # Define the paganin filter, taking into account the padding that will be
- # applied to the projections (if any)
-
- # Using raw kernel her as indexing is direct and it avoids a lot of temporaries
- # and tiny kernels
- module = load_cuda_module("paganin_filter_gen")
- kernel = module.get_function("paganin_filter_gen")
-
- # Apply padding to all the 2D projections
- # Note: this takes considerable time on GPU...
- data = cp.pad(data, ((0, 0), (pad_y, pad_y), (pad_x, pad_x)), mode=pad_method)
-
- precond_kernel_float = cp.ElementwiseKernel(
- "T data",
- "T out",
- """
- if (isnan(data)) {
- out = T(0);
- } else if (isinf(data)) {
- out = data < 0.0 ? -3.402823e38f : 3.402823e38f; // FLT_MAX, not available in cupy
- } else if (data == 0.0) {
- out = 1.0;
- } else {
- out = data;
- }
- """,
- name="paganin_precond_float",
- no_return=True,
- )
- precond_kernel_int = cp.ElementwiseKernel(
- "T data",
- "T out",
- """out = data == 0 ? 1 : data""",
- name="paganin_precond_int",
- no_return=True,
- )
-
- if data.dtype in (cp.float32, cp.float64):
- precond_kernel_float(data, data)
- else:
- precond_kernel_int(data, data)
-
- # avoid normalising in both directions - we include multiplier in the post_kernel
- data = cp.asarray(data, dtype=cp.complex64)
- data = fft2(data, axes=(-2, -1), overwrite_x=True, norm="backward")
-
- # prepare filter here, while the GPU is busy with the FFT
- filtercomplex = cp.empty((height1, width1), dtype=cp.complex64)
- bx = 16
- by = 8
- gx = (width1 + bx - 1) // bx
- gy = (height1 + by - 1) // by
- kernel(
- grid=(gx, gy, 1),
- block=(bx, by, 1),
- args=(
- cp.int32(width1),
- cp.int32(height1),
- cp.float32(resolution),
- cp.float32(wavelength),
- cp.float32(distance),
- cp.float32(ratio),
- filtercomplex,
- ),
- )
- data *= filtercomplex
-
- data = ifft2(data, axes=(-2, -1), overwrite_x=True, norm="forward")
-
- post_kernel = cp.ElementwiseKernel(
- "C pci1, raw float32 increment, raw float32 ratio, raw float32 fft_scale",
- "T out",
- "out = -0.5 * ratio * log(abs(pci1) * fft_scale + increment)",
- name="paganin_post_proc",
- no_return=True,
- )
- fft_scale = 1.0 / (data.shape[1] * data.shape[2])
- res = cp.empty((data.shape[0], height, width), dtype=cp.float32)
- post_kernel(
- data[:, pad_y : pad_y + height, pad_x : pad_x + width],
- np.float32(increment),
- np.float32(ratio),
- np.float32(fft_scale),
- res,
- )
- return res
-
-
- def _wavelength(energy: float) -> float:
- SPEED_OF_LIGHT = 299792458e2 # [cm/s]
- PLANCK_CONSTANT = 6.58211928e-19 # [keV*s]
- return 2 * math.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy
-
-
- def _reciprocal_grid(pixel_size: float, shape_proj: tuple) -> cp.ndarray:
- """
- Calculate reciprocal grid.
-
- Parameters
- ----------
- pixel_size : float
- Detector pixel size in cm.
- shape_proj : tuple
- Shape of the reciprocal grid along x and y axes.
-
- Returns
- -------
- ndarray
- Grid coordinates.
- """
- # Sampling in reciprocal space.
- indx = _reciprocal_coord(pixel_size, shape_proj[0])
- indy = _reciprocal_coord(pixel_size, shape_proj[1])
- indx_sq = cp.square(indx)
- indy_sq = cp.square(indy)
-
- return cp.add.outer(indx_sq, indy_sq)
-
-
- def _reciprocal_coord(pixel_size: float, num_grid: int) -> cp.ndarray:
- """
- Calculate reciprocal grid coordinates for a given pixel size
- and discretization.
-
- Parameters
- ----------
- pixel_size : float
- Detector pixel size in cm.
- num_grid : int
- Size of the reciprocal grid.
-
- Returns
- -------
- ndarray
- Grid coordinates.
- """
- n = num_grid - 1
- rc = cp.arange(-n, num_grid, 2, dtype=cp.float32)
- rc *= 2 * math.pi / (n * pixel_size)
- return rc
-
-
- ##-------------------------------------------------------------##
  ##-------------------------------------------------------------##
  # Adaptation of retrieve_phase (Paganin filter) from TomoPy
  def paganin_filter_tomopy(
@@ -287,7 +70,7 @@ def paganin_filter_tomopy(
  energy : float, optional
  Energy of incident wave in keV.
  alpha : float, optional
- Regularization parameter, the ratio of delta/beta. Larger values lead to more smoothing.
+ Regularization parameter, the ratio of delta/beta. Smaller values lead to less noise and more blur.

  Returns
  -------
@@ -412,3 +195,57 @@ def _pad_projections_to_second_power(
  def _paganin_filter_factor2(energy, dist, alpha, w2):
  # Alpha represents the ratio of delta/beta.
  return 1 / (_wavelength(energy) * dist * w2 / (4 * math.pi) + alpha)
+
+
+ def _wavelength(energy: float) -> float:
+ SPEED_OF_LIGHT = 299792458e2 # [cm/s]
+ PLANCK_CONSTANT = 6.58211928e-19 # [keV*s]
+ return 2 * math.pi * PLANCK_CONSTANT * SPEED_OF_LIGHT / energy
+
+
+ def _reciprocal_grid(pixel_size: float, shape_proj: tuple) -> cp.ndarray:
+ """
+ Calculate reciprocal grid.
+
+ Parameters
+ ----------
+ pixel_size : float
+ Detector pixel size in cm.
+ shape_proj : tuple
+ Shape of the reciprocal grid along x and y axes.
+
+ Returns
+ -------
+ ndarray
+ Grid coordinates.
+ """
+ # Sampling in reciprocal space.
+ indx = _reciprocal_coord(pixel_size, shape_proj[0])
+ indy = _reciprocal_coord(pixel_size, shape_proj[1])
+ indx_sq = cp.square(indx)
+ indy_sq = cp.square(indy)
+
+ return cp.add.outer(indx_sq, indy_sq)
+
+
+ def _reciprocal_coord(pixel_size: float, num_grid: int) -> cp.ndarray:
+ """
+ Calculate reciprocal grid coordinates for a given pixel size
+ and discretization.
+
+ Parameters
+ ----------
+ pixel_size : float
+ Detector pixel size in cm.
+ num_grid : int
+ Size of the reciprocal grid.
+
+ Returns
+ -------
+ ndarray
+ Grid coordinates.
+ """
+ n = num_grid - 1
+ rc = cp.arange(-n, num_grid, 2, dtype=cp.float32)
+ rc *= 2 * math.pi / (n * pixel_size)
+ return rc
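Note: the relocated helper _reciprocal_coord simply builds a symmetric, zero-centred frequency axis. A NumPy rendering of the same arithmetic, for illustration only:

    import math
    import numpy as np

    def reciprocal_coord(pixel_size: float, num_grid: int) -> np.ndarray:
        # mirrors the CuPy helper moved above
        n = num_grid - 1
        rc = np.arange(-n, num_grid, 2, dtype=np.float32)
        rc *= 2 * math.pi / (n * pixel_size)
        return rc

    print(reciprocal_coord(pixel_size=1e-4, num_grid=4))
    # approximately [-62831.85, -20943.95, 20943.95, 62831.85]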
@@ -143,8 +143,8 @@ def remove_stripe_ti(

  _, _, dx_orig = data.shape
  if (dx_orig % 2) != 0:
- # the horizontal detector size is odd, data needs to be padded/cropped, for now raising the error
- raise ValueError("The horizontal detector size must be even")
+ # if the horizontal detector size is odd, the data needs to be padded
+ data = cp.pad(data, ((0, 0), (0, 0), (0, 1)), mode="edge")

  gamma = beta * ((1 - beta) / (1 + beta)) ** cp.abs(
  cp.fft.fftfreq(data.shape[-1]) * data.shape[-1]
@@ -154,7 +154,11 @@ def remove_stripe_ti(
  v = v - v[:, 0:1]
  v = cp.fft.irfft(cp.fft.rfft(v) * cp.fft.rfft(gamma)).astype(data.dtype)
  data[:] += v
- return data
+ if (dx_orig % 2) != 0:
+ # unpad
+ return data[:, :, :-1]
+ else:
+ return data


  ######## Optimized version for Vo-all ring removal in tomopy########
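Note: the pattern adopted here (edge-pad the last axis so the detector width is even, filter, then crop the extra column) can be sketched in isolation; the shapes are illustrative:

    import cupy as cp

    sino = cp.random.random((180, 3, 301)).astype(cp.float32)  # odd detector width
    padded = cp.pad(sino, ((0, 0), (0, 0), (0, 1)), mode="edge")  # width becomes 302
    # ... the FFT-based stripe filtering operates on `padded` here ...
    restored = padded[:, :, :-1]  # drop the extra column to recover the original width
    assert restored.shape == sino.shape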
@@ -37,8 +37,8 @@ else:
  RecToolsDIRCuPy = Mock()
  RecToolsIRCuPy = Mock()

- from numpy import float32, complex64
- from typing import Optional, Type
+ from numpy import float32
+ from typing import Optional, Type, Union

  from httomolibgpu.misc.supp_func import data_checker

@@ -49,6 +49,7 @@ __all__ = [
  "LPRec3d_tomobar",
  "SIRT3d_tomobar",
  "CGLS3d_tomobar",
+ "FISTA3d_tomobar",
  ]

  input_data_axis_labels = ["angles", "detY", "detX"] # set the labels of the input data
@@ -59,7 +60,7 @@ def FBP2d_astra(
  data: np.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  filter_type: str = "ram-lak",
  filter_parameter: Optional[float] = None,
  filter_d: Optional[float] = None,
@@ -81,8 +82,9 @@ def FBP2d_astra(
  An array of angles given in radians.
  center : float, optional
  The center of rotation (CoR).
- detector_pad : int
- Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction.
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
  filter_type: str
  Type of projection filter, see ASTRA's API for all available options for filters.
  filter_parameter: float, optional
@@ -95,7 +97,7 @@ def FBP2d_astra(
  recon_mask_radius: float
  The radius of the circular mask that applies to the reconstructed slice in order to crop
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
- It is recommended to keep the value in the range [0.7-1.0].
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
  neglog: bool
  Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
  assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
@@ -119,7 +121,7 @@ def FBP2d_astra(

  detY_size = data_shape[1]
  reconstruction = np.empty(
- (recon_size, detY_size, recon_size), dtype=np.float32(), order="C"
+ (recon_size, detY_size, recon_size), dtype=float32, order="C"
  )
  _take_neg_log_np(data) if neglog else data

@@ -142,7 +144,7 @@ def FBP3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  filter_freq_cutoff: float = 0.35,
  recon_size: Optional[int] = None,
  recon_mask_radius: Optional[float] = 0.95,
@@ -152,7 +154,7 @@ def FBP3d_tomobar(
  """
  Perform Filtered Backprojection (FBP) reconstruction using ASTRA toolbox :cite:`van2016fast` and
  ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
- This is a 3D recon from the CuPy array directly and using a custom built SINC filter for filtration in Fourier space,
+ This is a 3D recon from the CuPy array directly and using a custom built SINC filter for filtration in Fourier space,
  see more in :ref:`method_FBP3d_tomobar`.

  Parameters
@@ -163,17 +165,18 @@ def FBP3d_tomobar(
  An array of angles given in radians.
  center : float, optional
  The center of rotation (CoR).
- detector_pad : int
- Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction.
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
  filter_freq_cutoff : float
- Cutoff frequency parameter for the SINC filter, the lower values may produce better contrast but noisy reconstruction. The filter change will also affect the dynamic range of the reconstructed image.
+ Cutoff frequency parameter for the SINC filter, the lower values may produce better contrast but noisy reconstruction. The filter change will also affect the dynamic range of the reconstructed image.
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
  recon_mask_radius: float, optional
  The radius of the circular mask that applies to the reconstructed slice in order to crop
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
- It is recommended to keep the value in the range [0.7-1.0].
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
  neglog: bool
  Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
  assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
@@ -206,11 +209,15 @@ def LPRec3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  filter_type: str = "shepp",
  filter_freq_cutoff: float = 1.0,
  recon_size: Optional[int] = None,
- recon_mask_radius: Optional[float] = 0.95,
+ recon_mask_radius: float = 0.95,
+ power_of_2_oversampling: Optional[bool] = True,
+ power_of_2_cropping: Optional[bool] = True,
+ min_mem_usage_filter: Optional[bool] = False,
+ min_mem_usage_ifft2: Optional[bool] = False,
  neglog: bool = False,
  ) -> cp.ndarray:
  """
@@ -226,8 +233,9 @@ def LPRec3d_tomobar(
  An array of angles given in radians.
  center : float, optional
  The center of rotation (CoR).
- detector_pad : int
- Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction.
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
  filter_type : str
  Filter type, the accepted strings are: none, ramp, shepp, cosine, cosine2, hamming, hann, parzen.
  filter_freq_cutoff : float
@@ -235,10 +243,10 @@ def LPRec3d_tomobar(
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
- recon_mask_radius: float, optional
+ recon_mask_radius: float
  The radius of the circular mask that applies to the reconstructed slice in order to crop
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
- It is recommended to keep the value in the range [0.7-1.0].
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
  neglog: bool
  Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
  assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
@@ -261,6 +269,10 @@ def LPRec3d_tomobar(
  data_axes_labels_order=input_data_axis_labels,
  filter_type=filter_type,
  cutoff_freq=filter_freq_cutoff,
+ power_of_2_oversampling=power_of_2_oversampling,
+ power_of_2_cropping=power_of_2_cropping,
+ min_mem_usage_filter=min_mem_usage_filter,
+ min_mem_usage_ifft2=min_mem_usage_ifft2,
  )
  cp._default_memory_pool.free_all_blocks()
  return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")
@@ -271,10 +283,11 @@ def SIRT3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  recon_size: Optional[int] = None,
- iterations: Optional[int] = 300,
- nonnegativity: Optional[bool] = True,
+ recon_mask_radius: float = 0.95,
+ iterations: int = 300,
+ nonnegativity: bool = True,
  neglog: bool = False,
  gpu_id: int = 0,
  ) -> cp.ndarray:
@@ -292,19 +305,24 @@ def SIRT3d_tomobar(
  An array of angles given in radians.
  center : float, optional
  The center of rotation (CoR).
- detector_pad : int
- Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction.
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
- iterations : int, optional
+ recon_mask_radius: float
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
+ iterations : int
  The number of SIRT iterations.
- nonnegativity : bool, optional
+ nonnegativity : bool
  Impose nonnegativity constraint on reconstructed image.
  neglog: bool
  Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
  assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
- gpu_id : int, optional
+ gpu_id : int
  A GPU device index to perform operation on.

  Returns
@@ -331,6 +349,7 @@ def SIRT3d_tomobar(
  _algorithm_ = {
  "iterations": iterations,
  "nonnegativity": nonnegativity,
+ "recon_mask_radius": recon_mask_radius,
  }
  reconstruction = RecToolsCP.SIRT(_data_, _algorithm_)
  cp._default_memory_pool.free_all_blocks()
@@ -342,10 +361,11 @@ def CGLS3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  recon_size: Optional[int] = None,
- iterations: Optional[int] = 20,
- nonnegativity: Optional[bool] = True,
+ recon_mask_radius: float = 0.95,
+ iterations: int = 20,
+ nonnegativity: bool = True,
  neglog: bool = False,
  gpu_id: int = 0,
  ) -> cp.ndarray:
@@ -363,14 +383,19 @@ def CGLS3d_tomobar(
  An array of angles given in radians.
  center : float, optional
  The center of rotation (CoR).
- detector_pad : int
- Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction.
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
- iterations : int, optional
+ recon_mask_radius: float
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
+ iterations : int
  The number of CGLS iterations.
- nonnegativity : bool, optional
+ nonnegativity : bool
  Impose nonnegativity constraint on reconstructed image.
  neglog: bool
  Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
@@ -393,18 +418,119 @@ def CGLS3d_tomobar(
  "projection_norm_data": _take_neg_log(data) if neglog else data,
  "data_axes_labels_order": input_data_axis_labels,
  } # data dictionary
- _algorithm_ = {"iterations": iterations, "nonnegativity": nonnegativity}
+ _algorithm_ = {
+ "iterations": iterations,
+ "nonnegativity": nonnegativity,
+ "recon_mask_radius": recon_mask_radius,
+ }
  reconstruction = RecToolsCP.CGLS(_data_, _algorithm_)
  cp._default_memory_pool.free_all_blocks()
  return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")


+ ## %%%%%%%%%%%%%%%%%%%%%%% FISTA reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
+ def FISTA3d_tomobar(
+ data: cp.ndarray,
+ angles: np.ndarray,
+ center: Optional[float] = None,
+ detector_pad: Union[bool, int] = False,
+ recon_size: Optional[int] = None,
+ recon_mask_radius: float = 0.95,
+ iterations: int = 20,
+ subsets_number: int = 6,
+ regularisation_type: str = "PD_TV",
+ regularisation_parameter: float = 0.000001,
+ regularisation_iterations: int = 50,
+ regularisation_half_precision: bool = True,
+ nonnegativity: bool = True,
+ neglog: bool = False,
+ gpu_id: int = 0,
+ ) -> cp.ndarray:
+ """
+ A Fast Iterative Shrinkage-Thresholding Algorithm :cite:`beck2009fast` with various types of regularisation or
+ denoising operations :cite:`kazantsev2019ccpi` (currently accepts ROF_TV and PD_TV regularisations only).
+
+ Parameters
+ ----------
+ data : cp.ndarray
+ Projection data as a CuPy array.
+ angles : np.ndarray
+ An array of angles given in radians.
+ center : float, optional
+ The center of rotation (CoR).
+ detector_pad : bool, int
+ Detector width padding with edge values to remove circle/arc type artifacts in the reconstruction. Set to True to perform
+ an automated padding or specify a certain value as an integer.
+ recon_size : int, optional
+ The [recon_size, recon_size] shape of the reconstructed slice in pixels.
+ By default (None), the reconstructed size will be the dimension of the horizontal detector.
+ recon_mask_radius: float
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
+ To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
+ iterations : int
+ The number of FISTA algorithm iterations.
+ subsets_number: int
+ The number of the ordered subsets to accelerate convergence. Keep the value bellow 10 to avoid divergence.
+ regularisation_type: str
+ A method to use for regularisation. Currently PD_TV and ROF_TV are available.
+ regularisation_parameter: float
+ The main regularisation parameter to control the amount of smoothing/noise removal. Larger values lead to stronger smoothing.
+ regularisation_iterations: int
+ The number of iterations for regularisers (aka INNER iterations).
+ regularisation_half_precision: bool
+ Perform faster regularisation computation in half-precision with a very minimal sacrifice in quality.
+ nonnegativity : bool
+ Impose nonnegativity constraint on the reconstructed image.
+ neglog: bool
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ gpu_id : int
+ A GPU device index to perform operation on.
+
+ Returns
+ -------
+ cp.ndarray
+ The FISTA reconstructed volume as a CuPy array.
+ """
+ data = data_checker(data, verbosity=True, method_name="FISTA3d_tomobar")
+
+ RecToolsCP = _instantiate_iterative_recon_class(
+ data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS"
+ )
+
+ _data_ = {
+ "projection_norm_data": _take_neg_log(data) if neglog else data,
+ "OS_number": subsets_number,
+ "data_axes_labels_order": input_data_axis_labels,
+ }
+ lc = RecToolsCP.powermethod(_data_) # calculate Lipschitz constant (run once)
+
+ _algorithm_ = {
+ "iterations": iterations,
+ "lipschitz_const": lc.get(),
+ "nonnegativity": nonnegativity,
+ "recon_mask_radius": recon_mask_radius,
+ }
+
+ _regularisation_ = {
+ "method": regularisation_type, # Selected regularisation method
+ "regul_param": regularisation_parameter, # Regularisation parameter
+ "iterations": regularisation_iterations, # The number of regularisation iterations
+ "half_precision": regularisation_half_precision, # enabling half-precision calculation
+ }
+
+ reconstruction = RecToolsCP.FISTA(_data_, _algorithm_, _regularisation_)
+ cp._default_memory_pool.free_all_blocks()
+ return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")
+
+
  ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
  def _instantiate_direct_recon_class(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  recon_size: Optional[int] = None,
  gpu_id: int = 0,
  ) -> Type:
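Note: a call to the newly added FISTA3d_tomobar might look like the sketch below; the data shapes, the centre value and the parameter choices are illustrative assumptions rather than recommendations from the package:

    import numpy as np
    import cupy as cp
    from httomolibgpu.recon.algorithm import FISTA3d_tomobar

    # illustrative normalised projection data laid out as (angles, detY, detX)
    projections = cp.random.random((180, 4, 256)).astype(cp.float32)
    angles = np.linspace(0.0, np.pi, 180, dtype=np.float32)

    volume = FISTA3d_tomobar(
        projections,
        angles,
        center=128.0,
        detector_pad=True,  # automated padding, new in 3.1.1
        iterations=20,
        subsets_number=6,
        regularisation_type="PD_TV",
        regularisation_parameter=1e-06,
        regularisation_iterations=50,
        regularisation_half_precision=True,
    )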
@@ -414,7 +540,7 @@ def _instantiate_direct_recon_class(
  data (cp.ndarray): data array
  angles (np.ndarray): angles
  center (Optional[float], optional): center of recon. Defaults to None.
- detector_pad (int): Detector width padding. Defaults to 0.
+ detector_pad : (Union[bool, int]) : Detector width padding. Defaults to False.
  recon_size (Optional[int], optional): recon_size. Defaults to None.
  gpu_id (int, optional): gpu ID. Defaults to 0.

@@ -425,6 +551,10 @@ def _instantiate_direct_recon_class(
  center = data.shape[2] // 2 # making a crude guess
  if recon_size is None:
  recon_size = data.shape[2]
+ if detector_pad is True:
+ detector_pad = __estimate_detectorHoriz_padding(data.shape[2])
+ elif detector_pad is False:
+ detector_pad = 0
  RecToolsCP = RecToolsDIRCuPy(
  DetectorsDimH=data.shape[2], # Horizontal detector dimension
  DetectorsDimH_pad=detector_pad, # padding for horizontal detector
@@ -444,7 +574,7 @@ def _instantiate_direct_recon2d_class(
  data: np.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  recon_size: Optional[int] = None,
  gpu_id: int = 0,
  ) -> Type:
@@ -454,7 +584,7 @@ def _instantiate_direct_recon2d_class(
  data (cp.ndarray): data array
  angles (np.ndarray): angles
  center (Optional[float], optional): center of recon. Defaults to None.
- detector_pad (int): Detector width padding. Defaults to 0.
+ detector_pad : (Union[bool, int]) : Detector width padding. Defaults to False.
  recon_size (Optional[int], optional): recon_size. Defaults to None.
  gpu_id (int, optional): gpu ID. Defaults to 0.

@@ -465,6 +595,10 @@ def _instantiate_direct_recon2d_class(
  center = data.shape[2] // 2 # making a crude guess
  if recon_size is None:
  recon_size = data.shape[2]
+ if detector_pad is True:
+ detector_pad = __estimate_detectorHoriz_padding(data.shape[2])
+ elif detector_pad is False:
+ detector_pad = 0
  RecTools = RecToolsDIR(
  DetectorsDimH=data.shape[2], # Horizontal detector dimension
  DetectorsDimH_pad=detector_pad, # padding for horizontal detector
@@ -483,7 +617,7 @@ def _instantiate_iterative_recon_class(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
- detector_pad: int = 0,
+ detector_pad: Union[bool, int] = False,
  recon_size: Optional[int] = None,
  gpu_id: int = 0,
  datafidelity: str = "LS",
@@ -494,7 +628,7 @@ def _instantiate_iterative_recon_class(
  data (cp.ndarray): data array
  angles (np.ndarray): angles
  center (Optional[float], optional): center of recon. Defaults to None.
- detector_pad (int): Detector width padding. Defaults to 0.
+ detector_pad : (Union[bool, int]) : Detector width padding. Defaults to False.
  recon_size (Optional[int], optional): recon_size. Defaults to None.
  datafidelity (str, optional): Data fidelity
  gpu_id (int, optional): gpu ID. Defaults to 0.
@@ -506,6 +640,10 @@ def _instantiate_iterative_recon_class(
  center = data.shape[2] // 2 # making a crude guess
  if recon_size is None:
  recon_size = data.shape[2]
+ if detector_pad is True:
+ detector_pad = __estimate_detectorHoriz_padding(data.shape[2])
+ elif detector_pad is False:
+ detector_pad = 0
  RecToolsCP = RecToolsIRCuPy(
  DetectorsDimH=data.shape[2], # Horizontal detector dimension
  DetectorsDimH_pad=detector_pad, # padding for horizontal detector
@@ -537,3 +675,10 @@ def _take_neg_log_np(data: np.ndarray) -> np.ndarray:
  data[np.isnan(data)] = 6.0
  data[np.isinf(data)] = 0
  return data
+
+
+ def __estimate_detectorHoriz_padding(detX_size) -> int:
+ det_half = detX_size // 2
+ padded_value_exact = int(np.sqrt(2 * (det_half**2))) - det_half
+ padded_add_margin = int(0.1 * padded_value_exact)
+ return padded_value_exact + padded_add_margin
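Note: a quick worked example of the automated padding estimate, using an arbitrary 2560-pixel detector width:

    det_half = 2560 // 2                              # 1280
    exact = int((2 * det_half**2) ** 0.5) - det_half  # int(1810.19...) - 1280 = 530
    margin = int(0.1 * exact)                         # 53
    padding = exact + margin                          # 583 extra detector pixels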
@@ -1,6 +1,6 @@
  Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 3.0
+ Version: 3.1.1
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause
@@ -19,7 +19,6 @@ Requires-Dist: scipy
  Requires-Dist: pillow
  Requires-Dist: scikit-image
  Requires-Dist: tomobar
- Requires-Dist: ccpi-regularisation-cupy
  Provides-Extra: dev
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
@@ -14,7 +14,6 @@ httomolibgpu/cuda_kernels/calc_metrics.cu
  httomolibgpu/cuda_kernels/center_360_shifts.cu
  httomolibgpu/cuda_kernels/generate_mask.cu
  httomolibgpu/cuda_kernels/median_kernel.cu
- httomolibgpu/cuda_kernels/paganin_filter_gen.cu
  httomolibgpu/cuda_kernels/raven_filter.cu
  httomolibgpu/cuda_kernels/remove_nan_inf.cu
  httomolibgpu/misc/__init__.py
@@ -5,7 +5,6 @@ scipy
  pillow
  scikit-image
  tomobar
- ccpi-regularisation-cupy

  [dev]
  pytest
@@ -44,7 +44,6 @@ dependencies = [
  "pillow",
  "scikit-image",
  "tomobar",
- "ccpi-regularisation-cupy",
  ]

  [project.optional-dependencies]
@@ -1,37 +0,0 @@
- #include <cupy/complex.cuh>
-
- #ifndef M_PI
- #define M_PI 3.1415926535897932384626433832795f
- #endif
-
- extern "C" __global__ void
- paganin_filter_gen(int width1, int height1, float resolution, float wavelength,
- float distance, float ratio, complex<float> *filtercomplex) {
- int px = threadIdx.x + blockIdx.x * blockDim.x;
- int py = threadIdx.y + blockIdx.y * blockDim.y;
- if (px >= width1)
- return;
- if (py >= height1)
- return;
-
- float dpx = 1.0f / (width1 * resolution);
- float dpy = 1.0f / (height1 * resolution);
- int centerx = (width1 + 1) / 2 - 1;
- int centery = (height1 + 1) / 2 - 1;
-
- float pxx = (px - centerx) * dpx;
- float pyy = (py - centery) * dpy;
- float pd = (pxx * pxx + pyy * pyy) * wavelength * distance * M_PI;
- ;
- float filter1 = 1.0f + ratio * pd;
-
- complex<float> value = 1.0f / complex<float>(filter1, filter1);
-
- // ifftshifting positions
- int xshift = (width1 + 1) / 2;
- int yshift = (height1 + 1) / 2;
- int outX = (px + xshift) % width1;
- int outY = (py + yshift) % height1;
-
- filtercomplex[outY * width1 + outX] = value;
- }