httomolibgpu 2.4.0.tar.gz → 2.5.1.tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between the two versions as they appear in their respective public registries.
Files changed (33)
  1. {httomolibgpu-2.4.0/httomolibgpu.egg-info → httomolibgpu-2.5.1}/PKG-INFO +20 -17
  2. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/README.rst +16 -15
  3. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/__init__.py +8 -1
  4. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/morph.py +1 -1
  5. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/normalize.py +13 -4
  6. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/stripe.py +14 -54
  7. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/recon/algorithm.py +154 -24
  8. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1/httomolibgpu.egg-info}/PKG-INFO +20 -17
  9. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu.egg-info/requires.txt +1 -0
  10. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/pyproject.toml +1 -0
  11. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/LICENSE +0 -0
  12. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/MANIFEST.in +0 -0
  13. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/__init__.py +0 -0
  14. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/calc_metrics.cu +0 -0
  15. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/center_360_shifts.cu +0 -0
  16. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/generate_mask.cu +0 -0
  17. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/median_kernel.cu +0 -0
  18. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/paganin_filter_gen.cu +0 -0
  19. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cuda_kernels/raven_filter.cu +0 -0
  20. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/cupywrapper.py +0 -0
  21. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/__init__.py +0 -0
  22. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/corr.py +0 -0
  23. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/denoise.py +0 -0
  24. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/rescale.py +0 -0
  25. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/__init__.py +0 -0
  26. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/alignment.py +0 -0
  27. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/phase.py +0 -0
  28. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/recon/__init__.py +0 -0
  29. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/recon/rotation.py +0 -0
  30. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu.egg-info/SOURCES.txt +0 -0
  31. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu.egg-info/dependency_links.txt +0 -0
  32. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu.egg-info/top_level.txt +0 -0
  33. {httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/setup.cfg +0 -0

{httomolibgpu-2.4.0/httomolibgpu.egg-info → httomolibgpu-2.5.1}/PKG-INFO
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 2.4.0
+ Version: 2.5.1
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause
@@ -19,6 +19,7 @@ Requires-Dist: scipy
  Requires-Dist: pillow
  Requires-Dist: scikit-image
  Requires-Dist: tomobar
+ Requires-Dist: ccpi-regularisation-cupy
  Provides-Extra: dev
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
@@ -32,6 +33,7 @@ Requires-Dist: imageio; extra == "dev"
  Requires-Dist: h5py; extra == "dev"
  Requires-Dist: pre-commit; extra == "dev"
  Requires-Dist: pyfftw; extra == "dev"
+ Dynamic: license-file

  HTTomolibGPU is a library of GPU accelerated methods for tomography
  --------------------------------------------------------------------
@@ -49,19 +51,28 @@ Although **HTTomolibGPU** can be used as a stand-alone library, it has been spec
  its backend for data processing. HTTomo is a user interface (UI) written in Python for fast big tomographic data processing using
  MPI protocols or as well serially.

- Install HTTomolibGPU as a PyPi package
- =========================================================
+ Installation
+ ============
+
+ HTTomolibGPU is available on PyPI, so it can be installed into either a virtual environment or
+ a conda environment.
+
+ Virtual environment
+ ~~~~~~~~~~~~~~~~~~~
  .. code-block:: console

+ $ python -m venv httomolibgpu
+ $ source httomolibgpu/bin/activate
  $ pip install httomolibgpu

- Install HTTomolibGPU as a pre-built conda Python package
- =========================================================
+ Conda environment
+ ~~~~~~~~~~~~~~~~~
  .. code-block:: console

  $ conda create --name httomolibgpu # create a fresh conda environment
  $ conda activate httomolibgpu # activate the environment
- $ conda install -c httomo httomolibgpu -c conda-forge # for linux users
+ $ conda install -c conda-forge cupy==12.3.0 # for linux users
+ $ pip install httomolibgpu

  Setup the development environment:
  ==================================
@@ -69,14 +80,6 @@ Setup the development environment:
  .. code-block:: console

  $ git clone git@github.com:DiamondLightSource/httomolibgpu.git # clone the repo
- $ conda env create --name httomolibgpu --file conda/environment.yml # install dependencies
+ $ conda env create --name httomolibgpu -c conda-forge cupy==12.3.0 # install dependencies
  $ conda activate httomolibgpu # activate the environment
- $ pip install -e .[dev] # editable/development mode
-
- Build HTTomolibGPU as a conda Python package
- ============================================
-
- .. code-block:: console
-
- $ conda build conda/recipe/ -c conda-forge -c httomo
-
+ $ pip install -e ./httomolibgpu[dev] # editable/development mode

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/README.rst
@@ -14,19 +14,28 @@ Although **HTTomolibGPU** can be used as a stand-alone library, it has been spec
  its backend for data processing. HTTomo is a user interface (UI) written in Python for fast big tomographic data processing using
  MPI protocols or as well serially.

- Install HTTomolibGPU as a PyPi package
- =========================================================
+ Installation
+ ============
+
+ HTTomolibGPU is available on PyPI, so it can be installed into either a virtual environment or
+ a conda environment.
+
+ Virtual environment
+ ~~~~~~~~~~~~~~~~~~~
  .. code-block:: console

+ $ python -m venv httomolibgpu
+ $ source httomolibgpu/bin/activate
  $ pip install httomolibgpu

- Install HTTomolibGPU as a pre-built conda Python package
- =========================================================
+ Conda environment
+ ~~~~~~~~~~~~~~~~~
  .. code-block:: console

  $ conda create --name httomolibgpu # create a fresh conda environment
  $ conda activate httomolibgpu # activate the environment
- $ conda install -c httomo httomolibgpu -c conda-forge # for linux users
+ $ conda install -c conda-forge cupy==12.3.0 # for linux users
+ $ pip install httomolibgpu

  Setup the development environment:
  ==================================
@@ -34,14 +43,6 @@ Setup the development environment:
  .. code-block:: console

  $ git clone git@github.com:DiamondLightSource/httomolibgpu.git # clone the repo
- $ conda env create --name httomolibgpu --file conda/environment.yml # install dependencies
+ $ conda env create --name httomolibgpu -c conda-forge cupy==12.3.0 # install dependencies
  $ conda activate httomolibgpu # activate the environment
- $ pip install -e .[dev] # editable/development mode
-
- Build HTTomolibGPU as a conda Python package
- ============================================
-
- .. code-block:: console
-
- $ conda build conda/recipe/ -c conda-forge -c httomo
-
+ $ pip install -e ./httomolibgpu[dev] # editable/development mode

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/__init__.py
@@ -11,5 +11,12 @@ from httomolibgpu.prep.stripe import (
  remove_all_stripe,
  )

- from httomolibgpu.recon.algorithm import FBP, LPRec, SIRT, CGLS
+ from httomolibgpu.recon.algorithm import (
+ FBP2d_astra,
+ FBP3d_tomobar,
+ LPRec3d_tomobar,
+ SIRT3d_tomobar,
+ CGLS3d_tomobar,
+ )
+
  from httomolibgpu.recon.rotation import find_center_vo, find_center_360, find_center_pc
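
The reconstruction entry points are renamed in this release, so code importing them from the package top level needs updating. A minimal sketch of the caller-side change (the old-to-new mapping follows from the `def` renames in httomolibgpu/recon/algorithm.py further below; FBP2d_astra is new in 2.5.1):

    # httomolibgpu 2.4.0
    # from httomolibgpu import FBP, LPRec, SIRT, CGLS

    # httomolibgpu 2.5.1: same functions, backend-suffixed names
    from httomolibgpu import (
        FBP3d_tomobar,    # was FBP
        LPRec3d_tomobar,  # was LPRec
        SIRT3d_tomobar,   # was SIRT
        CGLS3d_tomobar,   # was CGLS
        FBP2d_astra,      # new: slice-by-slice 2D FBP via ASTRA
    )
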

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/misc/morph.py
@@ -112,7 +112,7 @@ def data_resampler(

  Parameters
  ----------
- data : cp.ndarray
+ data : cp.ndarray
  3d cupy array.
  newshape : list
  2d list that defines the 2D slice shape of new shape data.

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/normalize.py
@@ -43,6 +43,8 @@ def normalize(
  data: cp.ndarray,
  flats: cp.ndarray,
  darks: cp.ndarray,
+ flats_multiplier: float = 1.0,
+ darks_multiplier: float = 1.0,
  cutoff: float = 10.0,
  minus_log: bool = True,
  nonnegativity: bool = False,
@@ -60,13 +62,17 @@ def normalize(
  3D flat field data as a CuPy array.
  darks : cp.ndarray
  3D dark field data as a CuPy array.
- cutoff : float, optional
+ flats_multiplier: float
+ A multiplier to apply to flats, can work as an intensity compensation constant.
+ darks_multiplier: float
+ A multiplier to apply to darks, can work as an intensity compensation constant.
+ cutoff : float
  Permitted maximum value for the normalised data.
- minus_log : bool, optional
+ minus_log : bool
  Apply negative log to the normalised data.
- nonnegativity : bool, optional
+ nonnegativity : bool
  Remove negative values in the normalised data.
- remove_nans : bool, optional
+ remove_nans : bool
  Remove NaN and Inf values in the normalised data.

  Returns
@@ -82,6 +88,9 @@ def normalize(
  mean(darks, axis=0, dtype=float32, out=dark0)
  mean(flats, axis=0, dtype=float32, out=flat0)

+ dark0 *= darks_multiplier
+ flat0 *= flats_multiplier
+
  kernel_name = "normalisation"
  kernel = r"""
  float denom = float(flats) - float(darks);
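
A minimal usage sketch of the new multiplier arguments added above (the variable names, shapes, and values here are illustrative, not taken from the package):

    import cupy as cp
    from httomolibgpu.prep.normalize import normalize

    # Illustrative data: 180 projections, 10 flats/darks, 128 x 160 detector.
    projs = cp.random.random((180, 128, 160)).astype(cp.float32)
    flats = 0.9 * cp.ones((10, 128, 160), dtype=cp.float32)
    darks = cp.zeros((10, 128, 160), dtype=cp.float32)

    # The multipliers scale the averaged flat/dark fields before normalisation,
    # e.g. to compensate for a beam-intensity change between the scan and the flats.
    data = normalize(projs, flats, darks, flats_multiplier=1.1, darks_multiplier=1.0)
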

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/prep/stripe.py
@@ -58,7 +58,7 @@ def remove_stripe_based_sorting(
  ) -> Union[cp.ndarray, np.ndarray]:
  """
  Remove full and partial stripe artifacts from sinogram using Nghia Vo's
- approach, see :cite:`vo2018superior`. This algorithm works particularly
+ approach, see :ref:`method_remove_stripe_based_sorting` and :cite:`vo2018superior`. This algorithm works particularly
  well for removing partial stripes.

  Steps of the algorithm: 1. Sort each column of the sinogram by its grayscale values.
@@ -119,7 +119,7 @@ def remove_stripe_ti(
  ) -> Union[cp.ndarray, np.ndarray]:
  """
  Removes stripes with the method of V. Titarenko (TomoCuPy implementation).
- See :cite:`titarenko2010analytical`.
+ See :ref:`method_remove_stripe_ti` and :cite:`titarenko2010analytical`.

  Parameters
  ----------
@@ -179,7 +179,7 @@ def remove_all_stripe(
  ) -> cp.ndarray:
  """
  Remove all types of stripe artifacts from sinogram using Nghia Vo's
- approach, see :cite:`vo2018superior` (combination of algorithm 3,4,5, and 6).
+ approach, see :ref:`method_remove_all_stripe` and :cite:`vo2018superior` (combination of algorithm 3,4,5, and 6).

  Parameters
  ----------
@@ -205,40 +205,12 @@ def remove_all_stripe(
  for m in range(data.shape[1]):
  sino = data[:, m, :]
  sino = _rs_dead(sino, snr, la_size, matindex)
- sino = _rs_sort2(sino, sm_size, matindex, dim)
+ sino = _rs_sort(sino, sm_size, dim)
  sino = cp.nan_to_num(sino)
  data[:, m, :] = sino
  return data


- def _rs_sort2(sinogram, size, matindex, dim):
- """
- Remove stripes using the sorting technique.
- """
- sinogram = cp.transpose(sinogram)
- matcomb = cp.asarray(cp.dstack((matindex, sinogram)))
-
- # matsort = cp.asarray([row[row[:, 1].argsort()] for row in matcomb])
- ids = cp.argsort(matcomb[:, :, 1], axis=1)
- matsort = matcomb.copy()
- matsort[:, :, 0] = cp.take_along_axis(matsort[:, :, 0], ids, axis=1)
- matsort[:, :, 1] = cp.take_along_axis(matsort[:, :, 1], ids, axis=1)
- if dim == 1:
- matsort[:, :, 1] = median_filter(matsort[:, :, 1], (size, 1))
- else:
- matsort[:, :, 1] = median_filter(matsort[:, :, 1], (size, size))
-
- # matsortback = cp.asarray([row[row[:, 0].argsort()] for row in matsort])
-
- ids = cp.argsort(matsort[:, :, 0], axis=1)
- matsortback = matsort.copy()
- matsortback[:, :, 0] = cp.take_along_axis(matsortback[:, :, 0], ids, axis=1)
- matsortback[:, :, 1] = cp.take_along_axis(matsortback[:, :, 1], ids, axis=1)
-
- sino_corrected = matsortback[:, :, 1]
- return cp.transpose(sino_corrected)
-
-
  def _mpolyfit(x, y):
  n = len(x)
  x_mean = cp.mean(x)
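
The removed `_rs_sort2` helper above is the sort–filter–restore pattern behind the sorting-based stripe removal: sort each detector column along the angle axis, median-filter across neighbouring columns at equal rank, then undo the sort. A compact standalone NumPy sketch of that pattern (not the library's `_rs_sort` replacement, which is not shown in this diff):

    import numpy as np
    from scipy.ndimage import median_filter

    def sort_filter_restore(sino, size=7):
        # sino has shape (angles, detX); sort each detector column along angles.
        order = np.argsort(sino, axis=0)
        sorted_vals = np.take_along_axis(sino, order, axis=0)
        # Median filter across neighbouring detector columns at equal rank.
        smoothed = median_filter(sorted_vals, size=(1, size))
        # Scatter the smoothed values back to their original angular positions.
        restored = np.empty_like(sino)
        np.put_along_axis(restored, order, smoothed, axis=0)
        return restored
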
@@ -261,8 +233,6 @@ def _detect_stripe(listdata, snr):
  listsorted = cp.sort(listdata)[::-1]
  xlist = cp.arange(0, numdata, 1.0)
  ndrop = cp.int16(0.25 * numdata)
- # (_slope, _intercept) = cp.polyfit(xlist[ndrop:-ndrop - 1],
- # listsorted[ndrop:-ndrop - 1], 1)
  (_slope, _intercept) = _mpolyfit(
  xlist[ndrop : -ndrop - 1], listsorted[ndrop : -ndrop - 1]
  )
@@ -293,11 +263,6 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
  sinosmooth = median_filter(sinosort, (1, size))
  list1 = cp.mean(sinosort[ndrop : nrow - ndrop], axis=0)
  list2 = cp.mean(sinosmooth[ndrop : nrow - ndrop], axis=0)
- # listfact = cp.divide(list1,
- # list2,
- # out=cp.ones_like(list1),
- # where=list2 != 0)
-
  listfact = list1 / list2

  # Locate stripes
@@ -310,14 +275,12 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
  sinogram1 = cp.transpose(sinogram)
  matcombine = cp.asarray(cp.dstack((matindex, sinogram1)))

- # matsort = cp.asarray([row[row[:, 1].argsort()] for row in matcombine])
  ids = cp.argsort(matcombine[:, :, 1], axis=1)
  matsort = matcombine.copy()
  matsort[:, :, 0] = cp.take_along_axis(matsort[:, :, 0], ids, axis=1)
  matsort[:, :, 1] = cp.take_along_axis(matsort[:, :, 1], ids, axis=1)

  matsort[:, :, 1] = cp.transpose(sinosmooth)
- # matsortback = cp.asarray([row[row[:, 0].argsort()] for row in matsort])
  ids = cp.argsort(matsort[:, :, 0], axis=1)
  matsortback = matsort.copy()
  matsortback[:, :, 0] = cp.take_along_axis(matsortback[:, :, 0], ids, axis=1)
@@ -330,12 +293,9 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):


  def _rs_dead(sinogram, snr, size, matindex, norm=True):
- """
- Remove unresponsive and fluctuating stripes.
- """
+ """remove unresponsive and fluctuating stripes"""
  sinogram = cp.copy(sinogram) # Make it mutable
  (nrow, _) = sinogram.shape
- # sinosmooth = cp.apply_along_axis(uniform_filter1d, 0, sinogram, 10)
  sinosmooth = uniform_filter1d(sinogram, 10, axis=0)

  listdiff = cp.sum(cp.abs(sinogram - sinosmooth), axis=0)
@@ -344,22 +304,22 @@ def _rs_dead(sinogram, snr, size, matindex, norm=True):
  listfact = listdiff / listdiffbck

  listmask = _detect_stripe(listfact, snr)
+ del listfact
  listmask = binary_dilation(listmask, iterations=1).astype(listmask.dtype)
  listmask[0:2] = 0.0
  listmask[-2:] = 0.0
- listx = cp.where(listmask < 1.0)[0]
- listy = cp.arange(nrow)
- matz = sinogram[:, listx]

+ listx = cp.where(listmask < 1.0)[0]
  listxmiss = cp.where(listmask > 0.0)[0]
+ del listmask

- # finter = interpolate.interp2d(listx.get(), listy.get(), matz.get(), kind='linear')
  if len(listxmiss) > 0:
- # sinogram_c[:, listxmiss.get()] = finter(listxmiss.get(), listy.get())
  ids = cp.searchsorted(listx, listxmiss)
- sinogram[:, listxmiss] = matz[:, ids - 1] + (listxmiss - listx[ids - 1]) * (
- matz[:, ids] - matz[:, ids - 1]
- ) / (listx[ids] - listx[ids - 1])
+ weights = (listxmiss - listx[ids - 1]) / (listx[ids] - listx[ids - 1])
+ # direct interpolation without making an extra copy
+ sinogram[:, listxmiss] = sinogram[:, listx[ids - 1]] + weights * (
+ sinogram[:, listx[ids]] - sinogram[:, listx[ids - 1]]
+ )

  # Remove residual stripes
  if norm is True:
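
The rewritten block above fills each defective detector column directly from its nearest good neighbours instead of building the intermediate `matz` copy. A small self-contained NumPy sketch of that searchsorted-based linear interpolation (toy values, illustrative only):

    import numpy as np

    # Toy sinogram: 4 angles x 6 detector columns; columns 2 and 3 are "dead".
    sino = np.arange(24, dtype=np.float32).reshape(4, 6)
    good = np.array([0, 1, 4, 5])      # usable columns (listx)
    dead = np.array([2, 3])            # columns to repair (listxmiss)

    ids = np.searchsorted(good, dead)  # bracketing positions among the good columns
    left, right = good[ids - 1], good[ids]
    weights = (dead - left) / (right - left)
    # Linear interpolation between the bracketing good columns, written in place.
    sino[:, dead] = sino[:, left] + weights * (sino[:, right] - sino[:, left])
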
@@ -455,7 +415,7 @@ def raven_filter(
  # Removing padding
  data = data[pad_y : height - pad_y, :, pad_x : width - pad_x].real

- return data
+ return cp.require(data, requirements="C")


  def _create_matindex(nrow, ncol):

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu/recon/algorithm.py
@@ -29,9 +29,11 @@ cupy_run = cupywrapper.cupy_run
  from unittest.mock import Mock

  if cupy_run:
+ from tomobar.methodsDIR import RecToolsDIR
  from tomobar.methodsDIR_CuPy import RecToolsDIRCuPy
  from tomobar.methodsIR_CuPy import RecToolsIRCuPy
  else:
+ RecToolsDIR = Mock()
  RecToolsDIRCuPy = Mock()
  RecToolsIRCuPy = Mock()

@@ -40,23 +42,103 @@ from typing import Optional, Type


  __all__ = [
- "FBP",
- "LPRec",
- "SIRT",
- "CGLS",
+ "FBP2d_astra",
+ "FBP3d_tomobar",
+ "LPRec3d_tomobar",
+ "SIRT3d_tomobar",
+ "CGLS3d_tomobar",
  ]

  input_data_axis_labels = ["angles", "detY", "detX"] # set the labels of the input data


+ ## %%%%%%%%%%%%%%%%%%%%%%% FBP2d_astra reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
+ def FBP2d_astra(
+ data: np.ndarray,
+ angles: np.ndarray,
+ center: Optional[float] = None,
+ filter_type: str = "ram-lak",
+ filter_parameter: Optional[float] = None,
+ filter_d: Optional[float] = None,
+ recon_size: Optional[int] = None,
+ recon_mask_radius: float = 0.95,
+ neglog: bool = False,
+ gpu_id: int = 0,
+ ) -> np.ndarray:
+ """
+ Perform Filtered Backprojection (FBP) reconstruction slice-by-slice (2d) using ASTRA toolbox :cite:`van2016fast` and
+ ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
+ This is a 2D recon using ASTRA's API for the FBP method, see for more parameters ASTRA's documentation here:
+ https://astra-toolbox.com/docs/algs/FBP_CUDA.html.
+
+ Parameters`
+ ----------
+ data : np.ndarray
+ Projection data as a 3d numpy array.
+ angles : np.ndarray
+ An array of angles given in radians.
+ center : float, optional
+ The center of rotation (CoR).
+ filter_type: str
+ Type of projection filter, see ASTRA's API for all available options for filters.
+ filter_parameter: float, optional
+ Parameter value for the 'tukey', 'gaussian', 'blackman' and 'kaiser' filter types.
+ filter_d: float, optional
+ D parameter value for 'shepp-logan', 'cosine', 'hamming' and 'hann' filter types.
+ recon_size : int, optional
+ The [recon_size, recon_size] shape of the reconstructed slice in pixels.
+ By default (None), the reconstructed size will be the dimension of the horizontal detector.
+ recon_mask_radius: float
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
+ It is recommended to keep the value in the range [0.7-1.0].
+ neglog: bool
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ gpu_id : int
+ A GPU device index to perform operation on.
+
+ Returns
+ -------
+ np.ndarray
+ The FBP reconstructed volume as a numpy array.
+ """
+ data_shape = np.shape(data)
+ if recon_size is None:
+ recon_size = data_shape[2]
+
+ RecTools = _instantiate_direct_recon2d_class(
+ data, angles, center, recon_size, gpu_id
+ )
+
+ detY_size = data_shape[1]
+ reconstruction = np.empty(
+ (recon_size, detY_size, recon_size), dtype=np.float32(), order="C"
+ )
+ _take_neg_log_np(data) if neglog else data
+
+ # loop over detY slices
+ for slice_index in range(0, detY_size):
+ reconstruction[:, slice_index, :] = np.flipud(
+ RecTools.FBP(
+ data[:, slice_index, :],
+ filter_type=filter_type,
+ filter_parameter=filter_parameter,
+ filter_d=filter_d,
+ recon_mask_radius=recon_mask_radius,
+ )
+ )
+ return reconstruction
+
+
  ## %%%%%%%%%%%%%%%%%%%%%%% FBP reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
- def FBP(
+ def FBP3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
  filter_freq_cutoff: float = 0.35,
  recon_size: Optional[int] = None,
- recon_mask_radius: float = 0.95,
+ recon_mask_radius: Optional[float] = 0.95,
  neglog: bool = False,
  gpu_id: int = 0,
  ) -> cp.ndarray:
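
A minimal usage sketch of the two FBP entry points defined above (the data shapes, centre value, and filter choice are illustrative; both calls assume the respective backends are installed):

    import numpy as np
    import cupy as cp
    from httomolibgpu.recon.algorithm import FBP2d_astra, FBP3d_tomobar

    # Illustrative normalised projections: (angles, detY, detX).
    projs = np.random.random((180, 8, 160)).astype(np.float32)
    angles = np.linspace(0, np.pi, 180, dtype=np.float32)

    # NumPy in, NumPy out: slice-by-slice 2D FBP through ASTRA (new in 2.5.1).
    vol2d = FBP2d_astra(projs, angles, center=79.5, filter_type="ram-lak")

    # CuPy in, CuPy out: 3D FBP through ToMoBAR (renamed from FBP).
    vol3d = FBP3d_tomobar(cp.asarray(projs), angles, center=79.5)
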
@@ -68,7 +150,7 @@ def FBP(
  Parameters
  ----------
  data : cp.ndarray
- Projection data as a CuPy array.
+ Projection data as a 3d CuPy array.
  angles : np.ndarray
  An array of angles given in radians.
  center : float, optional
@@ -78,20 +160,20 @@
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
- recon_mask_radius: float
+ recon_mask_radius: float, optional
  The radius of the circular mask that applies to the reconstructed slice in order to crop
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
  It is recommended to keep the value in the range [0.7-1.0].
  neglog: bool
- Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
- assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
  gpu_id : int
  A GPU device index to perform operation on.

  Returns
  -------
  cp.ndarray
- The FBP reconstructed volume as a CuPy array.
+ FBP reconstructed volume as a CuPy array.
  """
  RecToolsCP = _instantiate_direct_recon_class(
  data, angles, center, recon_size, gpu_id
@@ -108,7 +190,7 @@


  ## %%%%%%%%%%%%%%%%%%%%%%% LPRec %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
- def LPRec(
+ def LPRec3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
@@ -124,7 +206,7 @@ def LPRec(
  Parameters
  ----------
  data : cp.ndarray
- Projection data as a CuPy array.
+ Projection data as a 3d CuPy array.
  angles : np.ndarray
  An array of angles given in radians.
  center : float, optional
@@ -132,13 +214,13 @@
  recon_size : int, optional
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
- recon_mask_radius: float
+ recon_mask_radius: float, optional
  The radius of the circular mask that applies to the reconstructed slice in order to crop
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
  It is recommended to keep the value in the range [0.7-1.0].
  neglog: bool
- Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
- assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.

  Returns
  -------
@@ -157,7 +239,7 @@ def LPRec(


  ## %%%%%%%%%%%%%%%%%%%%%%% SIRT reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
- def SIRT(
+ def SIRT3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
@@ -189,8 +271,8 @@ def SIRT(
  nonnegativity : bool, optional
  Impose nonnegativity constraint on reconstructed image.
  neglog: bool
- Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
- assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
  gpu_id : int, optional
  A GPU device index to perform operation on.

@@ -222,7 +304,7 @@ def SIRT(


  ## %%%%%%%%%%%%%%%%%%%%%%% CGLS reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
- def CGLS(
+ def CGLS3d_tomobar(
  data: cp.ndarray,
  angles: np.ndarray,
  center: Optional[float] = None,
@@ -254,8 +336,8 @@ def CGLS(
  nonnegativity : bool, optional
  Impose nonnegativity constraint on reconstructed image.
  neglog: bool
- Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
- assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+ Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
+ assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
  gpu_id : int, optional
  A GPU device index to perform operation on.

@@ -277,6 +359,7 @@ def CGLS(
  cp._default_memory_pool.free_all_blocks()
  return cp.require(cp.swapaxes(reconstruction, 0, 1), requirements="C")

+
  ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
  def _instantiate_direct_recon_class(
  data: cp.ndarray,
@@ -314,6 +397,43 @@ def _instantiate_direct_recon_class(
  return RecToolsCP


+ ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
+ def _instantiate_direct_recon2d_class(
+ data: np.ndarray,
+ angles: np.ndarray,
+ center: Optional[float] = None,
+ recon_size: Optional[int] = None,
+ gpu_id: int = 0,
+ ) -> Type:
+ """instantiate ToMoBAR's direct recon class for 2d reconstruction
+
+ Args:
+ data (cp.ndarray): data array
+ angles (np.ndarray): angles
+ center (Optional[float], optional): center of recon. Defaults to None.
+ recon_size (Optional[int], optional): recon_size. Defaults to None.
+ gpu_id (int, optional): gpu ID. Defaults to 0.
+
+ Returns:
+ Type[RecToolsDIR]: an instance of the direct recon class
+ """
+ if center is None:
+ center = data.shape[2] // 2 # making a crude guess
+ if recon_size is None:
+ recon_size = data.shape[2]
+ RecTools = RecToolsDIR(
+ DetectorsDimH=data.shape[2], # Horizontal detector dimension
+ DetectorsDimV=None, # 2d case
+ CenterRotOffset=data.shape[2] / 2
+ - center
+ - 0.5, # Center of Rotation scalar or a vector
+ AnglesVec=-angles, # A vector of projection angles in radians
+ ObjSize=recon_size, # Reconstructed object dimensions (scalar)
+ device_projector=gpu_id,
+ )
+ return RecTools
+
+
  def _instantiate_iterative_recon_class(
  data: cp.ndarray,
  angles: np.ndarray,
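
As a worked example of the CenterRotOffset expression used in the new helper above (hypothetical numbers, not from the package):

    # Detector width of 160 pixels and an estimated centre of rotation at 79.5 px.
    det_x, center = 160, 79.5
    center_rot_offset = det_x / 2 - center - 0.5   # = 0.0, i.e. a perfectly centred scan
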
@@ -352,10 +472,20 @@ def _instantiate_iterative_recon_class(
  )
  return RecToolsCP

+
  def _take_neg_log(data: cp.ndarray) -> cp.ndarray:
  """Taking negative log"""
- data[data<=0] = 1
+ data[data <= 0] = 1
  data = -cp.log(data)
  data[cp.isnan(data)] = 6.0
  data[cp.isinf(data)] = 0
- return data
+ return data
+
+
+ def _take_neg_log_np(data: np.ndarray) -> np.ndarray:
+ """Taking negative log"""
+ data[data <= 0] = 1
+ data = -np.log(data)
+ data[np.isnan(data)] = 6.0
+ data[np.isinf(data)] = 0
+ return data
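
To illustrate the clamping behaviour of the `_take_neg_log_np` helper added above, a standalone sketch that reproduces its first two steps (not a call into the package):

    import numpy as np

    x = np.array([0.5, 1.0, 0.0, -0.2], dtype=np.float32)
    x[x <= 0] = 1      # non-positive intensities are clamped to 1 ...
    y = -np.log(x)     # ... so they map to 0 after the negative log
    # y is approximately [0.693, 0.0, 0.0, 0.0]
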

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1/httomolibgpu.egg-info}/PKG-INFO
@@ -1,6 +1,6 @@
- Metadata-Version: 2.2
+ Metadata-Version: 2.4
  Name: httomolibgpu
- Version: 2.4.0
+ Version: 2.5.1
  Summary: Commonly used tomography data processing methods at DLS.
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
  License: BSD-3-Clause
@@ -19,6 +19,7 @@ Requires-Dist: scipy
  Requires-Dist: pillow
  Requires-Dist: scikit-image
  Requires-Dist: tomobar
+ Requires-Dist: ccpi-regularisation-cupy
  Provides-Extra: dev
  Requires-Dist: pytest; extra == "dev"
  Requires-Dist: pytest-cov; extra == "dev"
@@ -32,6 +33,7 @@ Requires-Dist: imageio; extra == "dev"
  Requires-Dist: h5py; extra == "dev"
  Requires-Dist: pre-commit; extra == "dev"
  Requires-Dist: pyfftw; extra == "dev"
+ Dynamic: license-file

  HTTomolibGPU is a library of GPU accelerated methods for tomography
  --------------------------------------------------------------------
@@ -49,19 +51,28 @@ Although **HTTomolibGPU** can be used as a stand-alone library, it has been spec
  its backend for data processing. HTTomo is a user interface (UI) written in Python for fast big tomographic data processing using
  MPI protocols or as well serially.

- Install HTTomolibGPU as a PyPi package
- =========================================================
+ Installation
+ ============
+
+ HTTomolibGPU is available on PyPI, so it can be installed into either a virtual environment or
+ a conda environment.
+
+ Virtual environment
+ ~~~~~~~~~~~~~~~~~~~
  .. code-block:: console

+ $ python -m venv httomolibgpu
+ $ source httomolibgpu/bin/activate
  $ pip install httomolibgpu

- Install HTTomolibGPU as a pre-built conda Python package
- =========================================================
+ Conda environment
+ ~~~~~~~~~~~~~~~~~
  .. code-block:: console

  $ conda create --name httomolibgpu # create a fresh conda environment
  $ conda activate httomolibgpu # activate the environment
- $ conda install -c httomo httomolibgpu -c conda-forge # for linux users
+ $ conda install -c conda-forge cupy==12.3.0 # for linux users
+ $ pip install httomolibgpu

  Setup the development environment:
  ==================================
@@ -69,14 +80,6 @@ Setup the development environment:
  .. code-block:: console

  $ git clone git@github.com:DiamondLightSource/httomolibgpu.git # clone the repo
- $ conda env create --name httomolibgpu --file conda/environment.yml # install dependencies
+ $ conda env create --name httomolibgpu -c conda-forge cupy==12.3.0 # install dependencies
  $ conda activate httomolibgpu # activate the environment
- $ pip install -e .[dev] # editable/development mode
-
- Build HTTomolibGPU as a conda Python package
- ============================================
-
- .. code-block:: console
-
- $ conda build conda/recipe/ -c conda-forge -c httomo
-
+ $ pip install -e ./httomolibgpu[dev] # editable/development mode

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/httomolibgpu.egg-info/requires.txt
@@ -5,6 +5,7 @@ scipy
  pillow
  scikit-image
  tomobar
+ ccpi-regularisation-cupy

  [dev]
  pytest

{httomolibgpu-2.4.0 → httomolibgpu-2.5.1}/pyproject.toml
@@ -44,6 +44,7 @@ dependencies = [
  "pillow",
  "scikit-image",
  "tomobar",
+ "ccpi-regularisation-cupy",
  ]

  [project.optional-dependencies]