httomolibgpu 4.0__py3-none-any.whl → 5.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
httomolibgpu/__init__.py CHANGED
@@ -1,10 +1,12 @@
+ from httomolibgpu.misc.utils import data_checker
+
  from httomolibgpu.misc.corr import median_filter, remove_outlier
  from httomolibgpu.misc.denoise import total_variation_ROF, total_variation_PD
  from httomolibgpu.misc.morph import sino_360_to_180, data_resampler
  from httomolibgpu.misc.rescale import rescale_to_int
  from httomolibgpu.prep.alignment import distortion_correction_proj_discorpy
- from httomolibgpu.prep.normalize import normalize
- from httomolibgpu.prep.phase import paganin_filter
+ from httomolibgpu.prep.normalize import dark_flat_field_correction, minus_log
+ from httomolibgpu.prep.phase import paganin_filter, paganin_filter_savu_legacy
  from httomolibgpu.prep.stripe import (
      remove_stripe_based_sorting,
      remove_stripe_ti,
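The import changes above summarise the API break in this release: normalize is split into dark_flat_field_correction and minus_log, a paganin_filter_savu_legacy variant is added, and the new data_checker utility is exported at package level. A minimal, illustrative sketch of the 5.0 top-level imports (only the names come from this diff; the comments are editorial):

# httomolibgpu 4.0
# from httomolibgpu import normalize, paganin_filter

# httomolibgpu 5.0
from httomolibgpu import (
    data_checker,                # new: NaN/Inf inspection and correction
    dark_flat_field_correction,  # flat/dark-field part of the old normalize
    minus_log,                   # the -log step, now a separate method
    paganin_filter,
    paganin_filter_savu_legacy,  # new: Savu-style delta/beta parameterisation
)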
httomolibgpu/misc/corr.py CHANGED
@@ -36,8 +36,6 @@ if cupy_run:
  else:
      load_cuda_module = Mock()

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "median_filter",
      "remove_outlier",
@@ -82,10 +80,6 @@ def median_filter(
      else:
          raise ValueError("The input array must be a 3D array")

-     data = data_checker(
-         data, verbosity=True, method_name="median_filter_or_remove_outlier"
-     )
-
      if kernel_size not in [3, 5, 7, 9, 11, 13]:
          raise ValueError("Please select a correct kernel size: 3, 5, 7, 9, 11, 13")

httomolibgpu/misc/denoise.py CHANGED
@@ -29,8 +29,6 @@ cupy_run = cupywrapper.cupy_run

  from unittest.mock import Mock

- from httomolibgpu.misc.supp_func import data_checker
-
  if cupy_run:
      from tomobar.regularisersCuPy import ROF_TV_cupy, PD_TV_cupy
  else:
@@ -84,8 +82,6 @@ def total_variation_ROF(
          If the input array is not float32 data type.
      """

-     data = data_checker(data, verbosity=True, method_name="total_variation_ROF")
-
      return ROF_TV_cupy(
          data,
          regularisation_parameter,
@@ -139,8 +135,6 @@ def total_variation_PD(
          If the input array is not float32 data type.
      """

-     data_checker(data, verbosity=True, method_name="total_variation_PD")
-
      methodTV = 0
      if not isotropic:
          methodTV = 1
httomolibgpu/misc/morph.py CHANGED
@@ -35,8 +35,6 @@ else:

  from typing import Literal

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "sino_360_to_180",
      "data_resampler",
@@ -68,8 +66,6 @@ def sino_360_to_180(
      if data.ndim != 3:
          raise ValueError("only 3D data is supported")

-     data = data_checker(data, verbosity=True, method_name="sino_360_to_180")
-
      dx, dy, dz = data.shape

      overlap = int(np.round(overlap))
@@ -142,8 +138,6 @@ def data_resampler(
          data = cp.expand_dims(data, 1)
          axis = 1

-     data = data_checker(data, verbosity=True, method_name="data_resampler")
-
      N, M, Z = cp.shape(data)

      if axis == 0:
httomolibgpu/misc/rescale.py CHANGED
@@ -27,8 +27,6 @@ cp = cupywrapper.cp

  from typing import Literal, Optional, Tuple, Union

- from httomolibgpu.misc.supp_func import data_checker
-

  __all__ = [
      "rescale_to_int",
@@ -80,8 +78,6 @@ def rescale_to_int(
      else:
          output_dtype = np.uint32

-     data = data_checker(data, verbosity=True, method_name="rescale_to_int")
-
      # get the min and max integer values of the output type
      output_min = cp.iinfo(output_dtype).min
      output_max = cp.iinfo(output_dtype).max
httomolibgpu/misc/utils.py ADDED
@@ -0,0 +1,146 @@
+ #!/usr/bin/env python3
+ # -*- coding: utf-8 -*-
+ # ---------------------------------------------------------------------------
+ # Copyright 2022 Diamond Light Source Ltd.
+ #
+ # Licensed under the Apache License, Version 2.0 (the "License");
+ # you may not use this file except in compliance with the License.
+ # You may obtain a copy of the License at
+ #
+ # http://www.apache.org/licenses/LICENSE-2.0
+ #
+ # Unless required by applicable law or agreed to in writing, software
+ # distributed under the License is distributed on an "AS IS" BASIS,
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ # See the License for the specific language governing permissions and
+ # limitations under the License.
+ # ---------------------------------------------------------------------------
+ # Created By : Tomography Team at DLS <scientificsoftware@diamond.ac.uk>
+ # Created Date: 02/June/2025
+ # ---------------------------------------------------------------------------
+ """Various utilities for data inspection and correction"""
+
+ from httomolibgpu import cupywrapper
+ from typing import Optional
+
+ cp = cupywrapper.cp
+ cupy_run = cupywrapper.cupy_run
+
+ from unittest.mock import Mock
+
+ if cupy_run:
+     from httomolibgpu.cuda_kernels import load_cuda_module
+ else:
+     load_cuda_module = Mock()
+
+
+ __all__ = [
+     "data_checker",
+ ]
+
+
+ def data_checker(
+     data: cp.ndarray,
+     infsnans_correct: bool = True,
+     zeros_warning: bool = False,
+     data_to_method_name: Optional[str] = None,
+     verbosity: bool = True,
+ ) -> cp.ndarray:
+     """Function that performs checks on input data to ensure its validity, performs corrections and prints the warnings.
+     Currently it checks for the presence of Infs and NaNs in the data and corrects them.
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         CuPy array either float32 or uint16 data type.
+     infsnans_correct: bool
+         Perform correction of NaNs and Infs if they are present in the data.
+     zeros_warning: bool
+         Count the number of zeros in the data and produce a warning if more half of the data are zeros.
+     verbosity : bool
+         Print the warnings.
+     data_to_method_name : str, optional.
+         Method's name the output of which is tested. This is tailored for printing purposes when the method runs in HTTomo.
+
+     Returns
+     -------
+     cp.ndarray
+         Returns corrected CuPy array.
+     """
+     if data.dtype not in ["uint16", "float32"]:
+         raise ValueError(
+             "The input data of `uint16` and `float32` data types is accepted only."
+         )
+
+     if infsnans_correct:
+         data = __naninfs_check(
+             data, verbosity=verbosity, method_name=data_to_method_name
+         )
+     # TODO
+     # if zeros_warning:
+     #     __zeros_check(data, verbosity=verbosity, percentage_threshold = 50, method_name=data_to_method_name)
+
+     return data
+
+
+ def __naninfs_check(
+     data: cp.ndarray,
+     verbosity: bool = True,
+     method_name: Optional[str] = None,
+ ) -> cp.ndarray:
+     """
+     This function finds NaN's, +-Inf's in the input data and then prints the warnings and correct the data if correction is enabled.
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         Input CuPy or Numpy array either float32 or uint16 data type.
+     verbosity : bool
+         If enabled, then the printing of the warning happens when data contains infs or nans
+     method_name : str, optional.
+         Method's name for which the output data is tested.
+
+     Returns
+     -------
+     ndarray
+         Uncorrected or corrected (nans and infs converted to zeros) input array.
+     """
+     present_nans_infs_b = False
+
+     input_type = data.dtype
+     if len(data.shape) == 2:
+         dy, dx = data.shape
+         dz = 1
+     else:
+         dz, dy, dx = data.shape
+
+     present_nans_infs = cp.zeros(shape=(1)).astype(cp.uint8)
+
+     block_x = 128
+     # setting grid/block parameters
+     block_dims = (block_x, 1, 1)
+     grid_x = (dx + block_x - 1) // block_x
+     grid_y = dy
+     grid_z = dz
+     grid_dims = (grid_x, grid_y, grid_z)
+     params = (data, dz, dy, dx, present_nans_infs)
+
+     kernel_args = "remove_nan_inf<{0}>".format(
+         "float" if input_type == "float32" else "unsigned short"
+     )
+
+     module = load_cuda_module("remove_nan_inf", name_expressions=[kernel_args])
+     remove_nan_inf_kernel = module.get_function(kernel_args)
+     remove_nan_inf_kernel(grid_dims, block_dims, params)
+
+     if present_nans_infs[0].get() == 1:
+         present_nans_infs_b = True
+
+     if present_nans_infs_b:
+         if verbosity:
+             print(
+                 "Warning! Output data of the \033[31m{}\033[0m method contains Inf's or/and NaN's. Corrected to zeros.".format(
+                     method_name
+                 )
+             )
+     return data
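A short usage sketch of the new checker (the array below is invented for illustration; the function name, arguments and defaults come from the file above):

import cupy as cp
from httomolibgpu import data_checker

raw = cp.random.random((10, 64, 64)).astype(cp.float32)
raw[0, 0, 0] = cp.nan   # inject a NaN to exercise the correction path
raw[1, 2, 3] = cp.inf

# NaN/Inf values are detected on the GPU, reported (verbosity=True) and set to zero
raw = data_checker(
    raw,
    infsnans_correct=True,
    verbosity=True,
    data_to_method_name="normalisation",
)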
httomolibgpu/prep/alignment.py CHANGED
@@ -35,8 +35,6 @@ else:

  from typing import Dict, List, Tuple

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "distortion_correction_proj_discorpy",
  ]
@@ -88,10 +86,6 @@ def distortion_correction_proj_discorpy(
      if len(data.shape) == 2:
          data = cp.expand_dims(data, axis=0)

-     data = data_checker(
-         data, verbosity=True, method_name="distortion_correction_proj_discorpy"
-     )
-
      # Get info from metadata txt file
      xcenter, ycenter, list_fact = _load_metadata_txt(metadata_path)

httomolibgpu/prep/normalize.py CHANGED
@@ -20,7 +20,6 @@
  # ---------------------------------------------------------------------------
  """Modules for raw projection data normalization"""

- import numpy as np
  from httomolibgpu import cupywrapper

  cp = cupywrapper.cp
@@ -34,27 +33,21 @@ else:
      mean = Mock()

  from numpy import float32
- from typing import Tuple

- from httomolibgpu.misc.supp_func import data_checker

- __all__ = ["normalize"]
+ __all__ = ["dark_flat_field_correction", "minus_log"]


- def normalize(
+ def dark_flat_field_correction(
      data: cp.ndarray,
      flats: cp.ndarray,
      darks: cp.ndarray,
      flats_multiplier: float = 1.0,
      darks_multiplier: float = 1.0,
      cutoff: float = 10.0,
-     minus_log: bool = True,
-     nonnegativity: bool = False,
-     remove_nans: bool = False,
  ) -> cp.ndarray:
      """
      Normalize raw projection data using the flat and dark field projections.
-     This is a raw CUDA kernel implementation with CuPy wrappers.

      Parameters
      ----------
@@ -70,17 +63,11 @@ def normalize(
          A multiplier to apply to darks, can work as an intensity compensation constant.
      cutoff : float
          Permitted maximum value for the normalised data.
-     minus_log : bool
-         Apply negative log to the normalised data.
-     nonnegativity : bool
-         Remove negative values in the normalised data.
-     remove_nans : bool
-         Remove NaN and Inf values in the normalised data.

-     Returns
+     Returns
      -------
      cp.ndarray
-         Normalised 3D tomographic data as a CuPy array.
+         Normalised by dark/flat fields 3D tomographic data as a CuPy array.
      """
      _check_valid_input_normalise(data, flats, darks)

@@ -101,16 +88,6 @@
          }
          float v = (float(data) - float(darks))/denom;
      """
-     if minus_log:
-         kernel += "v = -log(v);\n"
-         kernel_name += "_mlog"
-     if nonnegativity:
-         kernel += "if (v < 0.0f) v = 0.0f;\n"
-         kernel_name += "_nneg"
-     if remove_nans:
-         kernel += "if (isnan(v)) v = 0.0f;\n"
-         kernel += "if (isinf(v)) v = 0.0f;\n"
-         kernel_name += "_remnan"
      kernel += "if (v > cutoff) v = cutoff;\n"
      kernel += "if (v < -cutoff) v = cutoff;\n"
      kernel += "out = v;\n"
@@ -130,6 +107,24 @@ def normalize(
      return out


+ def minus_log(data: cp.ndarray) -> cp.ndarray:
+     """
+     Apply -log(data) operation
+
+     Parameters
+     ----------
+     data : cp.ndarray
+         Data as a CuPy array.
+
+     Returns
+     -------
+     cp.ndarray
+         data after -log(data)
+     """
+
+     return -cp.log(data)
+
+
  def _check_valid_input_normalise(data, flats, darks) -> None:
      """Helper function to check the validity of inputs to normalisation functions"""
      if data.ndim != 3:
@@ -145,7 +140,3 @@ def _check_valid_input_normalise(data, flats, darks) -> None:
          flats = flats[cp.newaxis, :, :]
      if darks.ndim == 2:
          darks = darks[cp.newaxis, :, :]
-
-     data_checker(data, verbosity=True, method_name="normalize_data")
-     data_checker(flats, verbosity=True, method_name="normalize_flats")
-     data_checker(darks, verbosity=True, method_name="normalize_darks")
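For pipelines built on 4.0, the practical effect of this split is that the minus_log, nonnegativity and remove_nans flags are gone from the normalisation call; the -log step is now an explicit second call, and NaN/Inf correction is available separately via the new data_checker utility. A minimal before/after sketch with invented array shapes and values:

import cupy as cp
from httomolibgpu import dark_flat_field_correction, minus_log

projs = cp.random.random((180, 128, 160)).astype(cp.float32)
flats = cp.ones((20, 128, 160), dtype=cp.float32)
darks = cp.zeros((20, 128, 160), dtype=cp.float32)

# 4.0: data = normalize(projs, flats, darks, minus_log=True)
# 5.0: flat/dark-field correction and -log are two separate steps
data = dark_flat_field_correction(projs, flats, darks, cutoff=10.0)
data = minus_log(data)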
httomolibgpu/prep/phase.py CHANGED
@@ -39,10 +39,9 @@ from numpy import float32
  from typing import Tuple
  import math

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "paganin_filter",
+     "paganin_filter_savu_legacy",
  ]


@@ -85,8 +84,6 @@ def paganin_filter(
              " please provide a stack of 2D projections."
          )

-     tomo = data_checker(tomo, verbosity=True, method_name="paganin_filter")
-
      dz_orig, dy_orig, dx_orig = tomo.shape

      # Perform padding to the power of 2 as FFT is O(n*log(n)) complexity
@@ -233,3 +230,37 @@ def _reciprocal_coord(pixel_size: float, num_grid: int) -> cp.ndarray:
      rc = cp.arange(-n, num_grid, 2, dtype=cp.float32)
      rc *= 2 * math.pi / (n * pixel_size)
      return rc
+
+
+ def paganin_filter_savu_legacy(
+     tomo: cp.ndarray,
+     pixel_size: float = 1.28,
+     distance: float = 1.0,
+     energy: float = 53.0,
+     ratio_delta_beta: float = 250,
+ ) -> cp.ndarray:
+     """
+     Perform single-material phase retrieval from flats/darks corrected tomographic measurements. For more detailed information, see :ref:`phase_contrast_module`.
+     Also see :cite:`Paganin02` and :cite:`paganin2020boosting` for references. The ratio_delta_beta parameter here follows implementation in Savu software.
+     The module will be retired in future in favour of paganin_filter. One can rescale parameter ratio_delta_beta / 4 to achieve the same effect in paganin_filter.
+
+     Parameters
+     ----------
+     tomo : cp.ndarray
+         3D array of f/d corrected tomographic projections.
+     pixel_size : float
+         Detector pixel size (resolution) in micron units.
+     distance : float
+         Propagation distance of the wavefront from sample to detector in metre units.
+     energy : float
+         Beam energy in keV.
+     ratio_delta_beta : float
+         The ratio of delta/beta, where delta is the phase shift and real part of the complex material refractive index and beta is the absorption.
+
+     Returns
+     -------
+     cp.ndarray
+         The 3D array of Paganin phase-filtered projection images.
+     """
+
+     return paganin_filter(tomo, pixel_size, distance, energy, ratio_delta_beta / 4)
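Per the docstring above, the legacy variant only rescales the delta/beta ratio before delegating to paganin_filter, so the two calls in the sketch below are intended to give the same result. The fifth argument of paganin_filter is passed positionally because its keyword name is not shown in this diff, and the projection array is invented for illustration:

import cupy as cp
from httomolibgpu import paganin_filter, paganin_filter_savu_legacy

projs = cp.random.random((180, 128, 160)).astype(cp.float32)

# Savu-style parameterisation with ratio_delta_beta = 250 ...
phase_a = paganin_filter_savu_legacy(
    projs, pixel_size=1.28, distance=1.0, energy=53.0, ratio_delta_beta=250
)
# ... corresponds to paganin_filter with the ratio divided by 4 (250 / 4 = 62.5)
phase_b = paganin_filter(projs, 1.28, 1.0, 53.0, 62.5)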
httomolibgpu/prep/stripe.py CHANGED
@@ -43,8 +43,6 @@ else:

  from typing import Union

- from httomolibgpu.misc.supp_func import data_checker
-
  __all__ = [
      "remove_stripe_based_sorting",
      "remove_stripe_ti",
@@ -83,8 +81,6 @@ def remove_stripe_based_sorting(

      """

-     data = data_checker(data, verbosity=True, method_name="remove_stripe_based_sorting")
-
      if size is None:
          if data.shape[2] > 2000:
              size = 21
@@ -139,7 +135,6 @@ def remove_stripe_ti(
      ndarray
          3D array of de-striped projections.
      """
-     data = data_checker(data, verbosity=True, method_name="remove_stripe_ti")

      _, _, dx_orig = data.shape
      if (dx_orig % 2) != 0:
@@ -216,7 +211,6 @@ def remove_all_stripe(
          Corrected 3D tomographic data as a CuPy or NumPy array.

      """
-     data = data_checker(data, verbosity=True, method_name="remove_all_stripe")

      matindex = _create_matindex(data.shape[2], data.shape[0])
      for m in range(data.shape[1]):
@@ -392,8 +386,6 @@ def raven_filter(
      if data.dtype != cp.float32:
          raise ValueError("The input data should be float32 data type")

-     data = data_checker(data, verbosity=True, method_name="raven_filter")
-
      # Padding of the sinogram
      data = cp.pad(data, ((pad_y, pad_y), (0, 0), (pad_x, pad_x)), mode=pad_method)
391