httomolibgpu 4.0__py3-none-any.whl → 5.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -43,8 +43,6 @@ else:
 
 from typing import Union
 
-from httomolibgpu.misc.supp_func import data_checker
-
 __all__ = [
     "remove_stripe_based_sorting",
     "remove_stripe_ti",
@@ -83,8 +81,6 @@ def remove_stripe_based_sorting(
 
     """
 
-    data = data_checker(data, verbosity=True, method_name="remove_stripe_based_sorting")
-
     if size is None:
         if data.shape[2] > 2000:
             size = 21
@@ -139,7 +135,6 @@ def remove_stripe_ti(
     ndarray
         3D array of de-striped projections.
     """
-    data = data_checker(data, verbosity=True, method_name="remove_stripe_ti")
 
     _, _, dx_orig = data.shape
     if (dx_orig % 2) != 0:
@@ -216,7 +211,6 @@ def remove_all_stripe(
         Corrected 3D tomographic data as a CuPy or NumPy array.
 
     """
-    data = data_checker(data, verbosity=True, method_name="remove_all_stripe")
 
     matindex = _create_matindex(data.shape[2], data.shape[0])
     for m in range(data.shape[1]):
@@ -392,8 +386,6 @@ def raven_filter(
     if data.dtype != cp.float32:
         raise ValueError("The input data should be float32 data type")
 
-    data = data_checker(data, verbosity=True, method_name="raven_filter")
-
     # Padding of the sinogram
     data = cp.pad(data, ((pad_y, pad_y), (0, 0), (pad_x, pad_x)), mode=pad_method)
 
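Note on the five hunks above: 5.1 drops the per-method data_checker call that 4.0 imported from httomolibgpu.misc.supp_func, so these stripe-removal methods no longer validate their input internally. A minimal caller-side sketch follows; the helper name and the specific checks (3D stack, finite values, float32 dtype) are assumptions for illustration only, since the removed checker's implementation is not part of this diff.

import cupy as cp

def check_projection_data(data: cp.ndarray, method_name: str) -> cp.ndarray:
    # Hypothetical stand-in for the removed data_checker call (assumed checks only).
    if data.ndim != 3:
        raise ValueError(f"{method_name}: expected a 3D projection stack, got {data.ndim}D")
    if not bool(cp.isfinite(data).all()):
        print(f"{method_name}: input contains NaN/Inf values")
    # raven_filter above explicitly requires float32 input.
    return cp.asarray(data, dtype=cp.float32)

# e.g. data = check_projection_data(data, "remove_stripe_based_sorting")
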
@@ -0,0 +1,402 @@
+#!/usr/bin/env python3
+# -*- coding: utf-8 -*-
+# ---------------------------------------------------------------------------
+# Copyright 2025 Diamond Light Source Ltd.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+# ---------------------------------------------------------------------------
+# Created By : Tomography Team at DLS <scientificsoftware@diamond.ac.uk>
+# Created Date: 2 December 2025
+# ---------------------------------------------------------------------------
+
+# SPDX-FileCopyrightText: 2009-2022 the scikit-image team
+# SPDX-FileCopyrightText: Copyright (c) 2021-2025, NVIDIA CORPORATION. All rights reserved.
+# SPDX-License-Identifier: Apache-2.0 AND BSD-3-Clause
+
+"""
+Port of Manuel Guizar's code from:
+http://www.mathworks.com/matlabcentral/fileexchange/18401-efficient-subpixel-image-registration-by-cross-correlation
+"""
+
+import itertools
+import math
+import warnings
+
+import cupy as cp
+import cupyx.scipy.ndimage as ndi
+import numpy as np
+
+
+def _upsampled_dft(data, upsampled_region_size, upsample_factor=1, axis_offsets=None):
+    """
+    Upsampled DFT by matrix multiplication.
+
+    This code is intended to provide the same result as if the following
+    operations were performed:
+        - Embed the array "data" in an array that is ``upsample_factor`` times
+          larger in each dimension. ifftshift to bring the center of the
+          image to (1,1).
+        - Take the FFT of the larger array.
+        - Extract an ``[upsampled_region_size]`` region of the result, starting
+          with the ``[axis_offsets+1]`` element.
+
+    It achieves this result by computing the DFT in the output array without
+    the need to zeropad. Much faster and memory efficient than the zero-padded
+    FFT approach if ``upsampled_region_size`` is much smaller than
+    ``data.size * upsample_factor``.
+
+    Parameters
+    ----------
+    data : array
+        The input data array (DFT of original data) to upsample.
+    upsampled_region_size : integer or tuple of integers, optional
+        The size of the region to be sampled. If one integer is provided, it
+        is duplicated up to the dimensionality of ``data``.
+    upsample_factor : integer, optional
+        The upsampling factor. Defaults to 1.
+    axis_offsets : tuple of integers, optional
+        The offsets of the region to be sampled. Defaults to None (uses
+        image center)
+
+    Returns
+    -------
+    output : ndarray
+        The upsampled DFT of the specified region.
+    """
+    # if people pass in an integer, expand it to a list of equal-sized sections
+    if not hasattr(upsampled_region_size, "__iter__"):
+        upsampled_region_size = [upsampled_region_size] * data.ndim
+    else:
+        if len(upsampled_region_size) != data.ndim:
+            raise ValueError(
+                "shape of upsampled region sizes must be equal "
+                "to input data's number of dimensions."
+            )
+
+    if axis_offsets is None:
+        axis_offsets = [0] * data.ndim
+    else:
+        if len(axis_offsets) != data.ndim:
+            raise ValueError(
+                "number of axis offsets must be equal to input "
+                "data's number of dimensions."
+            )
+
+    im2pi = 1j * 2 * np.pi
+
+    dim_properties = list(zip(data.shape, upsampled_region_size, axis_offsets))
+
+    for n_items, ups_size, ax_offset in dim_properties[::-1]:
+        kernel = (cp.arange(ups_size) - ax_offset)[:, None] * cp.fft.fftfreq(
+            n_items, upsample_factor
+        )
+        kernel = cp.exp(-im2pi * kernel)
+        # CuPy Backend: use kernel of same precision as the data
+        kernel = kernel.astype(data.dtype, copy=False)
+
+        # Equivalent to:
+        #   data[i, j, k] = kernel[i, :] @ data[j, k].T
+        data = cp.tensordot(kernel, data, axes=(1, -1))
+    return data
+
+
+def _compute_phasediff(cross_correlation_max):
+    """
+    Compute global phase difference between the two images (should be
+    zero if images are non-negative).
+
+    Parameters
+    ----------
+    cross_correlation_max : complex
+        The complex value of the cross correlation at its maximum point.
+    """
+    return cp.arctan2(cross_correlation_max.imag, cross_correlation_max.real)
+
+
+def _compute_error(cross_correlation_max, src_amp, target_amp):
+    """
+    Compute RMS error metric between ``src_image`` and ``target_image``.
+
+    Parameters
+    ----------
+    cross_correlation_max : complex
+        The complex value of the cross correlation at its maximum point.
+    src_amp : float
+        The normalized average image intensity of the source image
+    target_amp : float
+        The normalized average image intensity of the target image
+    """
+    amp = src_amp * target_amp
+    if amp == 0:
+        warnings.warn(
+            "Could not determine RMS error between images with the normalized "
+            f"average intensities {src_amp!r} and {target_amp!r}. Either the "
+            "reference or moving image may be empty.",
+            UserWarning,
+            stacklevel=3,
+        )
+
+    with np.errstate(invalid="ignore"):
+        error = 1.0 - cross_correlation_max * cross_correlation_max.conj() / (amp)
+
+    return cp.sqrt(cp.abs(error))
+
+
+def _disambiguate_shift(reference_image, moving_image, shift):
+    """Determine the correct real-space shift based on periodic shift.
+
+    When determining a translation shift from phase cross-correlation in
+    Fourier space, the shift is only correct to within a period of the image
+    size along each axis, resulting in $2^n$ possible shifts, where $n$ is the
+    number of dimensions of the image. This function checks the
+    cross-correlation in real space for each of those shifts, and returns the
+    one with the highest cross-correlation.
+
+    The strategy we use is to perform the shift on the moving image *using the
+    'grid-wrap' mode* in `scipy.ndimage`. The moving image's original borders
+    then define $2^n$ quadrants, which we cross-correlate with the reference
+    image in turn using slicing. The entire operation is thus $O(2^n + m)$,
+    where $m$ is the number of pixels in the image (and typically dominates).
+
+    Parameters
+    ----------
+    reference_image : numpy array
+        The reference (non-moving) image.
+    moving_image : numpy array
+        The moving image: applying the shift to this image overlays it on the
+        reference image. Must be the same shape as the reference image.
+    shift : tuple of float
+        The shift to apply to each axis of the moving image, *modulo* image
+        size. The length of ``shift`` must be equal to ``moving_image.ndim``.
+
+    Returns
+    -------
+    real_shift : tuple of float
+        The shift disambiguated in real space.
+    """
+    shape = reference_image.shape
+    positive_shift = [shift_i % s for shift_i, s in zip(shift, shape)]
+    negative_shift = [shift_i - s for shift_i, s in zip(positive_shift, shape)]
+    subpixel = any(s % 1 != 0 for s in shift)
+    interp_order = 3 if subpixel else 0
+    shifted = ndi.shift(moving_image, shift, mode="grid-wrap", order=interp_order)
+    indices = tuple(round(s) for s in positive_shift)
+    splits_per_dim = [(slice(0, i), slice(i, None)) for i in indices]
+    max_corr = -1.0
+    max_slice = None
+    for test_slice in itertools.product(*splits_per_dim):
+        reference_tile = cp.reshape(reference_image[test_slice], -1)
+        moving_tile = cp.reshape(shifted[test_slice], -1)
+        corr = -1.0
+        if reference_tile.size > 2:
+            corr = float(cp.corrcoef(reference_tile, moving_tile)[0, 1])
+        if corr > max_corr:
+            max_corr = corr
+            max_slice = test_slice
+    if max_slice is None:
+        warnings.warn(
+            "Could not determine real-space shift for periodic shift "
+            f"{shift!r} as requested by `disambiguate=True` (disambiguation "
+            "is degenerate).",
+            stacklevel=3,
+        )
+        return shift
+    real_shift_acc = []
+    for sl, pos_shift, neg_shift in zip(max_slice, positive_shift, negative_shift):
+        real_shift_acc.append(pos_shift if sl.stop is None else neg_shift)
+    if not subpixel:
+        real_shift = tuple(map(int, real_shift_acc))
+    else:
+        real_shift = tuple(real_shift_acc)
+    return real_shift
+
+
+def phase_cross_correlation(
+    reference_image,
+    moving_image,
+    *,
+    upsample_factor=1,
+    space="real",
+    disambiguate=False,
+    normalization="phase",
+):
+    """Efficient subpixel image translation registration by cross-correlation.
+
+    This code gives the same precision as the FFT upsampled cross-correlation
+    in a fraction of the computation time and with reduced memory requirements.
+    It obtains an initial estimate of the cross-correlation peak by an FFT and
+    then refines the shift estimation by upsampling the DFT only in a small
+    neighborhood of that estimate by means of a matrix-multiply DFT [1]_.
+
+    Parameters
+    ----------
+    reference_image : array
+        Reference image.
+    moving_image : array
+        Image to register. Must be same dimensionality as
+        ``reference_image``.
+    upsample_factor : int, optional
+        Upsampling factor. Images will be registered to within
+        ``1 / upsample_factor`` of a pixel. For example
+        ``upsample_factor == 20`` means the images will be registered
+        within 1/20th of a pixel. Default is 1 (no upsampling).
+    space : string, one of "real" or "fourier", optional
+        Defines how the algorithm interprets input data. "real" means
+        data will be FFT'd to compute the correlation, while "fourier"
+        data will bypass FFT of input data. Case insensitive.
+    disambiguate : bool
+        The shift returned by this function is only accurate *modulo* the
+        image shape, due to the periodic nature of the Fourier transform. If
+        this parameter is set to ``True``, the *real* space cross-correlation
+        is computed for each possible shift, and the shift with the highest
+        cross-correlation within the overlapping area is returned.
+    normalization : {"phase", None}
+        The type of normalization to apply to the cross-correlation.
+
+    Returns
+    -------
+    shift : tuple
+        Shift vector (in pixels) required to register ``moving_image``
+        with ``reference_image``. Axis ordering is consistent with
+        the axis order of the input array.
+    error : float
+        Translation invariant normalized RMS error between
+        ``reference_image`` and ``moving_image``.
+    phasediff : float
+        Global phase difference between the two images (should be
+        zero if images are non-negative).
+
+    Notes
+    -----
+    The use of cross-correlation to estimate image translation has a long
+    history dating back to at least [2]_. The "phase correlation"
+    method (selected by ``normalization="phase"``) was first proposed in [3]_.
+    Publications [1]_ and [2]_ use an unnormalized cross-correlation
+    (``normalization=None``). Which form of normalization is better is
+    application-dependent. For example, the phase correlation method works
+    well in registering images under different illumination, but is not very
+    robust to noise. In a high noise scenario, the unnormalized method may be
+    preferable.
+
+    References
+    ----------
+    .. [1] Manuel Guizar-Sicairos, Samuel T. Thurman, and James R. Fienup,
+           "Efficient subpixel image registration algorithms,"
+           Optics Letters 33, 156-158 (2008). :DOI:`10.1364/OL.33.000156`
+    .. [2] P. Anuta, Spatial registration of multispectral and multitemporal
+           digital imagery using fast Fourier transform techniques, IEEE Trans.
+           Geosci. Electron., vol. 8, no. 4, pp. 353–368, Oct. 1970.
+           :DOI:`10.1109/TGE.1970.271435`.
+    .. [3] C. D. Kuglin and D. C. Hines, "The phase correlation image alignment
+           method," Proceedings of the IEEE International Conference on
+           Cybernetics and Society, New York, NY, USA, 1975, pp. 163-165.
+    .. [4] James R. Fienup, "Invariant error metrics for image reconstruction,"
+           Applied Optics 36, 8352-8357 (1997). :DOI:`10.1364/AO.36.008352`
+    """
+
+    # images must be the same shape
+    if reference_image.shape != moving_image.shape:
+        raise ValueError("images must be same shape")
+
+    # assume complex data is already in Fourier space
+    if space.lower() == "fourier":
+        src_freq = reference_image
+        target_freq = moving_image
+    # real data needs to be fft'd.
+    elif space.lower() == "real":
+        src_freq = cp.fft.fftn(reference_image)
+        target_freq = cp.fft.fftn(moving_image)
+    else:
+        raise ValueError('space argument must be "real" or "fourier"')
+
+    # Whole-pixel shift - Compute cross-correlation by an IFFT
+    shape = src_freq.shape
+    image_product = src_freq * target_freq.conj()
+    if normalization == "phase":
+        eps = cp.finfo(image_product.real.dtype).eps
+        image_product /= cp.maximum(cp.abs(image_product), 100 * eps)
+    elif normalization is not None:
+        raise ValueError("normalization must be either phase or None")
+    cross_correlation = cp.fft.ifftn(image_product)
+
+    # Locate maximum
+    maxima = np.unravel_index(
+        int(cp.argmax(cp.abs(cross_correlation))), cross_correlation.shape
+    )
+    midpoint = tuple(float(axis_size // 2) for axis_size in shape)
+    shift = tuple(
+        _max - axis_size if _max > mid else _max
+        for _max, mid, axis_size in zip(maxima, midpoint, shape)
+    )
+
+    if upsample_factor == 1:
+        sabs = cp.abs(src_freq)
+        sabs *= sabs
+        tabs = cp.abs(target_freq)
+        tabs *= tabs
+        src_amp = np.sum(sabs) / src_freq.size
+        target_amp = np.sum(tabs) / target_freq.size
+        CCmax = cross_correlation[maxima]
+    # If upsampling > 1, then refine estimate with matrix multiply DFT
+    else:
+        # Initial shift estimate in upsampled grid
+        # shift = cp.around(shift * upsample_factor) / upsample_factor
+        upsample_factor = float(upsample_factor)
+        shift = tuple(round(s * upsample_factor) / upsample_factor for s in shift)
+        upsampled_region_size = math.ceil(upsample_factor * 1.5)
+        # Center of output array at dftshift + 1
+        dftshift = float(upsampled_region_size // 2)
+        # Matrix multiply DFT around the current shift estimate
+        sample_region_offset = tuple(dftshift - s * upsample_factor for s in shift)
+        cross_correlation = _upsampled_dft(
+            image_product.conj(),
+            upsampled_region_size,
+            upsample_factor,
+            sample_region_offset,
+        ).conj()
+
+        # Locate maximum and map back to original pixel grid
+        maxima = np.unravel_index(
+            int(cp.argmax(cp.abs(cross_correlation))), cross_correlation.shape
+        )
+        CCmax = cross_correlation[maxima]
+
+        maxima = tuple(float(m) - dftshift for m in maxima)
+        shift = tuple(s + m / upsample_factor for s, m in zip(shift, maxima))
+
+        src_amp = cp.abs(src_freq)
+        src_amp *= src_amp
+        src_amp = cp.sum(src_amp)
+        target_amp = cp.abs(target_freq)
+        target_amp *= target_amp
+        target_amp = cp.sum(target_amp)
+
+    # If its only one row or column the shift along that dimension has no
+    # effect. We set to zero.
+    shift = tuple(s if axis_size != 1 else 0 for s, axis_size in zip(shift, shape))
+
+    if disambiguate:
+        if space.lower() != "real":
+            reference_image = cp.fft.ifftn(reference_image)
+            moving_image = cp.fft.ifftn(moving_image)
+        shift = _disambiguate_shift(reference_image, moving_image, shift)
+
+    # Redirect user to masked_phase_cross_correlation if NaNs are observed
+    if cp.isnan(CCmax) or cp.isnan(src_amp) or cp.isnan(target_amp):
+        raise ValueError("NaN values found, please remove NaNs from your input data")
+
+    return (
+        shift,
+        _compute_error(CCmax, src_amp, target_amp),
+        _compute_phasediff(CCmax),
+    )
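The file added above is a CuPy port of the scikit-image/cucim phase_cross_correlation routine. A minimal usage sketch follows, exercising only the signature visible in this diff; the synthetic images and the expected offset are illustrative, and the function is assumed to be importable from wherever this module lands in the 5.1 wheel (the file path header is not preserved in this extract).

import cupy as cp
import cupyx.scipy.ndimage as ndi

# Reference image and a copy translated by a known whole-pixel offset.
reference = cp.random.rand(256, 256).astype(cp.float32)
moving = ndi.shift(reference, (-12.0, 5.0), mode="grid-wrap")

# Register to 1/10th of a pixel; the returned shift should be close to (12.0, -5.0),
# i.e. the translation needed to map `moving` back onto `reference`.
shift, error, phasediff = phase_cross_correlation(
    reference, moving, upsample_factor=10, normalization="phase"
)
print(shift, float(error), float(phasediff))
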
@@ -40,8 +40,6 @@ else:
 from numpy import float32
 from typing import Optional, Type, Union
 
-from httomolibgpu.misc.supp_func import data_checker
-
 
 __all__ = [
     "FBP2d_astra",
@@ -66,7 +64,6 @@ def FBP2d_astra(
     filter_d: Optional[float] = None,
     recon_size: Optional[int] = None,
     recon_mask_radius: float = 0.95,
-    neglog: bool = False,
     gpu_id: int = 0,
 ) -> np.ndarray:
     """
@@ -98,9 +95,6 @@ def FBP2d_astra(
         The radius of the circular mask that applies to the reconstructed slice in order to crop
         out some undesirable artifacts. The values outside the given diameter will be set to zero.
         To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int
         A GPU device index to perform operation on.
 
@@ -109,7 +103,6 @@ def FBP2d_astra(
     np.ndarray
         The FBP reconstructed volume as a numpy array.
     """
-    data = data_checker(data, verbosity=True, method_name="FBP2d_astra")
 
     data_shape = np.shape(data)
     if recon_size is None:
@@ -123,8 +116,6 @@ def FBP2d_astra(
     reconstruction = np.empty(
         (recon_size, detY_size, recon_size), dtype=float32, order="C"
     )
-    _take_neg_log_np(data) if neglog else data
-
     # loop over detY slices
     for slice_index in range(0, detY_size):
         reconstruction[:, slice_index, :] = np.flipud(
@@ -148,7 +139,6 @@ def FBP3d_tomobar(
     filter_freq_cutoff: float = 0.35,
     recon_size: Optional[int] = None,
     recon_mask_radius: Optional[float] = 0.95,
-    neglog: bool = False,
     gpu_id: int = 0,
 ) -> cp.ndarray:
     """
@@ -177,9 +167,6 @@ def FBP3d_tomobar(
         The radius of the circular mask that applies to the reconstructed slice in order to crop
         out some undesirable artifacts. The values outside the given diameter will be set to zero.
         To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int
         A GPU device index to perform operation on.
 
@@ -188,14 +175,13 @@ def FBP3d_tomobar(
     cp.ndarray
         FBP reconstructed volume as a CuPy array.
     """
-    data = data_checker(data, verbosity=True, method_name="FBP3d_tomobar")
 
     RecToolsCP = _instantiate_direct_recon_class(
         data, angles, center, detector_pad, recon_size, gpu_id
     )
 
     reconstruction = RecToolsCP.FBP(
-        _take_neg_log(data) if neglog else data,
+        data,
         cutoff_freq=filter_freq_cutoff,
         recon_mask_radius=recon_mask_radius,
         data_axes_labels_order=input_data_axis_labels,
@@ -218,7 +204,6 @@ def LPRec3d_tomobar(
     power_of_2_cropping: Optional[bool] = False,
     min_mem_usage_filter: Optional[bool] = True,
     min_mem_usage_ifft2: Optional[bool] = True,
-    neglog: bool = False,
 ) -> cp.ndarray:
     """
     Fourier direct inversion in 3D on unequally spaced (also called as Log-Polar) grids using
@@ -247,9 +232,6 @@ def LPRec3d_tomobar(
         The radius of the circular mask that applies to the reconstructed slice in order to crop
         out some undesirable artifacts. The values outside the given diameter will be set to zero.
         To implement the cropping one can use the range [0.7-1.0] or set to 2.0 when no cropping required.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
 
     Returns
     -------
@@ -257,14 +239,12 @@ def LPRec3d_tomobar(
         The Log-polar Fourier reconstructed volume as a CuPy array.
     """
 
-    data = data_checker(data, verbosity=True, method_name="LPRec3d_tomobar")
-
     RecToolsCP = _instantiate_direct_recon_class(
         data, angles, center, detector_pad, recon_size, 0
     )
 
     reconstruction = RecToolsCP.FOURIER_INV(
-        _take_neg_log(data) if neglog else data,
+        data,
         recon_mask_radius=recon_mask_radius,
         data_axes_labels_order=input_data_axis_labels,
         filter_type=filter_type,
@@ -288,7 +268,6 @@ def SIRT3d_tomobar(
     recon_mask_radius: float = 0.95,
     iterations: int = 300,
     nonnegativity: bool = True,
-    neglog: bool = False,
     gpu_id: int = 0,
 ) -> cp.ndarray:
     """
@@ -318,10 +297,7 @@ def SIRT3d_tomobar(
     iterations : int
         The number of SIRT iterations.
     nonnegativity : bool
-        Impose nonnegativity constraint on reconstructed image.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
+        Impose nonnegativity constraint on the reconstructed image.
     gpu_id : int
         A GPU device index to perform operation on.
 
@@ -330,7 +306,6 @@ def SIRT3d_tomobar(
     cp.ndarray
         The SIRT reconstructed volume as a CuPy array.
     """
-    data = data_checker(data, verbosity=True, method_name="SIRT3d_tomobar")
 
     RecToolsCP = _instantiate_iterative_recon_class(
         data,
@@ -343,7 +318,7 @@ )
     )
 
     _data_ = {
-        "projection_norm_data": _take_neg_log(data) if neglog else data,
+        "projection_norm_data": data,
         "data_axes_labels_order": input_data_axis_labels,
     } # data dictionary
     _algorithm_ = {
@@ -366,7 +341,6 @@ def CGLS3d_tomobar(
     recon_mask_radius: float = 0.95,
     iterations: int = 20,
     nonnegativity: bool = True,
-    neglog: bool = False,
     gpu_id: int = 0,
 ) -> cp.ndarray:
     """
@@ -397,9 +371,6 @@ def CGLS3d_tomobar(
         The number of CGLS iterations.
     nonnegativity : bool
         Impose nonnegativity constraint on reconstructed image.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int, optional
         A GPU device index to perform operation on.
 
@@ -408,14 +379,13 @@ def CGLS3d_tomobar(
     cp.ndarray
         The CGLS reconstructed volume as a CuPy array.
     """
-    data = data_checker(data, verbosity=True, method_name="CGLS3d_tomobar")
 
     RecToolsCP = _instantiate_iterative_recon_class(
         data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS"
     )
 
     _data_ = {
-        "projection_norm_data": _take_neg_log(data) if neglog else data,
+        "projection_norm_data": data,
         "data_axes_labels_order": input_data_axis_labels,
     } # data dictionary
     _algorithm_ = {
@@ -443,7 +413,6 @@ def FISTA3d_tomobar(
     regularisation_iterations: int = 50,
     regularisation_half_precision: bool = True,
     nonnegativity: bool = True,
-    neglog: bool = False,
     gpu_id: int = 0,
 ) -> cp.ndarray:
     """
@@ -482,9 +451,6 @@ def FISTA3d_tomobar(
         Perform faster regularisation computation in half-precision with a very minimal sacrifice in quality.
     nonnegativity : bool
         Impose nonnegativity constraint on the reconstructed image.
-    neglog: bool
-        Take negative logarithm on input data to convert to attenuation coefficient or a density of the scanned object. Defaults to False,
-        assuming that the negative log is taken either in normalisation procedure on with Paganin filter application.
     gpu_id : int
         A GPU device index to perform operation on.
 
@@ -493,14 +459,12 @@ def FISTA3d_tomobar(
     cp.ndarray
         The FISTA reconstructed volume as a CuPy array.
     """
-    data = data_checker(data, verbosity=True, method_name="FISTA3d_tomobar")
-
     RecToolsCP = _instantiate_iterative_recon_class(
         data, angles, center, detector_pad, recon_size, gpu_id, datafidelity="LS"
    )
 
     _data_ = {
-        "projection_norm_data": _take_neg_log(data) if neglog else data,
+        "projection_norm_data": data,
         "OS_number": subsets_number,
         "data_axes_labels_order": input_data_axis_labels,
     }
@@ -659,24 +623,6 @@ def _instantiate_iterative_recon_class(
     return RecToolsCP
 
 
-def _take_neg_log(data: cp.ndarray) -> cp.ndarray:
-    """Taking negative log"""
-    data[data <= 0] = 1
-    data = -cp.log(data)
-    data[cp.isnan(data)] = 6.0
-    data[cp.isinf(data)] = 0
-    return data
-
-
-def _take_neg_log_np(data: np.ndarray) -> np.ndarray:
-    """Taking negative log"""
-    data[data <= 0] = 1
-    data = -np.log(data)
-    data[np.isnan(data)] = 6.0
-    data[np.isinf(data)] = 0
-    return data
-
-
 def __estimate_detectorHoriz_padding(detX_size) -> int:
     det_half = detX_size // 2
     padded_value_exact = int(np.sqrt(2 * (det_half**2))) - det_half
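Taken together, the hunks above remove the neglog option and the private _take_neg_log / _take_neg_log_np helpers, so in 5.1 none of these reconstruction methods applies the minus-log conversion internally; the 4.0 docstrings already assumed it is normally taken during normalisation or Paganin filtering. For a pipeline that did rely on neglog=True, a caller-side sketch that mirrors the removed helper line for line (the function name here is illustrative, not part of the package):

import cupy as cp

def take_neg_log(data: cp.ndarray) -> cp.ndarray:
    # Same steps as the deleted _take_neg_log helper shown in the hunk above.
    data[data <= 0] = 1
    data = -cp.log(data)
    data[cp.isnan(data)] = 6.0
    data[cp.isinf(data)] = 0
    return data

# e.g. data = take_neg_log(data) before calling FBP3d_tomobar / LPRec3d_tomobar / SIRT3d_tomobar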