httomolibgpu 2.3.2__py3-none-any.whl → 2.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
httomolibgpu/__init__.py CHANGED
@@ -11,5 +11,12 @@ from httomolibgpu.prep.stripe import (
11
11
  remove_all_stripe,
12
12
  )
13
13
 
14
- from httomolibgpu.recon.algorithm import FBP, LPRec, SIRT, CGLS
14
+ from httomolibgpu.recon.algorithm import (
15
+ FBP2d_astra,
16
+ FBP3d_tomobar,
17
+ LPRec3d_tomobar,
18
+ SIRT3d_tomobar,
19
+ CGLS3d_tomobar,
20
+ )
21
+
15
22
  from httomolibgpu.recon.rotation import find_center_vo, find_center_360, find_center_pc
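The reconstruction entry points are renamed in this release (FBP → FBP3d_tomobar, LPRec → LPRec3d_tomobar, and so on), so downstream imports need updating. A minimal migration sketch, assuming the 2.5 package layout shown above:

```python
# Hypothetical migration sketch: the commented line is the 2.3.2 import, the block below is 2.5.
# from httomolibgpu.recon.algorithm import FBP, LPRec, SIRT, CGLS   # 2.3.2
from httomolibgpu.recon.algorithm import (  # 2.5
    FBP2d_astra,
    FBP3d_tomobar,
    LPRec3d_tomobar,
    SIRT3d_tomobar,
    CGLS3d_tomobar,
)
```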
@@ -112,7 +112,7 @@ def data_resampler(
112
112
 
113
113
  Parameters
114
114
  ----------
115
- data : cp.ndarray
115
+ data : cp.ndarray
116
116
  3d cupy array.
117
117
  newshape : list
118
118
  2d list that defines the 2D slice shape of new shape data.
@@ -43,6 +43,8 @@ def normalize(
43
43
  data: cp.ndarray,
44
44
  flats: cp.ndarray,
45
45
  darks: cp.ndarray,
46
+ flats_multiplier: float = 1.0,
47
+ darks_multiplier: float = 1.0,
46
48
  cutoff: float = 10.0,
47
49
  minus_log: bool = True,
48
50
  nonnegativity: bool = False,
@@ -60,13 +62,17 @@ def normalize(
60
62
  3D flat field data as a CuPy array.
61
63
  darks : cp.ndarray
62
64
  3D dark field data as a CuPy array.
63
- cutoff : float, optional
65
+ flats_multiplier: float
66
+ A multiplier to apply to flats, can work as an intensity compensation constant.
67
+ darks_multiplier: float
68
+ A multiplier to apply to darks, can work as an intensity compensation constant.
69
+ cutoff : float
64
70
  Permitted maximum value for the normalised data.
65
- minus_log : bool, optional
71
+ minus_log : bool
66
72
  Apply negative log to the normalised data.
67
- nonnegativity : bool, optional
73
+ nonnegativity : bool
68
74
  Remove negative values in the normalised data.
69
- remove_nans : bool, optional
75
+ remove_nans : bool
70
76
  Remove NaN and Inf values in the normalised data.
71
77
 
72
78
  Returns
@@ -82,6 +88,9 @@ def normalize(
82
88
  mean(darks, axis=0, dtype=float32, out=dark0)
83
89
  mean(flats, axis=0, dtype=float32, out=flat0)
84
90
 
91
+ dark0 *= darks_multiplier
92
+ flat0 *= flats_multiplier
93
+
85
94
  kernel_name = "normalisation"
86
95
  kernel = r"""
87
96
  float denom = float(flats) - float(darks);
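The new multipliers simply scale the averaged dark and flat fields before the element-wise normalisation kernel runs. A minimal NumPy sketch of the core arithmetic only (no cutoff, -log or NaN handling), with illustrative names; the real method runs this as a CuPy kernel:

```python
import numpy as np

def normalize_sketch(data, flats, darks, flats_multiplier=1.0, darks_multiplier=1.0):
    # Average the flat/dark stacks over the angular axis, as mean(..., axis=0) does above.
    flat0 = flats.mean(axis=0, dtype=np.float32) * flats_multiplier
    dark0 = darks.mean(axis=0, dtype=np.float32) * darks_multiplier
    denom = flat0 - dark0
    denom[denom == 0] = 1.0  # guard against division by zero
    return (data.astype(np.float32) - dark0) / denom
```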
@@ -58,7 +58,7 @@ def remove_stripe_based_sorting(
58
58
  ) -> Union[cp.ndarray, np.ndarray]:
59
59
  """
60
60
  Remove full and partial stripe artifacts from sinogram using Nghia Vo's
61
- approach, see :cite:`vo2018superior`. This algorithm works particularly
61
+ approach, see :ref:`method_remove_stripe_based_sorting` and :cite:`vo2018superior`. This algorithm works particularly
62
62
  well for removing partial stripes.
63
63
 
64
64
  Steps of the algorithm: 1. Sort each column of the sinogram by its grayscale values.
@@ -119,7 +119,7 @@ def remove_stripe_ti(
119
119
  ) -> Union[cp.ndarray, np.ndarray]:
120
120
  """
121
121
  Removes stripes with the method of V. Titarenko (TomoCuPy implementation).
122
- See :cite:`titarenko2010analytical`.
122
+ See :ref:`method_remove_stripe_ti` and :cite:`titarenko2010analytical`.
123
123
 
124
124
  Parameters
125
125
  ----------
@@ -179,7 +179,7 @@ def remove_all_stripe(
179
179
  ) -> cp.ndarray:
180
180
  """
181
181
  Remove all types of stripe artifacts from sinogram using Nghia Vo's
182
- approach, see :cite:`vo2018superior` (combination of algorithm 3,4,5, and 6).
182
+ approach, see :ref:`method_remove_all_stripe` and :cite:`vo2018superior` (combination of algorithm 3,4,5, and 6).
183
183
 
184
184
  Parameters
185
185
  ----------
@@ -205,40 +205,12 @@ def remove_all_stripe(
205
205
  for m in range(data.shape[1]):
206
206
  sino = data[:, m, :]
207
207
  sino = _rs_dead(sino, snr, la_size, matindex)
208
- sino = _rs_sort2(sino, sm_size, matindex, dim)
208
+ sino = _rs_sort(sino, sm_size, dim)
209
209
  sino = cp.nan_to_num(sino)
210
210
  data[:, m, :] = sino
211
211
  return data
212
212
 
213
213
 
214
- def _rs_sort2(sinogram, size, matindex, dim):
215
- """
216
- Remove stripes using the sorting technique.
217
- """
218
- sinogram = cp.transpose(sinogram)
219
- matcomb = cp.asarray(cp.dstack((matindex, sinogram)))
220
-
221
- # matsort = cp.asarray([row[row[:, 1].argsort()] for row in matcomb])
222
- ids = cp.argsort(matcomb[:, :, 1], axis=1)
223
- matsort = matcomb.copy()
224
- matsort[:, :, 0] = cp.take_along_axis(matsort[:, :, 0], ids, axis=1)
225
- matsort[:, :, 1] = cp.take_along_axis(matsort[:, :, 1], ids, axis=1)
226
- if dim == 1:
227
- matsort[:, :, 1] = median_filter(matsort[:, :, 1], (size, 1))
228
- else:
229
- matsort[:, :, 1] = median_filter(matsort[:, :, 1], (size, size))
230
-
231
- # matsortback = cp.asarray([row[row[:, 0].argsort()] for row in matsort])
232
-
233
- ids = cp.argsort(matsort[:, :, 0], axis=1)
234
- matsortback = matsort.copy()
235
- matsortback[:, :, 0] = cp.take_along_axis(matsortback[:, :, 0], ids, axis=1)
236
- matsortback[:, :, 1] = cp.take_along_axis(matsortback[:, :, 1], ids, axis=1)
237
-
238
- sino_corrected = matsortback[:, :, 1]
239
- return cp.transpose(sino_corrected)
240
-
241
-
242
214
  def _mpolyfit(x, y):
243
215
  n = len(x)
244
216
  x_mean = cp.mean(x)
@@ -261,8 +233,6 @@ def _detect_stripe(listdata, snr):
261
233
  listsorted = cp.sort(listdata)[::-1]
262
234
  xlist = cp.arange(0, numdata, 1.0)
263
235
  ndrop = cp.int16(0.25 * numdata)
264
- # (_slope, _intercept) = cp.polyfit(xlist[ndrop:-ndrop - 1],
265
- # listsorted[ndrop:-ndrop - 1], 1)
266
236
  (_slope, _intercept) = _mpolyfit(
267
237
  xlist[ndrop : -ndrop - 1], listsorted[ndrop : -ndrop - 1]
268
238
  )
@@ -293,11 +263,6 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
293
263
  sinosmooth = median_filter(sinosort, (1, size))
294
264
  list1 = cp.mean(sinosort[ndrop : nrow - ndrop], axis=0)
295
265
  list2 = cp.mean(sinosmooth[ndrop : nrow - ndrop], axis=0)
296
- # listfact = cp.divide(list1,
297
- # list2,
298
- # out=cp.ones_like(list1),
299
- # where=list2 != 0)
300
-
301
266
  listfact = list1 / list2
302
267
 
303
268
  # Locate stripes
@@ -310,14 +275,12 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
310
275
  sinogram1 = cp.transpose(sinogram)
311
276
  matcombine = cp.asarray(cp.dstack((matindex, sinogram1)))
312
277
 
313
- # matsort = cp.asarray([row[row[:, 1].argsort()] for row in matcombine])
314
278
  ids = cp.argsort(matcombine[:, :, 1], axis=1)
315
279
  matsort = matcombine.copy()
316
280
  matsort[:, :, 0] = cp.take_along_axis(matsort[:, :, 0], ids, axis=1)
317
281
  matsort[:, :, 1] = cp.take_along_axis(matsort[:, :, 1], ids, axis=1)
318
282
 
319
283
  matsort[:, :, 1] = cp.transpose(sinosmooth)
320
- # matsortback = cp.asarray([row[row[:, 0].argsort()] for row in matsort])
321
284
  ids = cp.argsort(matsort[:, :, 0], axis=1)
322
285
  matsortback = matsort.copy()
323
286
  matsortback[:, :, 0] = cp.take_along_axis(matsortback[:, :, 0], ids, axis=1)
@@ -330,12 +293,9 @@ def _rs_large(sinogram, snr, size, matindex, drop_ratio=0.1, norm=True):
330
293
 
331
294
 
332
295
  def _rs_dead(sinogram, snr, size, matindex, norm=True):
333
- """
334
- Remove unresponsive and fluctuating stripes.
335
- """
296
+ """remove unresponsive and fluctuating stripes"""
336
297
  sinogram = cp.copy(sinogram) # Make it mutable
337
298
  (nrow, _) = sinogram.shape
338
- # sinosmooth = cp.apply_along_axis(uniform_filter1d, 0, sinogram, 10)
339
299
  sinosmooth = uniform_filter1d(sinogram, 10, axis=0)
340
300
 
341
301
  listdiff = cp.sum(cp.abs(sinogram - sinosmooth), axis=0)
@@ -344,22 +304,22 @@ def _rs_dead(sinogram, snr, size, matindex, norm=True):
344
304
  listfact = listdiff / listdiffbck
345
305
 
346
306
  listmask = _detect_stripe(listfact, snr)
307
+ del listfact
347
308
  listmask = binary_dilation(listmask, iterations=1).astype(listmask.dtype)
348
309
  listmask[0:2] = 0.0
349
310
  listmask[-2:] = 0.0
350
- listx = cp.where(listmask < 1.0)[0]
351
- listy = cp.arange(nrow)
352
- matz = sinogram[:, listx]
353
311
 
312
+ listx = cp.where(listmask < 1.0)[0]
354
313
  listxmiss = cp.where(listmask > 0.0)[0]
314
+ del listmask
355
315
 
356
- # finter = interpolate.interp2d(listx.get(), listy.get(), matz.get(), kind='linear')
357
316
  if len(listxmiss) > 0:
358
- # sinogram_c[:, listxmiss.get()] = finter(listxmiss.get(), listy.get())
359
317
  ids = cp.searchsorted(listx, listxmiss)
360
- sinogram[:, listxmiss] = matz[:, ids - 1] + (listxmiss - listx[ids - 1]) * (
361
- matz[:, ids] - matz[:, ids - 1]
362
- ) / (listx[ids] - listx[ids - 1])
318
+ weights = (listxmiss - listx[ids - 1]) / (listx[ids] - listx[ids - 1])
319
+ # direct interpolation without making an extra copy
320
+ sinogram[:, listxmiss] = sinogram[:, listx[ids - 1]] + weights * (
321
+ sinogram[:, listx[ids]] - sinogram[:, listx[ids - 1]]
322
+ )
363
323
 
364
324
  # Remove residual stripes
365
325
  if norm is True:
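The rewritten dead-stripe step now interpolates the masked columns directly from their neighbouring good columns instead of building an intermediate copy. A small NumPy sketch of that linear interpolation via searchsorted, assuming `listx` holds the good column indices and `listxmiss` the stripe columns to re-fill (values illustrative):

```python
import numpy as np

sino = np.random.rand(6, 8).astype(np.float32)
listx = np.array([0, 1, 2, 5, 6, 7])       # columns kept
listxmiss = np.array([3, 4])               # stripe columns to re-fill

ids = np.searchsorted(listx, listxmiss)    # index of the right-hand good neighbour
weights = (listxmiss - listx[ids - 1]) / (listx[ids] - listx[ids - 1])
# linear blend between the left and right good columns
sino[:, listxmiss] = sino[:, listx[ids - 1]] + weights * (
    sino[:, listx[ids]] - sino[:, listx[ids - 1]]
)
```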
@@ -455,7 +415,7 @@ def raven_filter(
455
415
  # Removing padding
456
416
  data = data[pad_y : height - pad_y, :, pad_x : width - pad_x].real
457
417
 
458
- return data
418
+ return cp.require(data, requirements="C")
459
419
 
460
420
 
461
421
  def _create_matindex(nrow, ncol):
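Cropping the padding above leaves a sliced (possibly non-contiguous) view, which is why the filter now returns a C-contiguous array. A tiny NumPy illustration of the same guarantee; cp.require behaves analogously for CuPy arrays:

```python
import numpy as np

padded = np.zeros((10, 4, 10), dtype=np.complex64)
cropped = padded[2:8, :, 2:8].real            # sliced view: not C-contiguous
out = np.require(cropped, requirements="C")   # forces a contiguous copy when needed
print(cropped.flags["C_CONTIGUOUS"], out.flags["C_CONTIGUOUS"])  # False True
```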
@@ -28,6 +28,8 @@ cupy_run = cupywrapper.cupy_run
28
28
 
29
29
  from unittest.mock import Mock
30
30
 
31
+ from tomobar.methodsDIR import RecToolsDIR
32
+
31
33
  if cupy_run:
32
34
  from tomobar.methodsDIR_CuPy import RecToolsDIRCuPy
33
35
  from tomobar.methodsIR_CuPy import RecToolsIRCuPy
@@ -40,23 +42,104 @@ from typing import Optional, Type
40
42
 
41
43
 
42
44
  __all__ = [
43
- "FBP",
44
- "LPRec",
45
- "SIRT",
46
- "CGLS",
45
+ "FBP2d_astra",
46
+ "FBP3d_tomobar",
47
+ "LPRec3d_tomobar",
48
+ "SIRT3d_tomobar",
49
+ "CGLS3d_tomobar",
47
50
  ]
48
51
 
49
52
  input_data_axis_labels = ["angles", "detY", "detX"] # set the labels of the input data
50
53
 
51
54
 
55
+ ## %%%%%%%%%%%%%%%%%%%%%%% FBP2d_astra reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
56
+ def FBP2d_astra(
57
+ data: np.ndarray,
58
+ angles: np.ndarray,
59
+ center: Optional[float] = None,
60
+ filter_type: str = "ram-lak",
61
+ filter_parameter: Optional[float] = None,
62
+ filter_d: Optional[float] = None,
63
+ recon_size: Optional[int] = None,
64
+ recon_mask_radius: float = 0.95,
65
+ neglog: bool = False,
66
+ gpu_id: int = 0,
67
+ ) -> np.ndarray:
68
+ """
69
+ Perform Filtered Backprojection (FBP) reconstruction slice-by-slice (2d) using ASTRA toolbox :cite:`van2016fast` and
70
+ ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
71
+ This is a 2D recon using ASTRA's API for the FBP method; for more parameters, see ASTRA's documentation here:
72
+ https://astra-toolbox.com/docs/algs/FBP_CUDA.html.
73
+
74
+ Parameters
75
+ ----------
76
+ data : np.ndarray
77
+ Projection data as a 3d numpy array.
78
+ angles : np.ndarray
79
+ An array of angles given in radians.
80
+ center : float, optional
81
+ The center of rotation (CoR).
82
+ filter_type: str
83
+ Type of projection filter, see ASTRA's API for all available options for filters.
84
+ filter_parameter: float, optional
85
+ Parameter value for the 'tukey', 'gaussian', 'blackman' and 'kaiser' filter types.
86
+ filter_d: float, optional
87
+ D parameter value for 'shepp-logan', 'cosine', 'hamming' and 'hann' filter types.
88
+ recon_size : int, optional
89
+ The [recon_size, recon_size] shape of the reconstructed slice in pixels.
90
+ By default (None), the reconstructed size will be the dimension of the horizontal detector.
91
+ recon_mask_radius: float
92
+ The radius of the circular mask that applies to the reconstructed slice in order to crop
93
+ out some undesirable artifacts. The values outside the given diameter will be set to zero.
94
+ It is recommended to keep the value in the range [0.7-1.0].
95
+ neglog: bool
96
+ Take the negative logarithm of the input data to convert it to the attenuation coefficient (density) of the scanned object. Defaults to False,
97
+ assuming that the negative log has already been taken, either in the normalisation procedure or with the Paganin filter application.
98
+ gpu_id : int
99
+ A GPU device index to perform operation on.
100
+
101
+ Returns
102
+ -------
103
+ np.ndarray
104
+ The FBP reconstructed volume as a numpy array.
105
+ """
106
+ data_shape = np.shape(data)
107
+ if recon_size is None:
108
+ recon_size = data_shape[2]
109
+
110
+ RecTools = _instantiate_direct_recon2d_class(
111
+ data, angles, center, recon_size, gpu_id
112
+ )
113
+
114
+ detY_size = data_shape[1]
115
+ reconstruction = np.empty(
116
+ (recon_size, detY_size, recon_size), dtype=np.float32(), order="C"
117
+ )
118
+ data = _take_neg_log_np(data) if neglog else data
119
+
120
+ # loop over detY slices
121
+ for slice_index in range(0, detY_size):
122
+ reconstruction[:, slice_index, :] = np.flipud(
123
+ RecTools.FBP(
124
+ data[:, slice_index, :],
125
+ filter_type=filter_type,
126
+ filter_parameter=filter_parameter,
127
+ filter_d=filter_d,
128
+ recon_mask_radius=recon_mask_radius,
129
+ )
130
+ )
131
+ return reconstruction
132
+
133
+
52
134
  ## %%%%%%%%%%%%%%%%%%%%%%% FBP reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
53
- def FBP(
135
+ def FBP3d_tomobar(
54
136
  data: cp.ndarray,
55
137
  angles: np.ndarray,
56
138
  center: Optional[float] = None,
57
- filter_freq_cutoff: Optional[float] = 0.35,
139
+ filter_freq_cutoff: float = 0.35,
58
140
  recon_size: Optional[int] = None,
59
141
  recon_mask_radius: Optional[float] = 0.95,
142
+ neglog: bool = False,
60
143
  gpu_id: int = 0,
61
144
  ) -> cp.ndarray:
62
145
  """
@@ -67,7 +150,7 @@ def FBP(
67
150
  Parameters
68
151
  ----------
69
152
  data : cp.ndarray
70
- Projection data as a CuPy array.
153
+ Projection data as a 3d CuPy array.
71
154
  angles : np.ndarray
72
155
  An array of angles given in radians.
73
156
  center : float, optional
@@ -77,24 +160,27 @@ def FBP(
77
160
  recon_size : int, optional
78
161
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
79
162
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
80
- recon_mask_radius: float
163
+ recon_mask_radius: float, optional
81
164
  The radius of the circular mask that applies to the reconstructed slice in order to crop
82
165
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
83
166
  It is recommended to keep the value in the range [0.7-1.0].
167
+ neglog: bool
168
+ Take the negative logarithm of the input data to convert it to the attenuation coefficient (density) of the scanned object. Defaults to False,
169
+ assuming that the negative log has already been taken, either in the normalisation procedure or with the Paganin filter application.
84
170
  gpu_id : int
85
171
  A GPU device index to perform operation on.
86
172
 
87
173
  Returns
88
174
  -------
89
175
  cp.ndarray
90
- The FBP reconstructed volume as a CuPy array.
176
+ FBP reconstructed volume as a CuPy array.
91
177
  """
92
178
  RecToolsCP = _instantiate_direct_recon_class(
93
179
  data, angles, center, recon_size, gpu_id
94
180
  )
95
181
 
96
182
  reconstruction = RecToolsCP.FBP(
97
- data,
183
+ _take_neg_log(data) if neglog else data,
98
184
  cutoff_freq=filter_freq_cutoff,
99
185
  recon_mask_radius=recon_mask_radius,
100
186
  data_axes_labels_order=input_data_axis_labels,
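With the new neglog flag, FBP3d_tomobar can take the -log itself when the data were normalised with minus_log disabled. A minimal sketch, assuming a CUDA-capable GPU and illustrative shapes:

```python
import numpy as np
import cupy as cp
from httomolibgpu.recon.algorithm import FBP3d_tomobar

# Illustrative normalised transmission data (negative log not yet applied).
projdata = cp.random.rand(180, 8, 160, dtype=cp.float32) + 0.1
angles = np.linspace(0, np.pi, 180, dtype=np.float32)

vol = FBP3d_tomobar(projdata, angles, center=79.5, neglog=True)
```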
@@ -104,12 +190,13 @@ def FBP(
104
190
 
105
191
 
106
192
  ## %%%%%%%%%%%%%%%%%%%%%%% LPRec %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
107
- def LPRec(
193
+ def LPRec3d_tomobar(
108
194
  data: cp.ndarray,
109
195
  angles: np.ndarray,
110
196
  center: Optional[float] = None,
111
197
  recon_size: Optional[int] = None,
112
198
  recon_mask_radius: Optional[float] = 0.95,
199
+ neglog: bool = False,
113
200
  ) -> cp.ndarray:
114
201
  """
115
202
  Fourier direct inversion in 3D on unequally spaced (also called Log-Polar) grids using
@@ -119,7 +206,7 @@ def LPRec(
119
206
  Parameters
120
207
  ----------
121
208
  data : cp.ndarray
122
- Projection data as a CuPy array.
209
+ Projection data as a 3d CuPy array.
123
210
  angles : np.ndarray
124
211
  An array of angles given in radians.
125
212
  center : float, optional
@@ -127,10 +214,13 @@ def LPRec(
127
214
  recon_size : int, optional
128
215
  The [recon_size, recon_size] shape of the reconstructed slice in pixels.
129
216
  By default (None), the reconstructed size will be the dimension of the horizontal detector.
130
- recon_mask_radius: float
217
+ recon_mask_radius: float, optional
131
218
  The radius of the circular mask that applies to the reconstructed slice in order to crop
132
219
  out some undesirable artifacts. The values outside the given diameter will be set to zero.
133
220
  It is recommended to keep the value in the range [0.7-1.0].
221
+ neglog: bool
222
+ Take the negative logarithm of the input data to convert it to the attenuation coefficient (density) of the scanned object. Defaults to False,
223
+ assuming that the negative log has already been taken, either in the normalisation procedure or with the Paganin filter application.
134
224
 
135
225
  Returns
136
226
  -------
@@ -140,7 +230,7 @@ def LPRec(
140
230
  RecToolsCP = _instantiate_direct_recon_class(data, angles, center, recon_size, 0)
141
231
 
142
232
  reconstruction = RecToolsCP.FOURIER_INV(
143
- data,
233
+ _take_neg_log(data) if neglog else data,
144
234
  recon_mask_radius=recon_mask_radius,
145
235
  data_axes_labels_order=input_data_axis_labels,
146
236
  )
@@ -149,19 +239,21 @@ def LPRec(
149
239
 
150
240
 
151
241
  ## %%%%%%%%%%%%%%%%%%%%%%% SIRT reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
152
- def SIRT(
242
+ def SIRT3d_tomobar(
153
243
  data: cp.ndarray,
154
244
  angles: np.ndarray,
155
245
  center: Optional[float] = None,
156
246
  recon_size: Optional[int] = None,
157
247
  iterations: Optional[int] = 300,
158
248
  nonnegativity: Optional[bool] = True,
249
+ neglog: bool = False,
159
250
  gpu_id: int = 0,
160
251
  ) -> cp.ndarray:
161
252
  """
162
253
  Perform Simultaneous Iterative Recostruction Technique (SIRT) using ASTRA toolbox :cite:`van2016fast` and
163
254
  ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
164
- This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability.
255
+ This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability to avoid host-device
256
+ transactions for projection and backprojection.
165
257
 
166
258
  Parameters
167
259
  ----------
@@ -178,6 +270,9 @@ def SIRT(
178
270
  The number of SIRT iterations.
179
271
  nonnegativity : bool, optional
180
272
  Impose nonnegativity constraint on reconstructed image.
273
+ neglog: bool
274
+ Take the negative logarithm of the input data to convert it to the attenuation coefficient (density) of the scanned object. Defaults to False,
275
+ assuming that the negative log has already been taken, either in the normalisation procedure or with the Paganin filter application.
181
276
  gpu_id : int, optional
182
277
  A GPU device index to perform operation on.
183
278
 
@@ -187,11 +282,16 @@ def SIRT(
187
282
  The SIRT reconstructed volume as a CuPy array.
188
283
  """
189
284
  RecToolsCP = _instantiate_iterative_recon_class(
190
- data, angles, center, recon_size, gpu_id, datafidelity="LS"
285
+ data,
286
+ angles,
287
+ center,
288
+ recon_size,
289
+ gpu_id,
290
+ datafidelity="LS",
191
291
  )
192
292
 
193
293
  _data_ = {
194
- "projection_norm_data": data,
294
+ "projection_norm_data": _take_neg_log(data) if neglog else data,
195
295
  "data_axes_labels_order": input_data_axis_labels,
196
296
  } # data dictionary
197
297
  _algorithm_ = {
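A hedged usage sketch for the iterative route, assuming GPU-resident normalised data (shapes illustrative); iterations and nonnegativity map straight into the _algorithm_ dictionary shown above:

```python
import numpy as np
import cupy as cp
from httomolibgpu.recon.algorithm import SIRT3d_tomobar

projdata = cp.random.rand(180, 8, 160, dtype=cp.float32) + 0.1  # illustrative data
angles = np.linspace(0, np.pi, 180, dtype=np.float32)

vol = SIRT3d_tomobar(
    projdata, angles, center=79.5,
    iterations=150, nonnegativity=True, neglog=True,
)
```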
@@ -204,19 +304,21 @@ def SIRT(
204
304
 
205
305
 
206
306
  ## %%%%%%%%%%%%%%%%%%%%%%% CGLS reconstruction %%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
207
- def CGLS(
307
+ def CGLS3d_tomobar(
208
308
  data: cp.ndarray,
209
309
  angles: np.ndarray,
210
310
  center: Optional[float] = None,
211
311
  recon_size: Optional[int] = None,
212
312
  iterations: Optional[int] = 20,
213
313
  nonnegativity: Optional[bool] = True,
314
+ neglog: bool = False,
214
315
  gpu_id: int = 0,
215
316
  ) -> cp.ndarray:
216
317
  """
217
- Perform Congugate Gradient Least Squares (CGLS) using ASTRA toolbox :cite:`van2016fast` and
318
+ Perform Conjugate Gradient Least Squares (CGLS) using ASTRA toolbox :cite:`van2016fast` and
218
319
  ToMoBAR :cite:`kazantsev2020tomographic` wrappers.
219
- This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability.
320
+ This is 3D recon directly from a CuPy array while using ASTRA GPUlink capability to avoid host-device
321
+ transactions for projection and backprojection.
220
322
 
221
323
  Parameters
222
324
  ----------
@@ -233,6 +335,9 @@ def CGLS(
233
335
  The number of CGLS iterations.
234
336
  nonnegativity : bool, optional
235
337
  Impose nonnegativity constraint on reconstructed image.
338
+ neglog: bool
339
+ Take the negative logarithm of the input data to convert it to the attenuation coefficient (density) of the scanned object. Defaults to False,
340
+ assuming that the negative log has already been taken, either in the normalisation procedure or with the Paganin filter application.
236
341
  gpu_id : int, optional
237
342
  A GPU device index to perform operation on.
238
343
 
@@ -246,7 +351,7 @@ def CGLS(
246
351
  )
247
352
 
248
353
  _data_ = {
249
- "projection_norm_data": data,
354
+ "projection_norm_data": _take_neg_log(data) if neglog else data,
250
355
  "data_axes_labels_order": input_data_axis_labels,
251
356
  } # data dictionary
252
357
  _algorithm_ = {"iterations": iterations, "nonnegativity": nonnegativity}
@@ -292,6 +397,43 @@ def _instantiate_direct_recon_class(
292
397
  return RecToolsCP
293
398
 
294
399
 
400
+ ## %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% ##
401
+ def _instantiate_direct_recon2d_class(
402
+ data: np.ndarray,
403
+ angles: np.ndarray,
404
+ center: Optional[float] = None,
405
+ recon_size: Optional[int] = None,
406
+ gpu_id: int = 0,
407
+ ) -> Type:
408
+ """instantiate ToMoBAR's direct recon class for 2d reconstruction
409
+
410
+ Args:
411
+ data (np.ndarray): projection data array
412
+ angles (np.ndarray): angles
413
+ center (Optional[float], optional): center of recon. Defaults to None.
414
+ recon_size (Optional[int], optional): recon_size. Defaults to None.
415
+ gpu_id (int, optional): gpu ID. Defaults to 0.
416
+
417
+ Returns:
418
+ Type[RecToolsDIR]: an instance of the direct recon class
419
+ """
420
+ if center is None:
421
+ center = data.shape[2] // 2 # making a crude guess
422
+ if recon_size is None:
423
+ recon_size = data.shape[2]
424
+ RecTools = RecToolsDIR(
425
+ DetectorsDimH=data.shape[2], # Horizontal detector dimension
426
+ DetectorsDimV=None, # 2d case
427
+ CenterRotOffset=data.shape[2] / 2
428
+ - center
429
+ - 0.5, # Center of Rotation scalar or a vector
430
+ AnglesVec=-angles, # A vector of projection angles in radians
431
+ ObjSize=recon_size, # Reconstructed object dimensions (scalar)
432
+ device_projector=gpu_id,
433
+ )
434
+ return RecTools
435
+
436
+
295
437
  def _instantiate_iterative_recon_class(
296
438
  data: cp.ndarray,
297
439
  angles: np.ndarray,
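The new 2d helper converts an absolute centre-of-rotation value into ToMoBAR's relative CenterRotOffset. A worked example of that arithmetic, assuming an illustrative 2560-pixel-wide detector and CoR estimate:

```python
# Illustrative numbers only: detector width and an absolute CoR estimate.
detX = 2560
center = 1290.0

# Same expression as in _instantiate_direct_recon2d_class above.
center_rot_offset = detX / 2 - center - 0.5   # 1280 - 1290 - 0.5 = -10.5
print(center_rot_offset)
```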
@@ -329,3 +471,21 @@ def _instantiate_iterative_recon_class(
329
471
  device_projector=gpu_id,
330
472
  )
331
473
  return RecToolsCP
474
+
475
+
476
+ def _take_neg_log(data: cp.ndarray) -> cp.ndarray:
477
+ """Taking negative log"""
478
+ data[data <= 0] = 1
479
+ data = -cp.log(data)
480
+ data[cp.isnan(data)] = 6.0
481
+ data[cp.isinf(data)] = 0
482
+ return data
483
+
484
+
485
+ def _take_neg_log_np(data: np.ndarray) -> np.ndarray:
486
+ """Taking negative log"""
487
+ data[data <= 0] = 1
488
+ data = -np.log(data)
489
+ data[np.isnan(data)] = 6.0
490
+ data[np.isinf(data)] = 0
491
+ return data
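The two _take_neg_log helpers clamp non-positive intensities to 1 (so the logarithm is defined) before negating it. A small NumPy illustration of that mapping, with illustrative values:

```python
import numpy as np

vals = np.array([-0.2, 0.0, 0.5, 1.0], dtype=np.float32)
vals[vals <= 0] = 1          # clamp invalid pixels so that -log(1) = 0
neg_log = -np.log(vals)      # clamped pixels map to 0; -log(0.5) ≈ 0.693
```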
@@ -1,6 +1,6 @@
1
- Metadata-Version: 2.2
1
+ Metadata-Version: 2.4
2
2
  Name: httomolibgpu
3
- Version: 2.3.2
3
+ Version: 2.5
4
4
  Summary: Commonly used tomography data processing methods at DLS.
5
5
  Author-email: Daniil Kazantsev <daniil.kazantsev@diamond.ac.uk>, Yousef Moazzam <yousef.moazzam@diamond.ac.uk>, Naman Gera <naman.gera@diamond.ac.uk>
6
6
  License: BSD-3-Clause
@@ -19,6 +19,7 @@ Requires-Dist: scipy
19
19
  Requires-Dist: pillow
20
20
  Requires-Dist: scikit-image
21
21
  Requires-Dist: tomobar
22
+ Requires-Dist: ccpi-regularisation-cupy
22
23
  Provides-Extra: dev
23
24
  Requires-Dist: pytest; extra == "dev"
24
25
  Requires-Dist: pytest-cov; extra == "dev"
@@ -32,6 +33,7 @@ Requires-Dist: imageio; extra == "dev"
32
33
  Requires-Dist: h5py; extra == "dev"
33
34
  Requires-Dist: pre-commit; extra == "dev"
34
35
  Requires-Dist: pyfftw; extra == "dev"
36
+ Dynamic: license-file
35
37
 
36
38
  HTTomolibGPU is a library of GPU accelerated methods for tomography
37
39
  --------------------------------------------------------------------
@@ -49,19 +51,28 @@ Although **HTTomolibGPU** can be used as a stand-alone library, it has been spec
49
51
  its backend for data processing. HTTomo is a user interface (UI) written in Python for fast processing of big tomographic data, either using
50
52
  MPI protocols or serially.
51
53
 
52
- Install HTTomolibGPU as a PyPi package
53
- =========================================================
54
+ Installation
55
+ ============
56
+
57
+ HTTomolibGPU is available on PyPI, so it can be installed into either a virtual environment or
58
+ a conda environment.
59
+
60
+ Virtual environment
61
+ ~~~~~~~~~~~~~~~~~~~
54
62
  .. code-block:: console
55
63
 
64
+ $ python -m venv httomolibgpu
65
+ $ source httomolibgpu/bin/activate
56
66
  $ pip install httomolibgpu
57
67
 
58
- Install HTTomolibGPU as a pre-built conda Python package
59
- =========================================================
68
+ Conda environment
69
+ ~~~~~~~~~~~~~~~~~
60
70
  .. code-block:: console
61
71
 
62
72
  $ conda create --name httomolibgpu # create a fresh conda environment
63
73
  $ conda activate httomolibgpu # activate the environment
64
- $ conda install -c httomo httomolibgpu -c conda-forge # for linux users
74
+ $ conda install -c conda-forge cupy==12.3.0 # for linux users
75
+ $ pip install httomolibgpu
65
76
 
66
77
  Setup the development environment:
67
78
  ==================================
@@ -69,14 +80,6 @@ Setup the development environment:
69
80
  .. code-block:: console
70
81
 
71
82
  $ git clone git@github.com:DiamondLightSource/httomolibgpu.git # clone the repo
72
- $ conda env create --name httomolibgpu --file conda/environment.yml # install dependencies
83
+ $ conda create --name httomolibgpu -c conda-forge cupy==12.3.0 # install dependencies
73
84
  $ conda activate httomolibgpu # activate the environment
74
- $ pip install -e .[dev] # editable/development mode
75
-
76
- Build HTTomolibGPU as a conda Python package
77
- ============================================
78
-
79
- .. code-block:: console
80
-
81
- $ conda build conda/recipe/ -c conda-forge -c httomo
82
-
85
+ $ pip install -e ./httomolibgpu[dev] # editable/development mode
@@ -1,4 +1,4 @@
1
- httomolibgpu/__init__.py,sha256=uGzTIzYjxYoXpaMoy60f_5niLS_FF_ayuF_s0pihlps,741
1
+ httomolibgpu/__init__.py,sha256=nLXdPpzb6smp80bCciANQHJq0o-mmMJkuga-PitGmzw,820
2
2
  httomolibgpu/cupywrapper.py,sha256=6ITGJ2Jw5I5kVmKEL5LlsnLRniEqqBLsHiAjvLtk0Xk,493
3
3
  httomolibgpu/cuda_kernels/__init__.py,sha256=VQNMaGcVDwiE-C64FfLtubHpLriLG0Y3_QnjHBSHrN0,884
4
4
  httomolibgpu/cuda_kernels/calc_metrics.cu,sha256=oV7ZPcwjWafmZjbNsUkBYPvOViJ_nX3zBoOAuPCmIrA,11335
@@ -10,18 +10,18 @@ httomolibgpu/cuda_kernels/raven_filter.cu,sha256=KX2TM_9tMpvoGCHezDNWYABCnv2cT9m
10
10
  httomolibgpu/misc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
11
11
  httomolibgpu/misc/corr.py,sha256=0_jMX_Pc962aGZlHZW2cp7KJFJiAY065f45S9ruWMTY,4440
12
12
  httomolibgpu/misc/denoise.py,sha256=-5QKP_cJQTzaVydQPlBOFfkdoEWvy5mBqCbjp7S4vaQ,4546
13
- httomolibgpu/misc/morph.py,sha256=sb5sm3DjY0NIghvzyZDAIVr2QvWDTeK-KUwShuXGlEE,7224
13
+ httomolibgpu/misc/morph.py,sha256=QZTutZ28YrvTJ9wyBmgEtU9cW4bcQH6CnkETEMcaVdA,7223
14
14
  httomolibgpu/misc/rescale.py,sha256=prJa6IvdoB4O9FuonglfPsjkV_Y3xKajSFcsX8gO_Gs,4982
15
15
  httomolibgpu/prep/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
16
16
  httomolibgpu/prep/alignment.py,sha256=GVxnyioipmqsHb4s3mPQ8tKGoPIQMPftDrQxUO-HBuE,5491
17
- httomolibgpu/prep/normalize.py,sha256=V3SnqigoZaL7QB3BE8I4gcrL6wyDWgFk9rpZkoD9vxs,4270
17
+ httomolibgpu/prep/normalize.py,sha256=DV8CskBOPySJzydOaKOj2ytzPC7f73RS9rrqQ02acMU,4593
18
18
  httomolibgpu/prep/phase.py,sha256=vuKL3GkeJ0IG0ez8zG8t6aDoEWJNxnIdGJvaIF3kuyE,11823
19
- httomolibgpu/prep/stripe.py,sha256=xDWS02dKLyfYnKIeskB83aNZqZBiNXGvJhhXlgmBgRM,16071
19
+ httomolibgpu/prep/stripe.py,sha256=_xHcIYQ-e5tN3mxASvYb9khX5dM1PB1dBXyLn9re9AQ,14541
20
20
  httomolibgpu/recon/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
21
- httomolibgpu/recon/algorithm.py,sha256=gIBpCTo28L7rrQCve1aILEiEPuu_f88lnX9oIwyt_Yo,11990
21
+ httomolibgpu/recon/algorithm.py,sha256=g-IEcPsmyZ_OjSDTnlibx5j3duHh7t5LOLQyFeuEwpc,18576
22
22
  httomolibgpu/recon/rotation.py,sha256=zdi8I21hh8DGmij2pvncY35cioVz0ioauSsgaBPiKQs,26926
23
- httomolibgpu-2.3.2.dist-info/LICENSE,sha256=bXeLsgelPUUXw8HCIYiVC97Dpjhm2nB54m7TACdH8ng,48032
24
- httomolibgpu-2.3.2.dist-info/METADATA,sha256=_ELLLqTKpR6CY1iakEk847K64isHzZg4R5nyzSBsWks,3402
25
- httomolibgpu-2.3.2.dist-info/WHEEL,sha256=In9FTNxeP60KnTkGw7wk6mJPYd_dQSjEZmXdBdMCI-8,91
26
- httomolibgpu-2.3.2.dist-info/top_level.txt,sha256=nV0Ty_YvSPVd1O6MNWuIplD0w1nwk5hT76YgBZ-bzUw,13
27
- httomolibgpu-2.3.2.dist-info/RECORD,,
23
+ httomolibgpu-2.5.dist-info/licenses/LICENSE,sha256=bXeLsgelPUUXw8HCIYiVC97Dpjhm2nB54m7TACdH8ng,48032
24
+ httomolibgpu-2.5.dist-info/METADATA,sha256=KSbY0l0HXRv41ht58KdaL0dMBrq2tCXfjezbTvH5r2Q,3399
25
+ httomolibgpu-2.5.dist-info/WHEEL,sha256=DnLRTWE75wApRYVsjgc6wsVswC54sMSJhAEd4xhDpBk,91
26
+ httomolibgpu-2.5.dist-info/top_level.txt,sha256=nV0Ty_YvSPVd1O6MNWuIplD0w1nwk5hT76YgBZ-bzUw,13
27
+ httomolibgpu-2.5.dist-info/RECORD,,
@@ -1,5 +1,5 @@
1
1
  Wheel-Version: 1.0
2
- Generator: setuptools (75.8.0)
2
+ Generator: setuptools (80.4.0)
3
3
  Root-Is-Purelib: true
4
4
  Tag: py3-none-any
5
5