nabu 2024.1.10__py3-none-any.whl → 2024.2.0rc1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (152)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap.py +2 -3
  3. nabu/app/cast_volume.py +4 -2
  4. nabu/app/cli_configs.py +5 -0
  5. nabu/app/composite_cor.py +1 -1
  6. nabu/app/create_distortion_map_from_poly.py +5 -6
  7. nabu/app/diag_to_pix.py +7 -19
  8. nabu/app/diag_to_rot.py +14 -29
  9. nabu/app/double_flatfield.py +32 -44
  10. nabu/app/parse_reconstruction_log.py +3 -0
  11. nabu/app/reconstruct.py +53 -15
  12. nabu/app/reconstruct_helical.py +2 -2
  13. nabu/app/stitching.py +27 -13
  14. nabu/app/tests/__init__.py +0 -0
  15. nabu/app/tests/test_reduce_dark_flat.py +4 -1
  16. nabu/cuda/kernel.py +11 -2
  17. nabu/cuda/processing.py +2 -2
  18. nabu/cuda/src/cone.cu +77 -0
  19. nabu/cuda/src/hierarchical_backproj.cu +271 -0
  20. nabu/cuda/utils.py +0 -6
  21. nabu/estimation/alignment.py +5 -19
  22. nabu/estimation/cor.py +176 -597
  23. nabu/estimation/cor_sino.py +353 -25
  24. nabu/estimation/focus.py +63 -11
  25. nabu/estimation/tests/test_cor.py +124 -58
  26. nabu/estimation/tests/test_focus.py +6 -6
  27. nabu/estimation/tilt.py +2 -1
  28. nabu/estimation/utils.py +5 -33
  29. nabu/io/__init__.py +1 -1
  30. nabu/io/cast_volume.py +1 -1
  31. nabu/io/reader.py +416 -21
  32. nabu/io/tests/test_readers.py +422 -0
  33. nabu/io/tests/test_writers.py +1 -102
  34. nabu/io/writer.py +4 -433
  35. nabu/opencl/kernel.py +14 -3
  36. nabu/opencl/processing.py +8 -0
  37. nabu/pipeline/config_validators.py +5 -2
  38. nabu/pipeline/datadump.py +12 -5
  39. nabu/pipeline/estimators.py +125 -187
  40. nabu/pipeline/fullfield/chunked.py +162 -90
  41. nabu/pipeline/fullfield/chunked_cuda.py +7 -3
  42. nabu/pipeline/fullfield/computations.py +2 -7
  43. nabu/pipeline/fullfield/dataset_validator.py +0 -4
  44. nabu/pipeline/fullfield/nabu_config.py +37 -13
  45. nabu/pipeline/fullfield/processconfig.py +22 -13
  46. nabu/pipeline/fullfield/reconstruction.py +13 -9
  47. nabu/pipeline/helical/helical_chunked_regridded.py +1 -1
  48. nabu/pipeline/helical/helical_chunked_regridded_cuda.py +1 -0
  49. nabu/pipeline/helical/helical_reconstruction.py +1 -1
  50. nabu/pipeline/params.py +21 -1
  51. nabu/pipeline/processconfig.py +1 -12
  52. nabu/pipeline/reader.py +146 -0
  53. nabu/pipeline/tests/test_estimators.py +40 -72
  54. nabu/pipeline/utils.py +4 -2
  55. nabu/pipeline/writer.py +3 -2
  56. nabu/preproc/ccd_cuda.py +1 -1
  57. nabu/preproc/ctf.py +14 -7
  58. nabu/preproc/ctf_cuda.py +2 -3
  59. nabu/preproc/double_flatfield.py +5 -12
  60. nabu/preproc/double_flatfield_cuda.py +2 -2
  61. nabu/preproc/flatfield.py +5 -1
  62. nabu/preproc/flatfield_cuda.py +5 -1
  63. nabu/preproc/phase.py +24 -73
  64. nabu/preproc/phase_cuda.py +5 -8
  65. nabu/preproc/tests/test_ctf.py +11 -7
  66. nabu/preproc/tests/test_flatfield.py +67 -122
  67. nabu/preproc/tests/test_paganin.py +54 -30
  68. nabu/processing/azim.py +206 -0
  69. nabu/processing/convolution_cuda.py +1 -1
  70. nabu/processing/fft_cuda.py +15 -17
  71. nabu/processing/histogram.py +2 -0
  72. nabu/processing/histogram_cuda.py +2 -1
  73. nabu/processing/kernel_base.py +3 -0
  74. nabu/processing/muladd_cuda.py +1 -0
  75. nabu/processing/padding_opencl.py +1 -1
  76. nabu/processing/roll_opencl.py +1 -0
  77. nabu/processing/rotation_cuda.py +2 -2
  78. nabu/processing/tests/test_fft.py +17 -10
  79. nabu/processing/unsharp_cuda.py +1 -1
  80. nabu/reconstruction/cone.py +104 -40
  81. nabu/reconstruction/fbp.py +3 -0
  82. nabu/reconstruction/fbp_base.py +7 -2
  83. nabu/reconstruction/filtering.py +20 -7
  84. nabu/reconstruction/filtering_cuda.py +7 -1
  85. nabu/reconstruction/hbp.py +424 -0
  86. nabu/reconstruction/mlem.py +99 -0
  87. nabu/reconstruction/reconstructor.py +2 -0
  88. nabu/reconstruction/rings_cuda.py +19 -19
  89. nabu/reconstruction/sinogram_cuda.py +1 -0
  90. nabu/reconstruction/sinogram_opencl.py +3 -1
  91. nabu/reconstruction/tests/test_cone.py +10 -5
  92. nabu/reconstruction/tests/test_deringer.py +7 -6
  93. nabu/reconstruction/tests/test_fbp.py +124 -10
  94. nabu/reconstruction/tests/test_filtering.py +13 -11
  95. nabu/reconstruction/tests/test_halftomo.py +30 -4
  96. nabu/reconstruction/tests/test_mlem.py +91 -0
  97. nabu/reconstruction/tests/test_reconstructor.py +8 -3
  98. nabu/resources/dataset_analyzer.py +130 -85
  99. nabu/resources/gpu.py +1 -0
  100. nabu/resources/nxflatfield.py +134 -125
  101. nabu/resources/templates/id16a_fluo.conf +42 -0
  102. nabu/resources/tests/test_extract.py +10 -0
  103. nabu/resources/tests/test_nxflatfield.py +2 -2
  104. nabu/stitching/alignment.py +80 -24
  105. nabu/stitching/config.py +105 -68
  106. nabu/stitching/definitions.py +1 -0
  107. nabu/stitching/frame_composition.py +68 -60
  108. nabu/stitching/overlap.py +91 -51
  109. nabu/stitching/single_axis_stitching.py +32 -0
  110. nabu/stitching/slurm_utils.py +6 -6
  111. nabu/stitching/stitcher/__init__.py +0 -0
  112. nabu/stitching/stitcher/base.py +124 -0
  113. nabu/stitching/stitcher/dumper/__init__.py +3 -0
  114. nabu/stitching/stitcher/dumper/base.py +94 -0
  115. nabu/stitching/stitcher/dumper/postprocessing.py +356 -0
  116. nabu/stitching/stitcher/dumper/preprocessing.py +60 -0
  117. nabu/stitching/stitcher/post_processing.py +555 -0
  118. nabu/stitching/stitcher/pre_processing.py +1068 -0
  119. nabu/stitching/stitcher/single_axis.py +484 -0
  120. nabu/stitching/stitcher/stitcher.py +0 -0
  121. nabu/stitching/stitcher/y_stitcher.py +13 -0
  122. nabu/stitching/stitcher/z_stitcher.py +45 -0
  123. nabu/stitching/stitcher_2D.py +278 -0
  124. nabu/stitching/tests/test_config.py +12 -37
  125. nabu/stitching/tests/test_frame_composition.py +33 -59
  126. nabu/stitching/tests/test_overlap.py +149 -7
  127. nabu/stitching/tests/test_utils.py +1 -1
  128. nabu/stitching/tests/test_y_preprocessing_stitching.py +132 -0
  129. nabu/stitching/tests/{test_z_stitching.py → test_z_postprocessing_stitching.py} +167 -561
  130. nabu/stitching/tests/test_z_preprocessing_stitching.py +431 -0
  131. nabu/stitching/utils/__init__.py +1 -0
  132. nabu/stitching/utils/post_processing.py +281 -0
  133. nabu/stitching/utils/tests/test_post-processing.py +21 -0
  134. nabu/stitching/{utils.py → utils/utils.py} +79 -52
  135. nabu/stitching/y_stitching.py +27 -0
  136. nabu/stitching/z_stitching.py +32 -2281
  137. nabu/testutils.py +1 -152
  138. nabu/thirdparty/tomocupy_remove_stripe.py +43 -9
  139. nabu/utils.py +158 -61
  140. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/METADATA +24 -17
  141. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/RECORD +145 -121
  142. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/WHEEL +1 -1
  143. nabu/io/tiffwriter_zmm.py +0 -99
  144. nabu/pipeline/fallback_utils.py +0 -149
  145. nabu/pipeline/helical/tests/test_accumulator.py +0 -158
  146. nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -355
  147. nabu/pipeline/helical/tests/test_strategy.py +0 -61
  148. nabu/pipeline/helical/utils.py +0 -51
  149. nabu/pipeline/tests/test_chunk_reader.py +0 -74
  150. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/LICENSE +0 -0
  151. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/entry_points.txt +0 -0
  152. {nabu-2024.1.10.dist-info → nabu-2024.2.0rc1.dist-info}/top_level.txt +0 -0
nabu/estimation/cor_sino.py CHANGED
@@ -1,30 +1,16 @@
-"""
-This module provides global definitions and methods to compute COR in extrem
-Half Acquisition mode
-"""
-
-__authors__ = ["C. Nemoz", "H.Payno"]
-__license__ = "MIT"
-__date__ = "13/04/2021"
-
 import numpy as np
 from scipy.signal import convolve2d
+from scipy.fft import rfft
+
+from ..utils import deprecation_warning, is_scalar
 from ..resources.logger import LoggerOrPrint
 
+try:
+    from algotom.prep.calculation import find_center_vo, find_center_360
 
-def schift(mat, val):
-    ker = np.zeros((3, 3))
-    s = 1.0
-    if val < 0:
-        s = -1.0
-    val = s * val
-    ker[1, 1] = 1 - val
-    if s > 0:
-        ker[1, 2] = val
-    else:
-        ker[1, 0] = val
-    mat = convolve2d(mat, ker, mode="same")
-    return mat
+    __have_algotom__ = True
+except ImportError:
+    __have_algotom__ = False
 
 
 class SinoCor:
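Note on the new scipy.fft import above: the CenterOfRotationFourierAngles estimator added further down in this file computes rfft per sinogram column and then repacks the complex output into the interleaved layout of the legacy scipy.fftpack.rfft via _convert_from_fft_2_fftpack_format. A small self-contained sketch of that layout for an even-length signal (illustrative only, not part of the package):

import numpy as np
from scipy.fft import rfft
from scipy.fftpack import rfft as rfft_legacy

x = np.random.default_rng(0).random(16)
f = rfft(x)  # complex output, length n//2 + 1

# repack into the legacy interleaved layout: [DC, Re(1), Im(1), ..., Re(n/2)]
packed = np.zeros(x.size)
packed[0] = f[0].real
packed[1::2] = f[1:].real
packed[2::2] = f[1:-1].imag

assert np.allclose(packed, rfft_legacy(x))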
@@ -57,6 +43,21 @@ class SinoCor:
 
         self.window_width = round(self.sx / 5)
 
+    @staticmethod
+    def schift(mat, val):
+        ker = np.zeros((3, 3))
+        s = 1.0
+        if val < 0:
+            s = -1.0
+        val = s * val
+        ker[1, 1] = 1 - val
+        if s > 0:
+            ker[1, 2] = val
+        else:
+            ker[1, 0] = val
+        mat = convolve2d(mat, ker, mode="same")
+        return mat
+
     def overlap(self, side="right", window_width=None):
         """
         Compute COR by minimizing difference of circulating ROI
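The relocated schift helper (unchanged in behavior) applies a sub-pixel horizontal shift by convolving with a 3x3 kernel that splits each pixel's value between its own position and a neighbouring column. A tiny worked check of the kernel logic, outside of nabu (illustrative values):

import numpy as np
from scipy.signal import convolve2d

img = np.zeros((3, 5))
img[1, 2] = 1.0  # single bright pixel in the middle column

val = 0.3
ker = np.zeros((3, 3))
ker[1, 1] = 1 - val  # weight kept in place
ker[1, 2] = val      # weight pushed onto the neighbouring column (val > 0)

print(convolve2d(img, ker, mode="same")[1])
# [0.  0.  0.7 0.3 0. ]  -> the pixel's centroid has moved by +0.3 column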
@@ -151,7 +152,7 @@ class SinoCor:
         x0 = xc1 + pix
         for isf in isfr:
             if isf != 0:
-                ims = schift(self.data1[:, x0 : x0 + xwin].copy(), -p_sign * isf)
+                ims = self.schift(self.data1[:, x0 : x0 + xwin].copy(), -p_sign * isf)
             else:
                 ims = self.data1[:, x0 : x0 + xwin]
 
@@ -175,9 +176,336 @@ class SinoCorInterface:
     def __init__(self, logger=None, **kwargs):
         self._logger = logger
 
-    def find_shift(self, img_1, img_2, side="right", window_width=None, neighborhood=7, shift_value=0.1, **kwargs):
+    def find_shift(
+        self,
+        img_1,
+        img_2,
+        side="right",
+        window_width=None,
+        neighborhood=7,
+        shift_value=0.1,
+        return_relative_to_middle=None,
+        **kwargs,
+    ):
+
+        # COMPAT.
+        if return_relative_to_middle is None:
+            deprecation_warning(
+                "The current default behavior is to return the shift relative the the middle of the image. In a future release, this function will return the shift relative to the left-most pixel. To keep the current behavior, please use 'return_relative_to_middle=True'.",
+                do_print=True,
+                func_name="CenterOfRotationCoarseToFine.find_shift",
+            )
+            return_relative_to_middle = True  # the kwarg above will be False by default in a future release
+        # ---
+
         cor_finder = SinoCor(img_1, img_2, logger=self._logger)
         cor_finder.estimate_cor_coarse(side=side, window_width=window_width)
         cor = cor_finder.estimate_cor_fine(neighborhood=neighborhood, shift_value=shift_value)
         # offset will be added later - keep compatibility with result from AlignmentBase.find_shift()
-        return cor - img_1.shape[1] / 2
+        if return_relative_to_middle:
+            return cor - (img_1.shape[1] - 1) / 2
+        else:
+            return cor
+
+
+class CenterOfRotationFourierAngles:
+    """This CoR estimation algo is proposed by V. Valls (BCU). It is based on the Fourier
+    transform of the columns on the sinogram.
+    It requires an initial guesss of the CoR wich is retrieved from
+    dataset_info.dataset_scanner.x_rotation_axis_pixel_position. It is assumed in mm and pixel size in um.
+    Options are (for the moment) hard-coded in the SinoCORFinder.cor_finder.extra_options dict.
+    """
+
+    def __init__(self, *args, **kwargs):
+        pass
+
+    def _convert_from_fft_2_fftpack_format(self, f_signal, o_signal_length):
+        """
+        Converts a scipy.fft.rfft into the (legacy) scipy.fftpack.rfft format.
+        The fftpack.rfft returns a (roughly) twice as long array as fft.rfft as the latter returns an array
+        of complex numbers wheras the former returns an array with real and imag parts in consecutive
+        spots in the array.
+
+        Parameters
+        ----------
+        f_signal : array_like
+            The output of scipy.fft.rfft(signal)
+        o_signal_length : int
+            Size of the original signal (before FT).
+
+        Returns
+        -------
+        out
+            The rfft converted to the fftpack.rfft format (roughly twice as long).
+        """
+        out = np.zeros(o_signal_length, dtype=np.float32)
+        if o_signal_length % 2 == 0:
+            out[0] = f_signal[0].real
+            out[1::2] = f_signal[1:].real
+            out[2::2] = f_signal[1:-1].imag
+        else:
+            out[0] = f_signal[0].real
+            out[1::2] = f_signal[1:].real
+            out[2::2] = f_signal[1:].imag
+        return out
+
+    def _freq_radio(self, sinos, ifrom, ito):
+        size = (sinos.shape[0] + sinos.shape[0] % 2) // 2
+        fs = np.empty((size, sinos.shape[1]))
+        for i in range(ifrom, ito):
+            line = sinos[:, i]
+            f_signal = rfft(line)
+            f_signal = self._convert_from_fft_2_fftpack_format(f_signal, line.shape[0])
+            f = np.abs(f_signal[: (f_signal.size - 1) // 2 + 1])
+            f2 = np.abs(f_signal[(f_signal.size - 1) // 2 + 1 :][::-1])
+            if len(f) > len(f2):
+                f[1:] += f2
+            else:
+                f[0:] += f2
+            fs[:, i] = f
+        with np.errstate(divide="ignore", invalid="ignore", under="ignore"):
+            fs = np.log(fs)
+        return fs
+
+    def gaussian(self, p, x):
+        return p[3] + p[2] * np.exp(-((x - p[0]) ** 2) / (2 * p[1] ** 2))
+
+    def tukey(self, p, x):
+        pos, std, alpha, height, background = p
+        alpha = np.clip(alpha, 0, 1)
+        pi = np.pi
+        inv_alpha = 1 - alpha
+        width = std / (1 - alpha * 0.5)
+        xx = (np.abs(x - pos) - (width * 0.5 * inv_alpha)) / (width * 0.5 * alpha)
+        xx = np.clip(xx, 0, 1)
+        return (0.5 + np.cos(pi * xx) * 0.5) * height + background
+
+    def sinlet(self, p, x):
+        std = p[1] * 2.5
+        lin = np.maximum(0, std - np.abs(p[0] - x)) * 0.5 * np.pi / std
+        return p[3] + p[2] * np.sin(lin)
+
+    def _px(self, detector_width, abs_pos, near_width, near_std, crop_around_cor, near_step):
+        sym_range = None
+        if abs_pos is not None:
+            if crop_around_cor:
+                sym_range = int(abs_pos - near_std * 2), int(abs_pos + near_std * 2)
+
+        window = near_width
+        if sym_range is not None:
+            xx_from = max(window, sym_range[0])
+            xx_to = max(xx_from, min(detector_width - window, sym_range[1]))
+            if xx_from == xx_to:
+                sym_range = None
+        if sym_range is None:
+            xx_from = window
+            xx_to = detector_width - window
+
+        xx = np.arange(xx_from, xx_to, near_step)
+
+        return xx
+
+    def _symmetry_correlation(self, px, array, angles, window, shift_sino):
+        if shift_sino:
+            shift_index = np.argmin(np.abs(angles - np.pi)) - np.argmin(np.abs(angles - 0))
+        else:
+            shift_index = None
+        px_from = int(px[0])
+        px_to = int(np.ceil(px[-1]))
+        f_coef = np.empty(len(px))
+        f_array = self._freq_radio(array, px_from - window, px_to + window)
+        if shift_index is not None:
+            shift_array = np.empty(array.shape, dtype=array.dtype)
+            shift_array[0 : len(shift_array) - shift_index, :] = array[shift_index:, :]
+            shift_array[len(shift_array) - shift_index :, :] = array[:shift_index, :]
+            f_shift_array = self._freq_radio(shift_array, px_from - window, px_to + window)
+        else:
+            f_shift_array = f_array
+
+        for j, x in enumerate(px):
+            i = int(np.floor(x))
+            if x - i > 0.4:  # TO DO : Specific to near_step = 0.5?
+                f_left = f_array[:, i - window : i]
+                f_right = f_shift_array[:, i + 1 : i + window + 1][:, ::-1]
+            else:
+                f_left = f_array[:, i - window : i]
+                f_right = f_shift_array[:, i : i + window][:, ::-1]
+            with np.errstate(divide="ignore", invalid="ignore"):
+                f_coef[j] = np.sum(np.abs(f_left - f_right))
+        return f_coef
+
+    def _cor_correlation(self, px, abs_pos, near_std, signal, near_weight, near_alpha):
+        if abs_pos is not None:
+            if signal == "sinlet":
+                coef = self.sinlet((abs_pos, near_std, -near_weight, 1), px)
+            elif signal == "gaussian":
+                coef = self.gaussian((abs_pos, near_std, -near_weight, 1), px)
+            elif signal == "tukey":
+                coef = self.tukey((abs_pos, near_std * 2, near_alpha, -near_weight, 1), px)
+            else:
+                raise ValueError("Shape unsupported")
+        else:
+            coef = np.ones_like(px)
+        return coef
+
+    def find_shift(
+        self,
+        sino,
+        angles=None,
+        side="center",
+        near_std=100,
+        near_width=20,
+        shift_sino=True,
+        crop_around_cor=False,
+        signal="tukey",
+        near_weight=0.1,
+        near_alpha=0.5,
+        near_step=0.5,
+        return_relative_to_middle=None,
+    ):
+        detector_width = sino.shape[1]
+
+        # COMPAT.
+        if return_relative_to_middle is None:
+            deprecation_warning(
+                "The current default behavior is to return the shift relative the the middle of the image. In a future release, this function will return the shift relative to the left-most pixel. To keep the current behavior, please use 'return_relative_to_middle=True'.",
+                do_print=True,
+                func_name="CenterOfRotationFourierAngles.find_shift",
+            )
+            return_relative_to_middle = True  # the kwarg above will be False by default in a future release
+        # ---
+
+        if angles is None:
+            angles = np.linspace(0, 2 * np.pi, sino.shape[0], endpoint=True)
+        increment = np.abs(angles[0] - angles[1])
+        if np.abs(angles[0] - angles[-1]) < (360 - 0.5) * np.pi / 180 - increment:
+            raise ValueError("Not enough angles, estimator skipped")
+
+        if is_scalar(side):
+            abs_pos = side
+        # COMPAT.
+        elif side == "near":
+            deprecation_warning(
+                "side='near' is deprecated, please use side=<a scalar>", do_print=True, func_name="fourier_angles_near"
+            )
+            abs_pos = detector_width // 2
+        ##.
+        elif side == "center":
+            abs_pos = detector_width // 2
+        elif side == "left":
+            abs_pos = detector_width // 4
+        elif side == "right":
+            abs_pos = detector_width * 3 // 4
+        else:
+            raise ValueError(f"side '{side}' is not handled")
+
+        px = self._px(detector_width, abs_pos, near_width, near_std, crop_around_cor, near_step)
+
+        coef_f = self._symmetry_correlation(
+            px,
+            sino,
+            angles,
+            near_width,
+            shift_sino,
+        )
+        coef_p = self._cor_correlation(px, abs_pos, near_std, signal, near_weight, near_alpha)
+        coef = coef_f * coef_p
+
+        if len(px) > 0:
+            cor = px[np.argmin(coef)] - (detector_width - 1) / 2
+        else:
+            # raise ValueError ?
+            cor = None
+        if not (return_relative_to_middle):
+            cor += (detector_width - 1) / 2
+        return cor
+
+    __call__ = find_shift
+
+
+class CenterOfRotationVo:
+    """
+    A wrapper around algotom 'find_center_vo' and 'find_center_360'.
+
+    Nghia T. Vo, Michael Drakopoulos, Robert C. Atwood, and Christina Reinhard,
+    "Reliable method for calculating the center of rotation in parallel-beam tomography,"
+    Opt. Express 22, 19078-19086 (2014)
+    """
+
+    default_extra_options = {}
+
+    def __init__(self, logger=None, verbose=False, extra_options=None):
+        if not (__have_algotom__):
+            raise ImportError("Need the 'algotom' package")
+        self.extra_options = self.default_extra_options.copy()
+        self.extra_options.update(extra_options or {})
+
+    def find_shift(
+        self,
+        sino,
+        halftomo=False,
+        is_360=False,
+        win_width=100,
+        side="center",
+        search_width_fraction=0.1,
+        step=0.25,
+        radius=4,
+        ratio=0.5,
+        dsp=True,
+        ncore=None,
+        hor_drop=None,
+        ver_drop=None,
+        denoise=True,
+        norm=True,
+        use_overlap=False,
+        return_relative_to_middle=None,
+    ):
+        # COMPAT.
+        if return_relative_to_middle is None:
+            deprecation_warning(
+                "The current default behavior is to return the shift relative the the middle of the image. In a future release, this function will return the shift relative to the left-most pixel. To keep the current behavior, please use 'return_relative_to_middle=True'.",
+                do_print=True,
+                func_name="CenterOfRotationVo.find_shift",
+            )
+            return_relative_to_middle = True  # the kwarg above will be False by default in a future release
+        # ---
+
+        if halftomo:
+            side_algotom = {"left": 0, "right": 1}.get(side, None)
+            cor, _, _, _ = find_center_360(
+                sino, win_width, side=side_algotom, denoise=denoise, norm=norm, use_overlap=use_overlap, ncore=ncore
+            )
+        else:
+            if is_360 and not (halftomo):
+                # Take only one part of the sinogram and use "find_center_vo" - this works better in this case
+                sino = sino[: sino.shape[0] // 2]
+
+            sino_width = sino.shape[-1]
+            search_width = int(search_width_fraction * sino_width)
+
+            if side == "left":
+                start, stop = 0, search_width
+            elif side == "center":
+                start, stop = sino_width // 2 - search_width, sino_width // 2 + search_width
+            elif side == "right":
+                start, stop = sino_width - search_width, sino_width
+            elif is_scalar(side):
+                # side is passed as an offset from the middle of detector
+                side = side + (sino.shape[-1] - 1) / 2.0
+                start, stop = max(0, side - search_width), min(sino_width, side + search_width)
+            else:
+                raise ValueError("Expected 'side' to be 'left', 'center', 'right' or a scalar")
+
+            cor = find_center_vo(
+                sino,
+                start=start,
+                stop=stop,
+                step=step,
+                radius=radius,
+                ratio=ratio,
+                dsp=dsp,
+                ncore=ncore,
+                hor_drop=hor_drop,
+                ver_drop=ver_drop,
+            )
+        return cor if not (return_relative_to_middle) else cor - (sino.shape[1] - 1) / 2
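All three estimators in this file (SinoCorInterface, CenterOfRotationFourierAngles, CenterOfRotationVo) gain the same transitional keyword: with return_relative_to_middle=True (the current default, emitting a deprecation warning when left unset) the estimated center is returned relative to the middle of the detector, (width - 1) / 2; in a future release the default will flip and the absolute position counted from the left-most pixel will be returned instead. A minimal sketch of the relation between the two conventions (variable names are illustrative, not nabu API):

detector_width = 2048
cor_absolute = 1030.25  # axis position, counted from the left-most pixel

# value returned with return_relative_to_middle=True
cor_relative = cor_absolute - (detector_width - 1) / 2   # = 6.75

# value returned with return_relative_to_middle=False
assert cor_relative + (detector_width - 1) / 2 == cor_absolute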
nabu/estimation/focus.py CHANGED
@@ -1,10 +1,54 @@
 import numpy as np
 
+from scipy.fft import fftn
+
+from ..processing.azim import azimuthal_integration_skimage_stack, azimuthal_integration_imagej_stack, __have_skimage__
 from .alignment import plt
 from .cor import CenterOfRotation
 
 
 class CameraFocus(CenterOfRotation):
+
+    def _check_position_jitter(self, img_pos):
+        pos_diff = np.diff(img_pos)
+        if np.any(pos_diff <= 0):
+            self.logger.warning(
+                "Image position regressed throughout scan! (negative movement for some image positions)"
+            )
+
+    @staticmethod
+    def _gradient(x, axes):
+        d = [None] * len(axes)
+        for ii in range(len(axes)):
+            ind = -(ii + 1)
+            padding = [(0, 0)] * len(x.shape)
+            padding[ind] = (0, 1)
+            temp_x = np.pad(x, padding, mode="constant")
+            d[ind] = np.diff(temp_x, n=1, axis=ind)
+        return np.stack(d, axis=0)
+
+    @staticmethod
+    def _compute_metric_value(data, metric, axes=(-2, -1)):
+        if metric.lower() == "std":
+            return np.std(data, axis=axes) / np.mean(data, axis=axes)
+        elif metric.lower() == "grad":
+            grad_data = CameraFocus._gradient(data, axes=axes)
+            grad_mag = np.sqrt(np.sum(grad_data**2, axis=0))
+            return np.sum(grad_mag, axis=axes)
+        elif metric.lower() == "psd":
+            f_data = fftn(data, axes=axes, workers=4)
+            f_data = np.fft.fftshift(f_data, axes=(-2, -1))
+            # octave-fasttomo3 uses |.|^2, probably with scaled FFT (norm="forward" in python),
+            # but tests show that it's less accurate.
+            f_data = np.abs(f_data)
+            ai_func = azimuthal_integration_skimage_stack if __have_skimage__ else azimuthal_integration_imagej_stack
+            az_data = ai_func(f_data, n_threads=4)
+            max_vals = np.max(az_data, axis=0)
+            az_data /= max_vals[None, :]
+            return np.mean(az_data, axis=-1)
+        else:
+            raise ValueError("Unknown metric function %s" % metric)
+
     def find_distance(
         self,
         img_stack: np.ndarray,
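The new _compute_metric_value staticmethod generalizes the focus response from the previously hard-coded normalized standard deviation ("std") to two additional sharpness metrics: summed gradient magnitude ("grad") and an azimuthally integrated power spectrum ("psd", which relies on the new nabu.processing.azim module). A short sketch of the first two on a synthetic stack, calling the staticmethod directly and assuming nabu 2024.2 is installed (in the pipeline the metric is selected through the metric argument used below in find_distance, whose full signature is not shown in this diff):

import numpy as np
from nabu.estimation.focus import CameraFocus

stack = np.random.default_rng(0).random((5, 64, 64)).astype(np.float32)

resp_std = CameraFocus._compute_metric_value(stack, metric="std")    # std/mean per image
resp_grad = CameraFocus._compute_metric_value(stack, metric="grad")  # summed gradient magnitude per image

print(resp_std.shape, resp_grad.shape)  # (5,) (5,)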
@@ -76,6 +120,7 @@ class CameraFocus(CenterOfRotation):
         is the associated image position (starting from 1).
         """
         self._check_img_stack_size(img_stack, img_pos)
+        self._check_position_jitter(img_pos)
 
         if peak_fit_radius < 1:
             self.logger.warning("Parameter peak_fit_radius should be at least 1, given: %d instead." % peak_fit_radius)
@@ -93,19 +138,25 @@ class CameraFocus(CenterOfRotation):
             high_pass=high_pass,
         )
 
-        img_stds = np.std(img_stack, axis=(-2, -1)) / np.mean(img_stack, axis=(-2, -1))
+        img_resp = self._compute_metric_value(img_stack, metric=metric, axes=(-2, -1))
 
-        # assuming images are equispaced
+        # assuming images are equispaced!
+        # focus_step = np.mean(np.abs(np.diff(img_pos)))
         focus_step = (img_pos[-1] - img_pos[0]) / (num_imgs - 1)
 
         img_inds = np.arange(num_imgs)
-        (f_vals, f_pos) = self.extract_peak_regions_1d(img_stds, peak_radius=peak_fit_radius, cc_coords=img_inds)
-        focus_ind, img_std_max = self.refine_max_position_1d(f_vals, return_vertex_val=True)
+        (f_vals, f_pos) = self.extract_peak_regions_1d(img_resp, peak_radius=peak_fit_radius, cc_coords=img_inds)
+        focus_ind, img_resp_max = self.refine_max_position_1d(f_vals, return_vertex_val=True, return_all_coeffs=True)
         focus_ind += f_pos[1, :]
 
         focus_pos = img_pos[0] + focus_step * focus_ind
         focus_ind += 1
 
+        if focus_pos.size == 1:
+            focus_pos = focus_pos[0]
+        if focus_ind.size == 1:
+            focus_ind = focus_ind[0]
+
         if self.verbose:
             self.logger.info(
                 "Fitted focus motor position:",
@@ -115,9 +166,9 @@ class CameraFocus(CenterOfRotation):
             )
             f, ax = plt.subplots(1, 1)
             self._add_plot_window(f, ax=ax)
-            ax.stem(img_pos, img_stds)
-            ax.stem(focus_pos, img_std_max, linefmt="C1-", markerfmt="C1o")
-            ax.set_title("Images std")
+            ax.stem(img_pos, img_resp)
+            ax.stem(focus_pos, img_resp_max, linefmt="C1-", markerfmt="C1o")
+            ax.set_title("Images response (metric: %s)" % metric)
             plt.show(block=False)
 
         return focus_pos, focus_ind
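With the changes above, find_distance reports the peak of the selected response metric and now returns scalars instead of one-element arrays when a single best-focus position is fitted. A hedged usage sketch on a synthetic defocus series (assuming nabu 2024.2 and SciPy are installed; data, names and values are illustrative):

import numpy as np
from scipy.ndimage import gaussian_filter
from nabu.estimation.focus import CameraFocus

img_pos = np.linspace(-1.0, 1.0, 9)  # equispaced focus motor positions
base = np.random.default_rng(1).random((128, 128)).astype(np.float32)
# sharpest image in the middle of the scan, increasingly blurred away from it
stack = np.array([gaussian_filter(base, abs(p) * 3 + 0.1) for p in img_pos])

focus_pos, focus_ind = CameraFocus().find_distance(stack, img_pos, metric="std")
# focus_pos: fitted motor position of best focus (a scalar after this change)
# focus_ind: corresponding image index, starting from 1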
@@ -272,6 +323,7 @@ class CameraFocus(CenterOfRotation):
         ... img_stack, img_pos, roi_yxhw=img_roi, regions_number=regions_number)
         """
         self._check_img_stack_size(img_stack, img_pos)
+        self._check_position_jitter(img_pos)
 
         if peak_fit_radius < 1:
             self.logger.warning("Parameter peak_fit_radius should be at least 1, given: %d instead." % peak_fit_radius)
@@ -306,15 +358,15 @@ class CameraFocus(CenterOfRotation):
         )
         img_stack = np.reshape(img_stack, block_stack_size)
 
-        img_stds = np.std(img_stack, axis=(-3, -1)) / np.mean(img_stack, axis=(-3, -1))
-        img_stds = np.reshape(img_stds, [num_imgs, -1]).transpose()
+        img_resp = self._compute_metric_value(img_stack, metric=metric, axes=(-3, -1))
+        img_resp = np.reshape(img_resp, [num_imgs, -1]).transpose()
 
         # assuming images are equispaced
         focus_step = (img_pos[-1] - img_pos[0]) / (num_imgs - 1)
 
         img_inds = np.arange(num_imgs)
-        (f_vals, f_pos) = self.extract_peak_regions_1d(img_stds, peak_radius=peak_fit_radius, cc_coords=img_inds)
-        focus_inds = self.refine_max_position_1d(f_vals)
+        (f_vals, f_pos) = self.extract_peak_regions_1d(img_resp, peak_radius=peak_fit_radius, cc_coords=img_inds)
+        focus_inds = self.refine_max_position_1d(f_vals, return_all_coeffs=True)
         focus_inds += f_pos[1, :]
 
         focus_poss = img_pos[0] + focus_step * focus_inds