nabu 2023.2.1__py3-none-any.whl → 2024.1.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. doc/conf.py +1 -1
  2. doc/doc_config.py +32 -0
  3. nabu/__init__.py +2 -1
  4. nabu/app/bootstrap_stitching.py +1 -1
  5. nabu/app/cli_configs.py +122 -2
  6. nabu/app/composite_cor.py +27 -2
  7. nabu/app/correct_rot.py +70 -0
  8. nabu/app/create_distortion_map_from_poly.py +42 -18
  9. nabu/app/diag_to_pix.py +358 -0
  10. nabu/app/diag_to_rot.py +449 -0
  11. nabu/app/generate_header.py +4 -3
  12. nabu/app/histogram.py +2 -2
  13. nabu/app/multicor.py +6 -1
  14. nabu/app/parse_reconstruction_log.py +151 -0
  15. nabu/app/prepare_weights_double.py +83 -22
  16. nabu/app/reconstruct.py +5 -1
  17. nabu/app/reconstruct_helical.py +7 -0
  18. nabu/app/reduce_dark_flat.py +6 -3
  19. nabu/app/rotate.py +4 -4
  20. nabu/app/stitching.py +16 -2
  21. nabu/app/tests/test_reduce_dark_flat.py +18 -2
  22. nabu/app/validator.py +4 -4
  23. nabu/cuda/convolution.py +8 -376
  24. nabu/cuda/fft.py +4 -0
  25. nabu/cuda/kernel.py +4 -4
  26. nabu/cuda/medfilt.py +5 -158
  27. nabu/cuda/padding.py +5 -71
  28. nabu/cuda/processing.py +23 -2
  29. nabu/cuda/src/ElementOp.cu +78 -0
  30. nabu/cuda/src/backproj.cu +28 -2
  31. nabu/cuda/src/fourier_wavelets.cu +2 -2
  32. nabu/cuda/src/normalization.cu +23 -0
  33. nabu/cuda/src/padding.cu +2 -2
  34. nabu/cuda/src/transpose.cu +16 -0
  35. nabu/cuda/utils.py +39 -0
  36. nabu/estimation/alignment.py +10 -1
  37. nabu/estimation/cor.py +808 -38
  38. nabu/estimation/cor_sino.py +7 -9
  39. nabu/estimation/tests/test_cor.py +85 -3
  40. nabu/io/reader.py +26 -18
  41. nabu/io/tests/test_cast_volume.py +3 -3
  42. nabu/io/tests/test_detector_distortion.py +3 -3
  43. nabu/io/tiffwriter_zmm.py +2 -2
  44. nabu/io/utils.py +14 -4
  45. nabu/io/writer.py +5 -3
  46. nabu/misc/fftshift.py +6 -0
  47. nabu/misc/histogram.py +5 -285
  48. nabu/misc/histogram_cuda.py +8 -104
  49. nabu/misc/kernel_base.py +3 -121
  50. nabu/misc/padding_base.py +5 -69
  51. nabu/misc/processing_base.py +3 -107
  52. nabu/misc/rotation.py +5 -62
  53. nabu/misc/rotation_cuda.py +5 -65
  54. nabu/misc/transpose.py +6 -0
  55. nabu/misc/unsharp.py +3 -78
  56. nabu/misc/unsharp_cuda.py +5 -52
  57. nabu/misc/unsharp_opencl.py +8 -85
  58. nabu/opencl/fft.py +6 -0
  59. nabu/opencl/kernel.py +21 -6
  60. nabu/opencl/padding.py +5 -72
  61. nabu/opencl/processing.py +27 -5
  62. nabu/opencl/src/backproj.cl +3 -3
  63. nabu/opencl/src/fftshift.cl +65 -12
  64. nabu/opencl/src/padding.cl +2 -2
  65. nabu/opencl/src/roll.cl +96 -0
  66. nabu/opencl/src/transpose.cl +16 -0
  67. nabu/pipeline/config_validators.py +63 -3
  68. nabu/pipeline/dataset_validator.py +2 -2
  69. nabu/pipeline/estimators.py +193 -35
  70. nabu/pipeline/fullfield/chunked.py +34 -17
  71. nabu/pipeline/fullfield/chunked_cuda.py +7 -5
  72. nabu/pipeline/fullfield/computations.py +48 -13
  73. nabu/pipeline/fullfield/nabu_config.py +13 -13
  74. nabu/pipeline/fullfield/processconfig.py +10 -5
  75. nabu/pipeline/fullfield/reconstruction.py +1 -2
  76. nabu/pipeline/helical/fbp.py +5 -0
  77. nabu/pipeline/helical/filtering.py +12 -9
  78. nabu/pipeline/helical/gridded_accumulator.py +179 -33
  79. nabu/pipeline/helical/helical_chunked_regridded.py +262 -151
  80. nabu/pipeline/helical/helical_chunked_regridded_cuda.py +4 -11
  81. nabu/pipeline/helical/helical_reconstruction.py +56 -18
  82. nabu/pipeline/helical/span_strategy.py +1 -1
  83. nabu/pipeline/helical/tests/test_accumulator.py +4 -0
  84. nabu/pipeline/params.py +23 -2
  85. nabu/pipeline/processconfig.py +3 -8
  86. nabu/pipeline/tests/test_chunk_reader.py +78 -0
  87. nabu/pipeline/tests/test_estimators.py +120 -2
  88. nabu/pipeline/utils.py +25 -0
  89. nabu/pipeline/writer.py +2 -0
  90. nabu/preproc/ccd_cuda.py +9 -7
  91. nabu/preproc/ctf.py +21 -26
  92. nabu/preproc/ctf_cuda.py +25 -25
  93. nabu/preproc/double_flatfield.py +14 -2
  94. nabu/preproc/double_flatfield_cuda.py +7 -11
  95. nabu/preproc/flatfield_cuda.py +23 -27
  96. nabu/preproc/phase.py +19 -24
  97. nabu/preproc/phase_cuda.py +21 -21
  98. nabu/preproc/shift_cuda.py +58 -28
  99. nabu/preproc/tests/test_ctf.py +5 -5
  100. nabu/preproc/tests/test_double_flatfield.py +2 -2
  101. nabu/preproc/tests/test_vshift.py +13 -2
  102. nabu/processing/__init__.py +0 -0
  103. nabu/processing/convolution_cuda.py +375 -0
  104. nabu/processing/fft_base.py +163 -0
  105. nabu/processing/fft_cuda.py +256 -0
  106. nabu/processing/fft_opencl.py +54 -0
  107. nabu/processing/fftshift.py +134 -0
  108. nabu/processing/histogram.py +286 -0
  109. nabu/processing/histogram_cuda.py +103 -0
  110. nabu/processing/kernel_base.py +126 -0
  111. nabu/processing/medfilt_cuda.py +159 -0
  112. nabu/processing/muladd.py +29 -0
  113. nabu/processing/muladd_cuda.py +68 -0
  114. nabu/processing/padding_base.py +71 -0
  115. nabu/processing/padding_cuda.py +75 -0
  116. nabu/processing/padding_opencl.py +77 -0
  117. nabu/processing/processing_base.py +123 -0
  118. nabu/processing/roll_opencl.py +64 -0
  119. nabu/processing/rotation.py +63 -0
  120. nabu/processing/rotation_cuda.py +66 -0
  121. nabu/processing/tests/__init__.py +0 -0
  122. nabu/processing/tests/test_fft.py +268 -0
  123. nabu/processing/tests/test_fftshift.py +71 -0
  124. nabu/{misc → processing}/tests/test_histogram.py +2 -4
  125. nabu/{cuda → processing}/tests/test_medfilt.py +1 -1
  126. nabu/processing/tests/test_muladd.py +54 -0
  127. nabu/{cuda → processing}/tests/test_padding.py +119 -75
  128. nabu/processing/tests/test_roll.py +63 -0
  129. nabu/{misc → processing}/tests/test_rotation.py +3 -2
  130. nabu/processing/tests/test_transpose.py +72 -0
  131. nabu/{misc → processing}/tests/test_unsharp.py +41 -8
  132. nabu/processing/transpose.py +126 -0
  133. nabu/processing/unsharp.py +79 -0
  134. nabu/processing/unsharp_cuda.py +53 -0
  135. nabu/processing/unsharp_opencl.py +75 -0
  136. nabu/reconstruction/fbp.py +34 -10
  137. nabu/reconstruction/fbp_base.py +35 -16
  138. nabu/reconstruction/fbp_opencl.py +7 -12
  139. nabu/reconstruction/filtering.py +2 -2
  140. nabu/reconstruction/filtering_cuda.py +13 -14
  141. nabu/reconstruction/filtering_opencl.py +3 -4
  142. nabu/reconstruction/projection.py +2 -0
  143. nabu/reconstruction/rings.py +158 -1
  144. nabu/reconstruction/rings_cuda.py +218 -58
  145. nabu/reconstruction/sinogram_cuda.py +16 -12
  146. nabu/reconstruction/tests/test_deringer.py +116 -14
  147. nabu/reconstruction/tests/test_fbp.py +22 -31
  148. nabu/reconstruction/tests/test_filtering.py +11 -2
  149. nabu/resources/dataset_analyzer.py +89 -26
  150. nabu/resources/nxflatfield.py +2 -2
  151. nabu/resources/tests/test_nxflatfield.py +1 -1
  152. nabu/resources/utils.py +9 -2
  153. nabu/stitching/alignment.py +184 -0
  154. nabu/stitching/config.py +241 -39
  155. nabu/stitching/definitions.py +6 -0
  156. nabu/stitching/frame_composition.py +4 -2
  157. nabu/stitching/overlap.py +99 -3
  158. nabu/stitching/sample_normalization.py +60 -0
  159. nabu/stitching/slurm_utils.py +10 -10
  160. nabu/stitching/tests/test_alignment.py +99 -0
  161. nabu/stitching/tests/test_config.py +16 -1
  162. nabu/stitching/tests/test_overlap.py +68 -2
  163. nabu/stitching/tests/test_sample_normalization.py +49 -0
  164. nabu/stitching/tests/test_slurm_utils.py +5 -5
  165. nabu/stitching/tests/test_utils.py +3 -33
  166. nabu/stitching/tests/test_z_stitching.py +391 -22
  167. nabu/stitching/utils.py +144 -202
  168. nabu/stitching/z_stitching.py +309 -126
  169. nabu/testutils.py +18 -0
  170. nabu/thirdparty/tomocupy_remove_stripe.py +586 -0
  171. nabu/utils.py +32 -6
  172. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/LICENSE +1 -1
  173. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/METADATA +5 -5
  174. nabu-2024.1.0rc3.dist-info/RECORD +296 -0
  175. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/WHEEL +1 -1
  176. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/entry_points.txt +5 -1
  177. nabu/conftest.py +0 -14
  178. nabu/opencl/fftshift.py +0 -92
  179. nabu/opencl/tests/test_fftshift.py +0 -55
  180. nabu/opencl/tests/test_padding.py +0 -84
  181. nabu-2023.2.1.dist-info/RECORD +0 -252
  182. /nabu/cuda/src/{fftshift.cu → dfi_fftshift.cu} +0 -0
  183. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/top_level.txt +0 -0
nabu/estimation/cor.py CHANGED
@@ -1,7 +1,10 @@
  import math
  import numpy as np
+ from scipy.fftpack import rfft
+ from numbers import Real
  from ..misc import fourier_filters
  from .alignment import AlignmentBase, plt, progress_bar, local_fftn, local_ifftn
+ from ..resources.utils import extract_parameters

  # three possible values for the validity check, which can optionally be returned by the find_shifts methods
  cor_result_validity = {
@@ -25,6 +28,7 @@ class CenterOfRotation(AlignmentBase):
          high_pass=None,
          low_pass=None,
          return_validity=False,
+         cor_options=None,
      ):
          """Find the Center of Rotation (CoR), given two images.

@@ -125,10 +129,23 @@ class CenterOfRotation(AlignmentBase):
          (f_vals, fv, fh) = self.extract_peak_region_2d(cc, peak_radius=peak_fit_radius, cc_vs=cc_vs, cc_hs=cc_hs)
          fitted_shifts_vh = self.refine_max_position_2d(f_vals, fv, fh)

+         estimated_cor = fitted_shifts_vh[shift_axis] / 2.0
+
+         if isinstance(self.cor_options.get("near_pos", None), (int, float)):
+             near_pos = self.cor_options["near_pos"]
+             if (
+                 np.abs(near_pos - estimated_cor) / near_pos > 0.2
+             ):  # For comparison, near_pos is RELATIVE (as estimated_cor is).
+                 validity_check_result = cor_result_validity["questionable"]
+             else:
+                 validity_check_result = cor_result_validity["sound"]
+         else:
+             validity_check_result = cor_result_validity["unknown"]
+
          if return_validity:
-             return fitted_shifts_vh[shift_axis] / 2.0, cor_result_validity["unknown"]
+             return estimated_cor, validity_check_result
          else:
-             return fitted_shifts_vh[shift_axis] / 2.0
+             return estimated_cor


  class CenterOfRotationSlidingWindow(CenterOfRotation):
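Note on the validity check added in the hunk above: the estimate is flagged "questionable" when it deviates from the near_pos hint by more than 20% (relative). A small illustrative computation, with made-up numbers rather than anything from the package:

    near_pos = 50.0        # hypothetical hint, relative to the detector center (pixels)
    estimated_cor = 42.0   # hypothetical fitted value, same convention
    rel_dev = abs(near_pos - estimated_cor) / near_pos   # 0.16 < 0.2, so the result is tagged "sound"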
@@ -145,6 +162,7 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):
          high_pass=None,
          low_pass=None,
          return_validity=False,
+         cor_options=None,
      ):
          """Semi-automatically find the Center of Rotation (CoR), given two images
          or sinograms. Suitable for half-acquisition scan.
@@ -250,13 +268,30 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):
          )
          img_shape = img_2.shape

-         if window_width is None:
-             if side.lower() == "center":
-                 window_width = round(img_shape[-1] / 4.0 * 3.0)
-             else:
-                 window_width = round(img_shape[-1] / 10)
-         window_shift = window_width // 2
-         window_width = window_shift * 2 + 1
+         near_pos = self.cor_options.get("near_pos", None)
+         if near_pos is None:
+             if window_width is None:
+                 if side.lower() == "center":
+                     window_width = round(img_shape[-1] / 4.0 * 3.0)
+                 else:
+                     window_width = round(img_shape[-1] / 10)
+             window_shift = window_width // 2
+             window_width = window_shift * 2 + 1
+
+             win_1_start_seed = 0
+             # number of pixels where the window will "slide".
+             n = img_shape[-1] - window_width
+         else:
+             abs_pos = near_pos + img_shape[-1] // 2
+             offset = min(img_shape[-1] - abs_pos, abs_pos)  # distance to closest edge.
+
+             window_fraction = 0.8  # Hard-coded ?
+             window_shift = int(np.floor(offset * window_fraction))
+             window_width = 2 * window_shift + 1
+
+             sliding_shift = int(np.floor(offset * (1 - window_fraction))) - 1
+             n = 2 * sliding_shift + 1
+             win_1_start_seed = abs_pos - window_shift - sliding_shift

          if side.lower() == "right":
              win_2_start = 0
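To make the near_pos branch above concrete, here is a small numeric sketch of the window geometry it sets up, assuming a hypothetical 2048-pixel-wide detector and near_pos = 300 (values chosen only for illustration, not taken from the package):

    import numpy as np
    near_pos, width = 300, 2048
    abs_pos = near_pos + width // 2                                     # 1324
    offset = min(width - abs_pos, abs_pos)                              # 724, distance to the closest edge
    window_fraction = 0.8
    window_shift = int(np.floor(offset * window_fraction))              # 579
    window_width = 2 * window_shift + 1                                 # 1159
    sliding_shift = int(np.floor(offset * (1 - window_fraction))) - 1   # 143
    n = 2 * sliding_shift + 1                                           # 287 window positions are tested
    win_1_start_seed = abs_pos - window_shift - sliding_shift           # 602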
@@ -271,13 +306,12 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):

          win_2_end = win_2_start + window_width

-         # number of pixels where the window will "slide".
-         n = img_shape[-1] - window_width
          diffs_mean = np.zeros((n,), dtype=img_1.dtype)
          diffs_std = np.zeros((n,), dtype=img_1.dtype)

          for ii in progress_bar(range(n), verbose=self.verbose):
-             win_1_start, win_1_end = ii, ii + window_width
+             win_1_start = win_1_start_seed + ii
+             win_1_end = win_1_start + window_width
              img_diff = img_1[:, win_1_start:win_1_end] - img_2[:, win_2_start:win_2_end]
              diffs_abs = np.abs(img_diff)
              diffs_mean[ii] = diffs_abs.mean()
@@ -299,14 +333,18 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):
          (f_vals, f_pos) = self.extract_peak_regions_1d(diffs_mean, peak_radius=peak_fit_radius)
          win_pos_max, win_val_max = self.refine_max_position_1d(f_vals, return_vertex_val=True)

-         cor_h = -(win_2_start - (win_ind_max + win_pos_max)) / 2.0
+         # Derive the COR
+         if isinstance(near_pos, Real):
+             cor_h = -(win_2_start - (win_1_start_seed + win_ind_max + win_pos_max)) / 2.0
+             cor_pos = -(win_2_start - (win_1_start_seed + np.arange(n))) / 2.0
+         else:
+             cor_h = -(win_2_start - (win_ind_max + win_pos_max)) / 2.0
+             cor_pos = -(win_2_start - np.arange(n)) / 2.0

          if (side.lower() == "right" and win_ind_max == 0) or (side.lower() == "left" and win_ind_max == n):
              self.logger.warning("Sliding window width %d might be too large!" % window_width)

          if self.verbose:
-             cor_pos = -(win_2_start - np.arange(n)) / 2.0
-
              print("Lowest difference window: index=%d, range=[0, %d]" % (win_ind_max, n))
              print("CoR tested for='%s', found at voxel=%g (from center)" % (side, cor_h))

@@ -316,6 +354,7 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):
              ax.stem(cor_h, win_val_max, linefmt="C1-", markerfmt="C1o", label="Best mean difference")
              ax.stem(cor_pos, -diffs_std, linefmt="C2-", markerfmt="C2o", label="Std-dev difference")
              ax.set_title("Window dispersions")
+             plt.legend()
              plt.show(block=False)

          if return_validity:
@@ -452,30 +491,38 @@ class CenterOfRotationGrowingWindow(CenterOfRotation):
          img_lower_half_size = np.floor(img_shape[-1] / 2).astype(np.intp)
          img_upper_half_size = np.ceil(img_shape[-1] / 2).astype(np.intp)

-         if side.lower() == "right":
-             win_1_mid_start = img_lower_half_size
-             win_1_mid_end = np.floor(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
-             win_2_mid_start = -img_upper_half_size + min_window_width
-             win_2_mid_end = img_upper_half_size
-         elif side.lower() == "left":
-             win_1_mid_start = -img_lower_half_size + min_window_width
-             win_1_mid_end = img_lower_half_size
-             win_2_mid_start = img_upper_half_size
-             win_2_mid_end = np.ceil(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
-         elif side.lower() == "center":
-             win_1_mid_start = 0
-             win_1_mid_end = img_shape[-1]
-             win_2_mid_start = 0
-             win_2_mid_end = img_shape[-1]
-         elif side.lower() == "all":
-             win_1_mid_start = -img_lower_half_size + min_window_width
-             win_1_mid_end = np.floor(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
-             win_2_mid_start = -img_upper_half_size + min_window_width
-             win_2_mid_end = np.ceil(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
+         use_estimate_from_motor = "near_pos" in self.cor_options.keys() and isinstance(
+             self.cor_options["near_pos"], (int, float)
+         )
+         use_estimate_from_motor = False  # Not yet implemented.
+         if use_estimate_from_motor:
+             near_pos = self.cor_options["near_pos"]
+
          else:
-             raise ValueError(
-                 "Side should be one of 'left', 'right', or 'center'. '%s' was given instead" % side.lower()
-             )
+             if side.lower() == "right":
+                 win_1_mid_start = img_lower_half_size
+                 win_1_mid_end = np.floor(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
+                 win_2_mid_start = -img_upper_half_size + min_window_width
+                 win_2_mid_end = img_upper_half_size
+             elif side.lower() == "left":
+                 win_1_mid_start = -img_lower_half_size + min_window_width
+                 win_1_mid_end = img_lower_half_size
+                 win_2_mid_start = img_upper_half_size
+                 win_2_mid_end = np.ceil(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
+             elif side.lower() == "center":
+                 win_1_mid_start = 0
+                 win_1_mid_end = img_shape[-1]
+                 win_2_mid_start = 0
+                 win_2_mid_end = img_shape[-1]
+             elif side.lower() == "all":
+                 win_1_mid_start = -img_lower_half_size + min_window_width
+                 win_1_mid_end = np.floor(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
+                 win_2_mid_start = -img_upper_half_size + min_window_width
+                 win_2_mid_end = np.ceil(img_shape[-1] * 3 / 2).astype(np.intp) - min_window_width
+             else:
+                 raise ValueError(
+                     "Side should be one of 'left', 'right', or 'center'. '%s' was given instead" % side.lower()
+                 )

          n1 = win_1_mid_end - win_1_mid_start
          n2 = win_2_mid_end - win_2_mid_start
@@ -849,3 +896,726 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
          return cor_position

      __call__ = find_shift
+
+
+ class CenterOfRotationFourierAngles(CenterOfRotation):
+     """This CoR estimation algo is proposed by V. Valls (BCU). It is based on the Fourier
+     transform of the columns on the sinogram.
+     It requires an initial guess of the CoR which is retrieved from
+     dataset_info.dataset_scanner.estimated_cor_from_motor. It is assumed in mm and pixel size in um.
+     Options are (for the moment) hard-coded in the SinoCORFinder.cor_finder.extra_options dict.
+     """
+
+     _default_cor_options = {
+         "crop_around_cor": False,
+         "side": "center",
+         "near_pos": None,
+         "near_std": 100,
+         "near_width": 20,
+         "near_shape": "tukey",
+         "near_weight": 0.1,
+         "near_alpha": 0.5,
+         "shift_sino": True,
+         "near_step": 0.5,
+         "refine": False,
+     }
+
+     def _freq_radio(self, sinos, ifrom, ito):
+         size = (sinos.shape[0] + sinos.shape[0] % 2) // 2
+         fs = np.empty((size, sinos.shape[1]))
+         for i in range(ifrom, ito):
+             line = sinos[:, i]
+             f_signal = rfft(line)
+             f = np.abs(f_signal[: (f_signal.size - 1) // 2 + 1])
+             f2 = np.abs(f_signal[(f_signal.size - 1) // 2 + 1 :][::-1])
+             if len(f) > len(f2):
+                 f[1:] += f2
+             else:
+                 f[0:] += f2
+             fs[:, i] = f
+         with np.errstate(divide="ignore", invalid="ignore", under="ignore"):
+             fs = np.log(fs)
+         return fs
+
+     def gaussian(self, p, x):
+         return p[3] + p[2] * np.exp(-((x - p[0]) ** 2) / (2 * p[1] ** 2))
+
+     def tukey(self, p, x):
+         pos, std, alpha, height, background = p
+         alpha = np.clip(alpha, 0, 1)
+         pi = np.pi
+         inv_alpha = 1 - alpha
+         width = std / (1 - alpha * 0.5)
+         xx = (np.abs(x - pos) - (width * 0.5 * inv_alpha)) / (width * 0.5 * alpha)
+         xx = np.clip(xx, 0, 1)
+         return (0.5 + np.cos(pi * xx) * 0.5) * height + background
+
+     def sinlet(self, p, x):
+         std = p[1] * 2.5
+         lin = np.maximum(0, std - np.abs(p[0] - x)) * 0.5 * np.pi / std
+         return p[3] + p[2] * np.sin(lin)
+
+     def _px(self, detector_width, abs_pos, near_std):
+         sym_range = None
+         if abs_pos is not None:
+             if self.cor_options["crop_around_cor"]:
+                 sym_range = int(abs_pos - near_std * 2), int(abs_pos + near_std * 2)
+
+         window = self.cor_options["near_width"]
+         if sym_range is not None:
+             xx_from = max(window, sym_range[0])
+             xx_to = max(xx_from, min(detector_width - window, sym_range[1]))
+             if xx_from == xx_to:
+                 sym_range = None
+         if sym_range is None:
+             xx_from = window
+             xx_to = detector_width - window
+
+         xx = np.arange(xx_from, xx_to, self.cor_options["near_step"])
+
+         return xx
+
+     def _symmetry_correlation(self, px, array, angles):
+         window = self.cor_options["near_width"]
+         if self.cor_options["shift_sino"]:
+             shift_index = np.argmin(np.abs(angles - np.pi)) - np.argmin(np.abs(angles - 0))
+         else:
+             shift_index = None
+         px_from = int(px[0])
+         px_to = int(np.ceil(px[-1]))
+         f_coef = np.empty(len(px))
+         f_array = self._freq_radio(array, px_from - window, px_to + window)
+         if shift_index is not None:
+             shift_array = np.empty(array.shape, dtype=array.dtype)
+             shift_array[0 : len(shift_array) - shift_index, :] = array[shift_index:, :]
+             shift_array[len(shift_array) - shift_index :, :] = array[:shift_index, :]
+             f_shift_array = self._freq_radio(shift_array, px_from - window, px_to + window)
+         else:
+             f_shift_array = f_array
+
+         for j, x in enumerate(px):
+             i = int(np.floor(x))
+             if x - i > 0.4:  # TO DO : Specific to near_step = 0.5?
+                 f_left = f_array[:, i - window : i]
+                 f_right = f_shift_array[:, i + 1 : i + window + 1][:, ::-1]
+             else:
+                 f_left = f_array[:, i - window : i]
+                 f_right = f_shift_array[:, i : i + window][:, ::-1]
+             with np.errstate(divide="ignore", invalid="ignore"):
+                 f_coef[j] = np.sum(np.abs(f_left - f_right))
+         return f_coef
+
+     def _cor_correlation(self, px, abs_pos, near_std):
+         if abs_pos is not None:
+             signal = self.cor_options["near_shape"]
+             weight = self.cor_options["near_weight"]
+             alpha = self.cor_options["near_alpha"]
+             if signal == "sinlet":
+                 coef = self.sinlet((abs_pos, near_std, -weight, 1), px)
+             elif signal == "gaussian":
+                 coef = self.gaussian((abs_pos, near_std, -weight, 1), px)
+             elif signal == "tukey":
+                 coef = self.tukey((abs_pos, near_std * 2, alpha, -weight, 1), px)
+             else:
+                 raise ValueError("Shape unsupported")
+         else:
+             coef = np.ones_like(px)
+         return coef
+
+     def find_shift(
+         self,
+         img_1,
+         img_2,
+         angles,
+         side,
+         roi_yxhw=None,
+         median_filt_shape=None,
+         padding_mode=None,
+         peak_fit_radius=1,
+         high_pass=None,
+         low_pass=None,
+     ):
+         sinos = np.vstack([img_1, np.fliplr(img_2).copy()])
+         detector_width = sinos.shape[1]
+
+         increment = np.abs(angles[0] - angles[1])
+         if np.abs(angles[0] - angles[-1]) < (360 - 0.5) * np.pi / 180 - increment:
+             self.logger.warning("Not enough angles, estimator skipped")
+             return None
+
+         near_pos = self.cor_options.get("near_pos", None)  # A RELATIVE estimation of the COR
+
+         # Default coarse estimate to center of detector
+         # if no one is given either in NX or by user.
+         if near_pos is None:
+             self.logger.warning("No initial guess was found (from metadata or user) for CoR")
+             self.logger.warning("Setting initial guess to center of detector.")
+             if side == "center":
+                 abs_pos = detector_width // 2
+             elif side == "left":
+                 abs_pos = detector_width // 4
+             elif side == "right":
+                 abs_pos = detector_width * 3 // 4
+             elif side == "near":
+                 abs_pos = detector_width // 2
+             else:
+                 raise ValueError(f"side '{side}' is not handled")
+         elif isinstance(near_pos, (int, float)):  # Convert RELATIVE to ABSOLUTE position
+             abs_pos = near_pos + detector_width / 2
+
+         near_std = None
+         if abs_pos is not None:
+             near_std = self.cor_options["near_std"]
+
+         px = self._px(detector_width, abs_pos, near_std)
+
+         coef_f = self._symmetry_correlation(
+             px,
+             sinos,
+             angles,
+         )
+         coef_p = self._cor_correlation(px, abs_pos, near_std)
+         coef = coef_f * coef_p
+
+         if len(px) > 0:
+             if self.cor_options["refine"]:
+                 f_vals, f_pos = self.extract_peak_regions_1d(-coef, peak_radius=20, cc_coords=px)
+                 cor, _ = self.refine_max_position_1d(f_vals, fx=f_pos, return_vertex_val=True)
+             else:
+                 cor = px[np.argmin(coef)]
+             cor = cor - detector_width / 2
+         else:
+             cor = None
+
+         return cor
+
+     __call__ = find_shift
+
+
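A minimal usage sketch of the Fourier-angles estimator added above. The array contents are synthetic, a 360-degree half-acquisition scan is assumed, and the constructor plumbing of cor_options is glossed over (in the pipeline it is filled in by nabu's estimators), so treat this as an illustration rather than the library's documented API:

    import numpy as np
    n_angles, width = 1000, 2048                              # hypothetical scan geometry
    sinogram = np.random.rand(n_angles, width).astype("f")    # stand-in data; real code would load a sinogram
    angles = np.linspace(0, 2 * np.pi, n_angles, endpoint=False)
    finder = CenterOfRotationFourierAngles()                  # assumption: cor_options ends up as _default_cor_options
    cor = finder.find_shift(sinogram[: n_angles // 2], sinogram[n_angles // 2 :], angles, side="center")
    # cor is in pixels relative to the detector center, or None if the angular span is too short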
+ class CenterOfRotationOctaveAccurate(AlignmentBase):
+     """This is a Python implementation of Octave/fastomo3/accurate COR estimator.
+     The Octave 'accurate' function is renamed `local_correlation`.
+     The Nabu standard `find_shift` has the same API as the other COR estimators (sliding, growing...)
+
+     The class inherits directly from AlignmentBase.
+     """
+
+     _default_cor_options = {
+         "maxsize": [5, 5],
+         "refine": None,
+         "pmcc": False,
+         "normalize": True,
+         "low_pass": 0.01,
+         "limz": 0.5,
+     }
+
+     def _cut(self, im, nrows, ncols, new_center_row=None, new_center_col=None):
+         """Cuts a sub-matrix out of a larger matrix.
+         Cuts in the center of the original matrix, except if new center is specified
+         NO CHECKING of validity indices sub-matrix!
+
+         Parameters
+         ----------
+         im : array.
+             Original matrix
+         nrows : int
+             Number of rows in the output matrix.
+         ncols : int
+             Number of columns in the output matrix.
+         new_center_row : int
+             Index of center row around which to cut (default: None, i.e. center)
+         new_center_col : int
+             Index of center column around which to cut (default: None, i.e. center)
+
+         Returns
+         -------
+         nrows x ncols array.
+
+         Examples
+         --------
+         im_roi = cut(im, 1024, 1024) -> cut center 1024x1024 pixels
+         im_roi = cut(im, 1024, 1024, 600.5, 700.5) -> cut 1024x1024 pixels around pixels (600-601, 700-701)
+
+         Author: P. Cloetens <cloetens@esrf.eu>
+         2023-11-06 J. Lesaint <jerome.lesaint@esrf.fr>
+
+         * See octave-archive for the original Octave code.
+         * 2023-11-06: Python implementation. Comparison seems OK.
+         """
+         [n, m] = im.shape
+         if new_center_row is None:
+             new_center_row = (n + 1) / 2
+         if new_center_col is None:
+             new_center_col = (m + 1) / 2
+
+         rb = int(np.round(0.5 + new_center_row - nrows / 2))
+         rb = int(np.round(new_center_row - nrows / 2))
+         re = int(nrows + rb)
+         cb = int(np.round(0.5 + new_center_col - ncols / 2))
+         cb = int(np.round(new_center_col - ncols / 2))
+         ce = int(ncols + cb)
+
+         return im[rb:re, cb:ce]
+
+     def _checkifpart(self, rapp, rapp_hist):
+         res = 0
+         for k in range(rapp_hist.shape[0]):
+             if np.allclose(rapp, rapp_hist[k, :]):
+                 res = 1
+                 return res
+         return res
+
+     def _interpolate(self, input, shift, mode="mean", interpolation_method="linear"):
+         """Applies to the input a translation by a vector `shift`. Based on
+         `scipy.ndimage.affine_transform` function.
+         JL: This Octave function was initially used in the refine clause of the local_correlation (Octave find_shift).
+         Since find_shift is always called with refine=False in Octave, refine is not implemented (see local_interpolation())
+         and this function becomes useless.
+
+         Parameters
+         ----------
+         input : array
+             Array to which the translation is applied.
+         shift : tuple, list or array of length 2.
+         mode : str
+             Type of padding applied to the unapplicable areas of the output image.
+             Default `mean` is a constant padding with the mean of the input array.
+             `mode` must belong to 'reflect', 'grid-mirror', 'constant', 'grid-constant', 'nearest', 'mirror', 'grid-wrap', 'wrap'
+             See `scipy.ndimage.affine_transform` for details.
+         interpolation_method : str or int.
+             The interpolation is based on spline interpolation.
+             Either 0, 1, 2, 3, 4 or 5: order of the spline interpolation functions.
+             Or one among 'linear','cubic','pchip','nearest','spline' (Octave legacy).
+             'nearest' is equivalent to 0
+             'linear' is equivalent to 1
+             'cubic','pchip','spline' are equivalent to 3.
+         """
+         admissible_modes = (
+             "reflect",
+             "grid-mirror",
+             "constant",
+             "grid-constant",
+             "nearest",
+             "mirror",
+             "grid-wrap",
+             "wrap",
+         )
+         admissible_interpolation_methods = ("linear", "cubic", "pchip", "nearest", "spline")
+
+         from scipy.ndimage import affine_transform
+
+         [s0, s1] = shift
+         matrix = np.zeros([2, 3], dtype=float)
+         matrix[0, 0] = 1.0
+         matrix[1, 1] = 1.0
+         matrix[:, 2] = [-s0, -s1]  # JL: due to transf. convention diff in Octave and scipy (push fwd vs pull back)
+
+         if interpolation_method == "nearest":
+             order = 0
+         elif interpolation_method == "linear":
+             order = 1
+         elif interpolation_method in ("pchip", "cubic", "spline"):
+             order = 3
+         elif interpolation_method in (0, 1, 2, 3, 4, 5):
+             order = interpolation_method
+         else:
+             raise ValueError(
+                 f"Interpolation method is {interpolation_method} and should either an integer between 0 (inc.) and 5 (inc.) or in {admissible_interpolation_methods}."
+             )
+
+         if mode == "mean":
+             mode = "constant"
+             cval = input.mean()
+             return affine_transform(input, matrix, mode=mode, order=order, cval=cval)
+         elif mode not in admissible_modes:
+             raise ValueError(f"Pad method is {mode} and should be in {admissible_modes}.")
+
+         return affine_transform(input, matrix, mode=mode, order=order)
+
+     def _local_correlation(
+         self,
+         z1,
+         z2,
+         maxsize=[5, 5],
+         cor_estimate=[0, 0],
+         refine=None,
+         pmcc=False,
+         normalize=True,
+     ):
+         """Returns the 2D shift in pixels between two images.
+         It looks for a local optimum around the initial shift cor_estimate
+         and within a window 'maxsize'.
+         It uses variance of the difference of the normalized images or PMCC
+         It adapts the shift estimate in case optimum is at the edge of the window
+         If 'maxsize' is set to 0, it will only use approximate shift (+ refine possibly)
+         Set 'cor_estimate' to allow for the use of any initial shift estimation.
+
+         When not successful (stuck in loop or edge reached), returns [nan nan]
+         Positive values correspond to moving z2 to higher values of the index
+         to compensate drift: interpolate(f)(z2, row, column)
+
+         Parameters
+         ----------
+         z1,z2 : 2D arrays.
+             The two (sub)images to be compared.
+
+         maxsize : 2-list. Default [5,5]
+             Size of the search window.
+
+         cor_estimate:
+             Initial guess of the center of rotation.
+
+         refine: Boolean or None (default is None)
+             Whether the initial guess should be refined or not.
+
+         pmcc: Boolean (default is False)
+             Use Pearson correlation coefficient i.o. variance.
+
+         normalize: Boolean (default is True)
+             Set mean of each image to 1 if True.
+
+         Returns
+         -------
+         c = [row,column] (or [NaN,NaN] if unsuccessful.)
+
+         2007-01-05 P. Cloetens cloetens@esrf.eu
+         * Initial revision
+         2023-11-10 J. Lesaint jerome.lesaint@esrf.fr
+         * Python conversion.
+         """
+
+         if type(maxsize) in (float, int):
+             maxsize = [int(maxsize), int(maxsize)]
+         elif type(maxsize) in (tuple, list):
+             maxsize = [int(maxsize[0]), int(maxsize[1])]
+         elif maxsize in ([], None, ""):
+             maxsize = [5, 5]
+
+         if refine is None:
+             refine = np.allclose(maxsize, 0.0)
+
+         if normalize:
+             z1 /= np.mean(z1)
+             z2 /= np.mean(z2)
+
+         #####################################
+         # JL : seems useless since func is always called with a first approximate.
+         ## determination of approximative shift (manually or Fourier correlation)
+         # if isinstance(cor_estimate,str):
+         #     if cor_estimate in ('fft','auto','fourier'):
+         #         padding_mode = None
+         #         cor_estimate = self._compute_correlation_fft(
+         #             z1,
+         #             z2,
+         #             padding_mode,
+         #             high_pass=self.high_pass,
+         #             low_pass=self.low_pass
+         #         )
+         #     elif cor_estimate in ('manual','man','m'):
+         #         cor_estimate = None
+         #         # No ImageJ plugin here :
+         #         # rapp = ij_align(z1,z2)
+
+         ####################################
+         # check if refinement with realspace correlation is required
+         # otherwise keep result as it is
+         if np.allclose(maxsize, 0):
+             shiftfound = 1
+             if refine:
+                 c = np.round(np.array(cor_estimate, dtype=int))
+             else:
+                 c = np.array(cor_estimate, dtype=int)
+         else:
+             shiftfound = 0
+             cor_estimate = np.round(np.array(cor_estimate, dtype=int))
+
+         rapp_hist = []
+         if np.sum(np.abs(cor_estimate) + 1 >= z1.shape):
+             self.logger.info(f"Approximate shift of [{cor_estimate[0]},{cor_estimate[1]}] is too large, setting [0 0]")
+             cor_estimate = np.array([0, 0])
+             maxsize = np.minimum(maxsize, np.floor((np.array(z1.shape) - 1) / 2)).astype(int)
+         maxsize = np.minimum(maxsize, np.array(z1.shape) - np.abs(cor_estimate) - 1).astype(int)
+
+         while not shiftfound:
+             # Set z1 region
+             # Rationale: the (shift[0]+maxsize[0]:,shift[1]+maxsize[1]:) block of z1 should match
+             # the (maxsize[0]:,maxisze[1]:)-upper-left corner of z2.
+             # We first extract this z1 block.
+             # Then, take moving z2-block according to maxsize.
+             # Of course, care must be taken with borders, hence the various max,min calls.
+
+             # Extract the reference block
+             shape_ar = np.array(z1.shape)
+             cor_ar = np.array(cor_estimate)
+             maxsize_ar = np.array(maxsize)
+
+             z1beg = np.maximum(cor_ar + maxsize_ar, np.zeros(2, dtype=int))
+             z1end = shape_ar + np.minimum(cor_ar - maxsize_ar, np.zeros(2, dtype=int))
+
+             z1p = z1[z1beg[0] : z1end[0], z1beg[1] : z1end[1]].flatten()
+
+             # Build local correlations array.
+             window_shape = (2 * int(maxsize[0]) + 1, 2 * int(maxsize[1]) + 1)
+             cc = np.zeros(window_shape)
+
+             # Prepare second block indices
+             z2beg = (cor_ar + maxsize_ar > 0) * cc.shape + (cor_ar + maxsize_ar <= 0) * (shape_ar - z1end + z1beg) - 1
+             z2end = z2beg + z1end - z1beg
+
+             if pmcc:
+                 std_z1p = z1p.std()
+             if normalize == 2:
+                 z1p /= z1p.mean()
+
+             for k in range(cc.shape[0]):
+                 for l in range(cc.shape[1]):
+                     if pmcc:
+                         z2p = z2[z2beg[0] - k : z2end[0] - k, z2beg[1] - l : z2end[1] - l].flatten()
+                         std_z2p = z2p.std()
+                         cc[k, l] = -np.cov(z1p, z2p, rowvar=True)[1, 0] / (std_z1p * std_z2p)
+                     else:
+                         if normalize == 2:
+                             z2p = z2[z2beg[0] - k : z2end[0] - k, z2beg[1] - l : z2end[1] - l].flatten()
+                             z2p /= z2p.mean()
+                             z2p -= z1p
+                         else:
+                             z2p = z2[z2beg[0] - k : z2end[0] - k, z2beg[1] - l : z2end[1] - l].flatten()
+                             z2p -= z1p
+                         cc[k, l] = ((z2p - z2p.mean()) ** 2).sum()
+                         # cc(k,l) = std(z1p./z2(z2beg(1)-k:z2end(1)-k,z2beg(2)-l:z2end(2)-l)(:));
+
+             c = np.unravel_index(np.argmin(cc, axis=None), shape=cc.shape)
+
+             if not np.sum((c == 0) + (c == np.array(cc.shape) - 1)):
+                 # check that we are not at the edge of the region that was sampled
+                 x = np.array([-1, 0, 1])
+                 tmp = self.refine_max_position_2d(cc[c[0] - 1 : c[0] + 2, c[1] - 1 : c[1] + 2], x, x)
+                 c += tmp
+                 shiftfound = True
+
+             c += z1beg - z2beg
+
+             rapp_hist = []
+             if not shiftfound:
+                 cor_estimate = c
+                 # Check that new shift estimate was not already done (avoid eternal loop)
+                 if self._checkifpart(cor_estimate, rapp_hist):
+                     if self.verbose:
+                         self.logger.info(f"Stuck in loop?")
+                     refine = True
+                     shiftfound = True
+                     c = np.array([np.nan, np.nan])
+                 else:
+                     rapp_hist.append(cor_estimate)
+                     if self.verbose:
+                         self.logger.info(f"Changing shift estimate: {cor_estimate}")
+                     maxsize = np.minimum(maxsize, np.array(z1.shape) - np.abs(cor_estimate) - 1).astype(int)
+                     if (maxsize == 0).sum():
+                         if self.verbose:
+                             self.logger.info(f"Edge of image reached")
+                         refine = False
+                         shiftfound = True
+                         c = np.array([np.nan, np.nan])
+             elif len(rapp_hist) > 0:
+                 if self.verbose:
+                     self.logger.info("\n")
+
+         ####################################
+         # refine result; useful when shifts are not integer values
+         # JL: I don't understand why this refine step should be useful.
+         # In Octave, from fastomo.m, refine is always set to False.
+         # So this could be ignored.
+         # I keep it for future use if it proves useful.
+         # if refine:
+         #     if debug:
+         #         print('Refining solution ...')
+         #     z2n = self.interpolate(z2,c)
+         #     indices = np.ceil(np.abs(c)).astype(int)
+         #     z1p = np.roll(z1,((c>0) * (-1) * indices),[0,1])
+         #     z1p = z1p[1:-indices[0]-1,1:-indices[1]-1].flatten()
+         #     z2n = np.roll(z2n,((c>0) * (-1) * indices),[0,1])
+         #     z2n = z2n[:-indices[0],:-indices[1]]
+         #     ccrefine = np.zeros([3,3])
+         #     [n2,m2] = z2n.shape
+         #     for k in range(3):
+         #         for l in range(3):
+         #             z2p = z1p - z2n[2-k:n2-k,2-l:m2-l].flatten()
+         #             ccrefine[k,l] = ((z2p - z2p.mean())**2).sum()
+         #     x = np.array([-1,0,1])
+         #     crefine = self.refine_max_position_2d(ccrefine, x, x)
+         #     #crefine = min2par(ccrefine)
+
+         #     # Check if the refinement is effectively confined to subpixel
+         #     if (np.abs(crefine) >= 1).sum():
+         #         self.logger.info("Problems refining result\n")
+         #     else:
+         #         c += crefine
+
+         return c
+
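As an aside, here is a tiny self-contained sketch of the two dissimilarity scores that _local_correlation switches between via the pmcc option; the arrays are toy data, purely illustrative:

    import numpy as np
    rng = np.random.default_rng(0)
    z1p = rng.normal(size=1000)                     # toy flattened reference block
    z2p = z1p + 0.05 * rng.normal(size=1000)        # toy candidate block
    d = z2p - z1p
    var_score = ((d - d.mean()) ** 2).sum()         # pmcc=False: variance-of-difference, lower means better match
    pmcc_score = -np.cov(z1p, z2p)[1, 0] / (z1p.std() * z2p.std())  # pmcc=True: negative Pearson coefficient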
+     def find_shift(
+         self,
+         img_1: np.ndarray,
+         img_2: np.ndarray,
+         side: str,
+         roi_yxhw=None,
+         median_filt_shape=None,
+         padding_mode=None,
+         low_pass=0.01,
+         high_pass=None,
+     ):
+         """Automatically finds the Center of Rotation (CoR), given two images
+         (projections/radiographs). Suitable for half-acquisition scan.
+
+         This method finds the half-shift between two opposite images, by
+         minimizing the variance of small ROI around a global COR estimate
+         (obtained by maximizing Fourier-space computed global correlations).
+
+         The output of this function allows to compute motor movements for
+         aligning the sample rotation axis. Given the following values:
+
+         - L1: distance from source to motor
+         - L2: distance from source to detector
+         - ps: physical pixel size
+         - v: output of this function
+
+         displacement of motor = (L1 / L2 * ps) * v
+
+         Parameters
+         ----------
+         img_1: numpy.ndarray
+             First image
+         img_2: numpy.ndarray
+             Second image, it needs to have been flipped already (e.g. using numpy.fliplr).
+         side: string
+             Expected region of the CoR. Must be 'center' in that case.
+         roi_yxhw: (2, ) or (4, ) numpy.ndarray, tuple, or array, optional
+             4 elements vector containing: vertical and horizontal coordinates
+             of first pixel, plus height and width of the Region of Interest (RoI).
+             Or a 2 elements vector containing: plus height and width of the
+             centered Region of Interest (RoI).
+             Default is None -> deactivated.
+             The ROI will be used for the global estimate.
+         median_filt_shape: (2, ) numpy.ndarray, tuple, or array, optional
+             Shape of the median filter window. Default is None -> deactivated.
+         padding_mode: str in numpy.pad's mode list, optional
+             Padding mode, which determines the type of convolution. If None or
+             'wrap' are passed, this resorts to the traditional circular convolution.
+             If 'edge' or 'constant' are passed, it results in a linear convolution.
+             Default is the circular convolution.
+             All options are:
+             None | 'constant' | 'edge' | 'linear_ramp' | 'maximum' | 'mean'
+             | 'median' | 'minimum' | 'reflect' | 'symmetric' | 'wrap'
+         low_pass: float or sequence of two floats
+             Low-pass filter properties, as described in `nabu.misc.fourier_filters`
+         high_pass: float or sequence of two floats
+             High-pass filter properties, as described in `nabu.misc.fourier_filters`
+
+         Raises
+         ------
+         ValueError
+             In case images are not 2-dimensional or have different sizes.
+
+         Returns
+         -------
+         float
+             Estimated center of rotation position from the center of the RoI in pixels.
+
+         Examples
+         --------
+         The following code computes the center of rotation position for two
+         given images in a tomography scan, where the second image is taken at
+         180 degrees from the first.
+
+         >>> radio1 = data[0, :, :]
+         ... radio2 = np.fliplr(data[1, :, :])
+         ... CoR_calc = CenterOfRotationOctaveAccurate()
+         ... cor_position = CoR_calc.find_shift(radio1, radio2)
+
+         Or for noisy images:
+
+         >>> cor_position = CoR_calc.find_shift(radio1, radio2, median_filt_shape=(3, 3))
+         """
+
+         self.logger.info(
+             f"Estimation of the COR with following options: high_pass={high_pass}, low_pass={low_pass}, limz={self.cor_options['limz']}."
+         )
+
+         self._check_img_pair_sizes(img_1, img_2)
+
+         if side != "center":
+             self.logger.fatal(
+                 "The accurate algorithm cannot handle half acq. Use 'near', 'fourier-angles', 'sliding-window' or 'growing-window' instead."
+             )
+             raise ValueError(
+                 "The accurate algorithm cannot handle half acq. Use 'near', 'fourier-angles', 'sliding-window' or 'growing-window' instead."
+             )
+
+         img_shape = img_2.shape
+         roi_yxhw = self._determine_roi(img_shape, roi_yxhw)
+
+         img_1 = self._prepare_image(img_1, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
+         img_2 = self._prepare_image(img_2, roi_yxhw=roi_yxhw, median_filt_shape=median_filt_shape)
+
+         cc = self._compute_correlation_fft(
+             img_1,
+             img_2,
+             padding_mode,
+             high_pass=high_pass,
+             low_pass=low_pass,
+         )
+
+         # We use fftshift to deal more easily with negative shifts.
+         # This has a cost of subtracting half the image shape afterward.
+         shift = np.unravel_index(np.argmax(np.fft.fftshift(cc)), img_shape)
+         shift -= np.array(img_shape) // 2
+
+         # The real "accurate" starts here (i.e. the octave findshift() func).
+         if np.abs(shift[0]) > 10 * self.cor_options["limz"]:
+             # This is suspicious. We don't trust results of correlate.
+             self.logger.info(f"Pre-correlation yields {shift[0]} pixels vertical motion")
+             self.logger.info(f"We do not consider it.")
+             shift = (0, 0)
+
+         # Limit the size of region for comparison to cutsize in both directions.
+         # Hard-coded?
+         cutsize = img_shape[1] // 2
+         oldshift = np.round(shift).astype(int)
+         if (img_shape[0] > cutsize) or (img_shape[1] > cutsize):
+             im0 = self._cut(img_1, min(img_shape[0], cutsize), min(img_shape[1], cutsize))
+             im1 = self._cut(
+                 np.roll(img_2, oldshift, axis=(0, 1)), min(img_shape[0], cutsize), min(img_shape[1], cutsize)
+             )
+             shift = oldshift + self._local_correlation(
+                 im0,
+                 im1,
+                 maxsize=self.cor_options["maxsize"],
+                 refine=self.cor_options["refine"],
+                 pmcc=self.cor_options["pmcc"],
+                 normalize=self.cor_options["normalize"],
+             )
+         else:
+             shift = self._local_correlation(
+                 img_1,
+                 img_2,
+                 maxsize=self.cor_options["maxsize"],
+                 cor_estimate=oldshift,
+                 refine=self.cor_options["refine"],
+                 pmcc=self.cor_options["pmcc"],
+                 normalize=self.cor_options["normalize"],
+             )
+         if ((shift - oldshift) ** 2).sum() > 4:
+             self.logger.info(f"Pre-correlation ({oldshift}) and accurate correlation ({shift}) are not consistent.")
+             self.logger.info("Please check!!!")
+
+         offset = shift[1] / 2
+
+         if np.abs(shift[0]) > self.cor_options["limz"]:
+             self.logger.info("Verify alignment or sample motion.")
+             self.logger.info(f"Vertical motion: {shift[0]} pixels.")
+             self.logger.info(f"Offset?: {offset} pixels.")
+         else:
+             self.logger.info(f"Offset?: {offset} pixels.")
+
+         return offset
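To close, a one-line worked example of the motor-displacement formula quoted in the find_shift docstring above; all numbers are hypothetical:

    L1, L2 = 0.5, 1.0    # source-to-motor and source-to-detector distances, same unit
    ps = 6.5e-6          # physical pixel size, in metres
    v = 12.3             # offset returned by find_shift, in pixels
    displacement = (L1 / L2 * ps) * v   # about 4.0e-5 m, i.e. roughly 40 micrometres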