nabu 2023.2.1__py3-none-any.whl → 2024.1.0rc3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (183)
  1. doc/conf.py +1 -1
  2. doc/doc_config.py +32 -0
  3. nabu/__init__.py +2 -1
  4. nabu/app/bootstrap_stitching.py +1 -1
  5. nabu/app/cli_configs.py +122 -2
  6. nabu/app/composite_cor.py +27 -2
  7. nabu/app/correct_rot.py +70 -0
  8. nabu/app/create_distortion_map_from_poly.py +42 -18
  9. nabu/app/diag_to_pix.py +358 -0
  10. nabu/app/diag_to_rot.py +449 -0
  11. nabu/app/generate_header.py +4 -3
  12. nabu/app/histogram.py +2 -2
  13. nabu/app/multicor.py +6 -1
  14. nabu/app/parse_reconstruction_log.py +151 -0
  15. nabu/app/prepare_weights_double.py +83 -22
  16. nabu/app/reconstruct.py +5 -1
  17. nabu/app/reconstruct_helical.py +7 -0
  18. nabu/app/reduce_dark_flat.py +6 -3
  19. nabu/app/rotate.py +4 -4
  20. nabu/app/stitching.py +16 -2
  21. nabu/app/tests/test_reduce_dark_flat.py +18 -2
  22. nabu/app/validator.py +4 -4
  23. nabu/cuda/convolution.py +8 -376
  24. nabu/cuda/fft.py +4 -0
  25. nabu/cuda/kernel.py +4 -4
  26. nabu/cuda/medfilt.py +5 -158
  27. nabu/cuda/padding.py +5 -71
  28. nabu/cuda/processing.py +23 -2
  29. nabu/cuda/src/ElementOp.cu +78 -0
  30. nabu/cuda/src/backproj.cu +28 -2
  31. nabu/cuda/src/fourier_wavelets.cu +2 -2
  32. nabu/cuda/src/normalization.cu +23 -0
  33. nabu/cuda/src/padding.cu +2 -2
  34. nabu/cuda/src/transpose.cu +16 -0
  35. nabu/cuda/utils.py +39 -0
  36. nabu/estimation/alignment.py +10 -1
  37. nabu/estimation/cor.py +808 -38
  38. nabu/estimation/cor_sino.py +7 -9
  39. nabu/estimation/tests/test_cor.py +85 -3
  40. nabu/io/reader.py +26 -18
  41. nabu/io/tests/test_cast_volume.py +3 -3
  42. nabu/io/tests/test_detector_distortion.py +3 -3
  43. nabu/io/tiffwriter_zmm.py +2 -2
  44. nabu/io/utils.py +14 -4
  45. nabu/io/writer.py +5 -3
  46. nabu/misc/fftshift.py +6 -0
  47. nabu/misc/histogram.py +5 -285
  48. nabu/misc/histogram_cuda.py +8 -104
  49. nabu/misc/kernel_base.py +3 -121
  50. nabu/misc/padding_base.py +5 -69
  51. nabu/misc/processing_base.py +3 -107
  52. nabu/misc/rotation.py +5 -62
  53. nabu/misc/rotation_cuda.py +5 -65
  54. nabu/misc/transpose.py +6 -0
  55. nabu/misc/unsharp.py +3 -78
  56. nabu/misc/unsharp_cuda.py +5 -52
  57. nabu/misc/unsharp_opencl.py +8 -85
  58. nabu/opencl/fft.py +6 -0
  59. nabu/opencl/kernel.py +21 -6
  60. nabu/opencl/padding.py +5 -72
  61. nabu/opencl/processing.py +27 -5
  62. nabu/opencl/src/backproj.cl +3 -3
  63. nabu/opencl/src/fftshift.cl +65 -12
  64. nabu/opencl/src/padding.cl +2 -2
  65. nabu/opencl/src/roll.cl +96 -0
  66. nabu/opencl/src/transpose.cl +16 -0
  67. nabu/pipeline/config_validators.py +63 -3
  68. nabu/pipeline/dataset_validator.py +2 -2
  69. nabu/pipeline/estimators.py +193 -35
  70. nabu/pipeline/fullfield/chunked.py +34 -17
  71. nabu/pipeline/fullfield/chunked_cuda.py +7 -5
  72. nabu/pipeline/fullfield/computations.py +48 -13
  73. nabu/pipeline/fullfield/nabu_config.py +13 -13
  74. nabu/pipeline/fullfield/processconfig.py +10 -5
  75. nabu/pipeline/fullfield/reconstruction.py +1 -2
  76. nabu/pipeline/helical/fbp.py +5 -0
  77. nabu/pipeline/helical/filtering.py +12 -9
  78. nabu/pipeline/helical/gridded_accumulator.py +179 -33
  79. nabu/pipeline/helical/helical_chunked_regridded.py +262 -151
  80. nabu/pipeline/helical/helical_chunked_regridded_cuda.py +4 -11
  81. nabu/pipeline/helical/helical_reconstruction.py +56 -18
  82. nabu/pipeline/helical/span_strategy.py +1 -1
  83. nabu/pipeline/helical/tests/test_accumulator.py +4 -0
  84. nabu/pipeline/params.py +23 -2
  85. nabu/pipeline/processconfig.py +3 -8
  86. nabu/pipeline/tests/test_chunk_reader.py +78 -0
  87. nabu/pipeline/tests/test_estimators.py +120 -2
  88. nabu/pipeline/utils.py +25 -0
  89. nabu/pipeline/writer.py +2 -0
  90. nabu/preproc/ccd_cuda.py +9 -7
  91. nabu/preproc/ctf.py +21 -26
  92. nabu/preproc/ctf_cuda.py +25 -25
  93. nabu/preproc/double_flatfield.py +14 -2
  94. nabu/preproc/double_flatfield_cuda.py +7 -11
  95. nabu/preproc/flatfield_cuda.py +23 -27
  96. nabu/preproc/phase.py +19 -24
  97. nabu/preproc/phase_cuda.py +21 -21
  98. nabu/preproc/shift_cuda.py +58 -28
  99. nabu/preproc/tests/test_ctf.py +5 -5
  100. nabu/preproc/tests/test_double_flatfield.py +2 -2
  101. nabu/preproc/tests/test_vshift.py +13 -2
  102. nabu/processing/__init__.py +0 -0
  103. nabu/processing/convolution_cuda.py +375 -0
  104. nabu/processing/fft_base.py +163 -0
  105. nabu/processing/fft_cuda.py +256 -0
  106. nabu/processing/fft_opencl.py +54 -0
  107. nabu/processing/fftshift.py +134 -0
  108. nabu/processing/histogram.py +286 -0
  109. nabu/processing/histogram_cuda.py +103 -0
  110. nabu/processing/kernel_base.py +126 -0
  111. nabu/processing/medfilt_cuda.py +159 -0
  112. nabu/processing/muladd.py +29 -0
  113. nabu/processing/muladd_cuda.py +68 -0
  114. nabu/processing/padding_base.py +71 -0
  115. nabu/processing/padding_cuda.py +75 -0
  116. nabu/processing/padding_opencl.py +77 -0
  117. nabu/processing/processing_base.py +123 -0
  118. nabu/processing/roll_opencl.py +64 -0
  119. nabu/processing/rotation.py +63 -0
  120. nabu/processing/rotation_cuda.py +66 -0
  121. nabu/processing/tests/__init__.py +0 -0
  122. nabu/processing/tests/test_fft.py +268 -0
  123. nabu/processing/tests/test_fftshift.py +71 -0
  124. nabu/{misc → processing}/tests/test_histogram.py +2 -4
  125. nabu/{cuda → processing}/tests/test_medfilt.py +1 -1
  126. nabu/processing/tests/test_muladd.py +54 -0
  127. nabu/{cuda → processing}/tests/test_padding.py +119 -75
  128. nabu/processing/tests/test_roll.py +63 -0
  129. nabu/{misc → processing}/tests/test_rotation.py +3 -2
  130. nabu/processing/tests/test_transpose.py +72 -0
  131. nabu/{misc → processing}/tests/test_unsharp.py +41 -8
  132. nabu/processing/transpose.py +126 -0
  133. nabu/processing/unsharp.py +79 -0
  134. nabu/processing/unsharp_cuda.py +53 -0
  135. nabu/processing/unsharp_opencl.py +75 -0
  136. nabu/reconstruction/fbp.py +34 -10
  137. nabu/reconstruction/fbp_base.py +35 -16
  138. nabu/reconstruction/fbp_opencl.py +7 -12
  139. nabu/reconstruction/filtering.py +2 -2
  140. nabu/reconstruction/filtering_cuda.py +13 -14
  141. nabu/reconstruction/filtering_opencl.py +3 -4
  142. nabu/reconstruction/projection.py +2 -0
  143. nabu/reconstruction/rings.py +158 -1
  144. nabu/reconstruction/rings_cuda.py +218 -58
  145. nabu/reconstruction/sinogram_cuda.py +16 -12
  146. nabu/reconstruction/tests/test_deringer.py +116 -14
  147. nabu/reconstruction/tests/test_fbp.py +22 -31
  148. nabu/reconstruction/tests/test_filtering.py +11 -2
  149. nabu/resources/dataset_analyzer.py +89 -26
  150. nabu/resources/nxflatfield.py +2 -2
  151. nabu/resources/tests/test_nxflatfield.py +1 -1
  152. nabu/resources/utils.py +9 -2
  153. nabu/stitching/alignment.py +184 -0
  154. nabu/stitching/config.py +241 -39
  155. nabu/stitching/definitions.py +6 -0
  156. nabu/stitching/frame_composition.py +4 -2
  157. nabu/stitching/overlap.py +99 -3
  158. nabu/stitching/sample_normalization.py +60 -0
  159. nabu/stitching/slurm_utils.py +10 -10
  160. nabu/stitching/tests/test_alignment.py +99 -0
  161. nabu/stitching/tests/test_config.py +16 -1
  162. nabu/stitching/tests/test_overlap.py +68 -2
  163. nabu/stitching/tests/test_sample_normalization.py +49 -0
  164. nabu/stitching/tests/test_slurm_utils.py +5 -5
  165. nabu/stitching/tests/test_utils.py +3 -33
  166. nabu/stitching/tests/test_z_stitching.py +391 -22
  167. nabu/stitching/utils.py +144 -202
  168. nabu/stitching/z_stitching.py +309 -126
  169. nabu/testutils.py +18 -0
  170. nabu/thirdparty/tomocupy_remove_stripe.py +586 -0
  171. nabu/utils.py +32 -6
  172. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/LICENSE +1 -1
  173. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/METADATA +5 -5
  174. nabu-2024.1.0rc3.dist-info/RECORD +296 -0
  175. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/WHEEL +1 -1
  176. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/entry_points.txt +5 -1
  177. nabu/conftest.py +0 -14
  178. nabu/opencl/fftshift.py +0 -92
  179. nabu/opencl/tests/test_fftshift.py +0 -55
  180. nabu/opencl/tests/test_padding.py +0 -84
  181. nabu-2023.2.1.dist-info/RECORD +0 -252
  182. /nabu/cuda/src/{fftshift.cu → dfi_fftshift.cu} +0 -0
  183. {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/top_level.txt +0 -0
--- a/nabu/pipeline/fullfield/nabu_config.py
+++ b/nabu/pipeline/fullfield/nabu_config.py
@@ -40,14 +40,14 @@ nabu_config = {
  },
  "projections_subsampling": {
  "default": "1",
- "help": "Projections subsampling factor: take one projection out of 'projection_subsampling'",
- "validator": binning_validator,
+ "help": "Projections subsampling factor: take one projection out of 'projection_subsampling'. The format can be an integer (take 1 projection out of N), or N:M (take 1 projection out of N, start with the projection number M)\nFor example: 2 (or 2:0) to reconstruct from even projections, 2:1 to reconstruct from odd projections.",
+ "validator": projections_subsampling_validator,
  "type": "advanced",
  },
  "exclude_projections": {
  "default": "",
- "help": "Path to a file name containing projections to exclude (projection indices).",
- "validator": optional_file_location_validator,
+ "help": "Projection to exclude from the reconstruction. It can be:\n-indices = exclude_projections_indices.txt : Path to a text file with one integer per line. Each corresponding projection INDEX will be ignored.\n-angles = exclude_projections_angles.txt : Path to a text file with angle in DEGREES, one per line. The corresponding angles will be ignored\n-angular_range = [a, b] : ignore angles belonging to angular range [a, b] in degrees, with b included.",
+ "validator": exclude_projections_validator,
  "type": "advanced",
  },
  "overwrite_metadata": {
@@ -77,7 +77,7 @@ nabu_config = {
  "type": "advanced",
  },
  "normalize_srcurrent": {
- "default": "0",
+ "default": "1",
  "help": "Whether to normalize frames with Synchrotron Current. This can correct the effect of a beam refill not taken into account by flats.",
  "validator": boolean_validator,
  "type": "advanced",
@@ -159,13 +159,13 @@ nabu_config = {
  },
  "sino_rings_correction": {
  "default": "",
- "help": "Sinogram rings removal method. Default (empty) is None. Available are: None, munch. See also: sino_rings_options",
+ "help": "Sinogram rings removal method. Default (empty) is None. Available are: None, munch, vo, mean-subtraction, mean-division. See also: sino_rings_options",
  "validator": sino_deringer_methods,
  "type": "optional",
  },
  "sino_rings_options": {
- "default": "sigma=1.0 ; levels=10 ; padding=False",
- "help": "Options for sinogram rings correction methods. The parameters are separated by commas and passed as 'name=value', for example: sigma=1.0;levels=10. Mind the semicolon separator (;).",
+ "default": "",
+ "help": "Options for sinogram rings correction methods. The parameters are separated by commas and passed as 'name=value'. Mind the semicolon separator (;). The default options are the following:\n-For munch: sigma=1.0 ; levels=10 ; padding=False\n-For vo: snr=3.0; la_size=51; sm_size=21; dim=1\n-For mean-subtraction and mean-division: filter_cutoff=(0, 30)",
  "validator": generic_options_validator,
  "type": "advanced",
  },
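sino_rings_options and the other '*_options' entries share the same 'name=value ; name=value' convention. A rough standalone sketch of how such a string maps to a parameter dictionary (the parse_options helper is hypothetical; nabu has its own extract_parameters utility, imported elsewhere in this diff, for the real job):

    def parse_options(opts_str):
        # "snr=3.0; la_size=51; sm_size=21; dim=1" -> {"snr": 3.0, "la_size": 51, ...}
        result = {}
        for item in filter(None, (s.strip() for s in opts_str.split(";"))):
            name, _, value = item.partition("=")
            value = value.strip().strip("'")
            try:
                value = float(value) if "." in value else int(value)
            except ValueError:
                pass                      # keep non-numeric values such as 'False' as strings
            result[name.strip()] = value
        return result

    print(parse_options("snr=3.0; la_size=51; sm_size=21; dim=1"))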
@@ -189,7 +189,7 @@ nabu_config = {
  },
  "autotilt_options": {
  "default": "",
- "help": "Options for methods computing automatically the detector tilt. The parameters are separated by commas and passed as 'name=value', for example: low_pass=1; high_pass=20. Mind the semicolon separator (;).",
+ "help": "Options for methods computing automatically the detector tilt. The parameters are separated by commas and passed as 'name=value', for example: low_pass=1; high_pass=20. Mind the semicolon separator (;). Use 'value' ('') for values that are strings",
  "validator": generic_options_validator,
  "type": "advanced",
  },
@@ -259,13 +259,13 @@ nabu_config = {
  },
  "rotation_axis_position": {
  "default": "sliding-window",
- "help": "Rotation axis position. It can be a number or the name of an estimation method (empty value means the middle of the detector).\nThe following methods are available to find automatically the Center of Rotation (CoR):\n - centered : a fast and simple auto-CoR method. It only works when the CoR is not far from the middle of the detector. It does not work for half-tomography.\n - global : a slow but robust auto-CoR.\n - sliding-window : semi-automatically find the CoR with a sliding window. You have to specify on which side the CoR is (left, center, right). Please see the 'cor_options' parameter.\n - growing-window : automatically find the CoR with a sliding-and-growing window. You can tune the option with the parameter 'cor_options'.\n - sino-coarse-to-fine: Estimate CoR from sinogram. Only works for 360 degrees scans.\n - composite-coarse-to-fine: Estimate CoR from composite multi-angle images. Only works for 360 degrees scans.",
+ "help": "Rotation axis position. It can be a number or the name of an estimation method (empty value means the middle of the detector).\nThe following methods are available to find automatically the Center of Rotation (CoR):\n - centered : a fast and simple auto-CoR method. It only works when the CoR is not far from the middle of the detector. It does not work for half-tomography.\n - global : a slow but robust auto-CoR.\n - sliding-window : semi-automatically find the CoR with a sliding window. You have to specify on which side the CoR is (left, center, right). Please see the 'cor_options' parameter.\n - growing-window : automatically find the CoR with a sliding-and-growing window. You can tune the option with the parameter 'cor_options'.\n - sino-coarse-to-fine: Estimate CoR from sinogram. Only works for 360 degrees scans.\n - composite-coarse-to-fine: Estimate CoR from composite multi-angle images. Only works for 360 degrees scans.\n - fourier-angles: Estimate CoR from sino based on an angular correlation analysis. You can tune the option with the parameter 'cor_options'.\n - octave-accurate: Legacy from octave accurate COR estimation algorithm. It first estimates the COR with global fourier-based correlation, then refines this estimation with local correlation based on the variance of the difference patches. You can tune the option with the parameter 'cor_options'.",
  "validator": cor_validator,
  "type": "required",
  },
  "cor_options": {
- "default": "",
- "help": "Options for methods finding automatically the rotation axis position. The parameters are separated by commas and passed as 'name=value', for example: low_pass=1; high_pass=20. Mind the semicolon separator (;).",
+ "default": "side='from_file'",
+ "help": "Options for methods finding automatically the rotation axis position. The parameters are separated by commas and passed as 'name=value'.\nFor example: low_pass=1; high_pass=20. Mind the semicolon separator (;) and the '' for string values that are strings.\nIf 'side' is set, it is expected to be either:\n - 'from_file' (to pick the value in the NX file.)\n - or an relative CoR position in pixels (if so, it overrides the value in the NX file), \n or any of 'left', 'center', 'right', 'all', 'near'.\n The default value for 'side' is 'from_file'.",
  "validator": generic_options_validator,
  "type": "advanced",
  },
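The sinogram- and Fourier-based estimators listed above exploit the fact that, in a 360-degree parallel-beam scan, the projection at angle θ + 180° is the mirror of the projection at θ about the rotation axis. A toy 1D illustration of that principle only (plain numpy cross-correlation; nabu's actual fourier-angles and octave-accurate estimators are far more elaborate and robust):

    import numpy as np

    def naive_cor(line_0, line_180):
        # line_0, line_180: the same detector row at angles 0 and 180 degrees.
        # Correlating line_0 with the flipped line_180 peaks at lag = 2 * offset,
        # where offset is the rotation axis position relative to the detector center.
        a = line_0 - line_0.mean()
        b = line_180[::-1] - line_180.mean()
        lag = np.argmax(np.correlate(a, b, mode="full")) - (len(a) - 1)
        return (len(a) - 1) / 2 + lag / 2   # toy CoR estimate, not nabu's algorithm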
@@ -336,7 +336,7 @@ nabu_config = {
  "type": "optional",
  },
  "centered_axis": {
- "default": "0",
+ "default": "1",
  "help": "If set to true, the reconstructed region is centered on the rotation axis, i.e the center of the image will be the rotation axis position.",
  "validator": boolean_validator,
  "type": "optional",
--- a/nabu/pipeline/fullfield/processconfig.py
+++ b/nabu/pipeline/fullfield/processconfig.py
@@ -1,7 +1,6 @@
  import os
  import posixpath
  import numpy as np
- from typing import Optional
  from silx.io import get_data
  from silx.io.url import DataUrl
  from ...utils import copy_dict_items, compare_dicts
@@ -10,12 +9,11 @@ from ...io.reader import import_h5_to_dict
  from ...resources.utils import extract_parameters, get_values_from_file
  from ...resources.nxflatfield import update_dataset_info_flats_darks
  from ...resources.utils import get_quantities_and_units
- from ...reconstruction.sinogram import get_extended_sinogram_width
  from ..estimators import estimate_cor
  from ..processconfig import ProcessConfigBase
  from .nabu_config import nabu_config, renamed_keys
  from .dataset_validator import FullFieldDatasetValidator
- from tomoscan.esrf.scan.hdf5scan import ImageKey
+ from nxtomo.nxobject.nxdetector import ImageKey


  class ProcessConfig(ProcessConfigBase):
@@ -447,6 +445,8 @@ class ProcessConfig(ProcessConfigBase):
  options["double_flatfield"] = {
  "sigma": nabu_config["preproc"]["dff_sigma"],
  "processes_file": nabu_config["preproc"]["processes_file"],
+ "log_min_clip": nabu_config["preproc"]["log_min_clip"],
+ "log_max_clip": nabu_config["preproc"]["log_max_clip"],
  }
  #
  # Radios rotation (do it here if possible)
@@ -481,7 +481,11 @@
  #
  # Unsharp
  #
- if nabu_config["phase"]["unsharp_method"] is not None and nabu_config["phase"]["unsharp_coeff"] > 0:
+ if (
+ nabu_config["phase"]["unsharp_method"] is not None
+ and nabu_config["phase"]["unsharp_coeff"] > 0
+ and nabu_config["phase"]["unsharp_sigma"] > 0
+ ):
  tasks.append("unsharp_mask")
  options["unsharp_mask"] = copy_dict_items(
  nabu_config["phase"], ["unsharp_coeff", "unsharp_sigma", "unsharp_method"]
@@ -525,6 +529,7 @@
  if nabu_config["preproc"]["sino_rings_correction"]:
  tasks.append("sino_rings_correction")
  options["sino_rings_correction"] = {
+ "method": nabu_config["preproc"]["sino_rings_correction"],
  "user_options": nabu_config["preproc"]["sino_rings_options"],
  }
  #
@@ -568,7 +573,7 @@
  ) # pix size is in microns in dataset_info

  # x/y/z position information
- def get_mean_pos(position_array: Optional[np.ndarray]) -> Optional[float]:
+ def get_mean_pos(position_array):
  if position_array is None:
  return None
  else:
--- a/nabu/pipeline/fullfield/reconstruction.py
+++ b/nabu/pipeline/fullfield/reconstruction.py
@@ -13,7 +13,7 @@ from ...resources.utils import is_hdf5_extension
  from ...io.writer import merge_hdf5_files, NXProcessWriter
  from ...cuda.utils import collect_cuda_gpus, __has_pycuda__
  from ...preproc.phase import compute_paganin_margin
- from ...misc.histogram import PartialHistogram, add_last_bin, hist_as_2Darray
+ from ...processing.histogram import PartialHistogram, add_last_bin, hist_as_2Darray
  from .chunked import ChunkedPipeline
  from .computations import estimate_max_chunk_size

@@ -321,7 +321,6 @@ class FullFieldReconstructor:
  delta_beta=phase_options["delta_beta"],
  pixel_size=phase_options["pixel_size_m"],
  padding=phase_options["padding_type"],
- use_rfft=False, # disable fftw here
  )
  elif phase_options["method"] == "CTF":
  # The whole projection has to be processed!
--- a/nabu/pipeline/helical/fbp.py
+++ b/nabu/pipeline/helical/fbp.py
@@ -18,6 +18,11 @@ class BackprojectorHelical(Backprojector):
  *
  """

+ def __init__(self, *args, **kwargs):
+ """This became needed after the _d_sino allocation was removed from the base class"""
+ super().__init__(*args, **kwargs)
+ self._d_sino = self._processing.allocate_array("d_sino", self.sino_shape, "f")
+
  def set_custom_angles_and_axis_corrections(self, angles_rad, x_per_proj):
  """To arbitrarily change angles
  Parameters
--- a/nabu/pipeline/helical/filtering.py
+++ b/nabu/pipeline/helical/filtering.py
@@ -1,9 +1,8 @@
  # pylint: disable=too-many-arguments
- import math
  import numpy as np
  from ...utils import get_cuda_srcfile, updiv
  from ...reconstruction.filtering import get_next_power
- from ...reconstruction.filtering_cuda import CudaSinoFilter, CudaKernel, garray
+ from ...reconstruction.filtering_cuda import CudaSinoFilter

  # pylint: disable=too-many-arguments,too-many-instance-attributes,too-many-function-args

@@ -65,35 +64,39 @@ class HelicalSinoFilter(CudaSinoFilter):

  self.kern_args = (self.d_sino_f, self.d_filter_f)
  self.kern_args += self.d_sino_f.shape[::-1]
- self._pad_mirror_edges_kernel = CudaKernel(
+ self._pad_mirror_edges_kernel = self.cuda.kernel(
  "padding",
  filename=get_cuda_srcfile("helical_padding.cu"),
  signature="PPfiiiii",
  options=[str("-DMIRROR_EDGES")],
  )
- self._pad_mirror_constant_kernel = CudaKernel(
+ self._pad_mirror_constant_kernel = self.cuda.kernel(
  "padding",
  filename=get_cuda_srcfile("helical_padding.cu"),
  signature="PPfiiiiiff",
  options=[str("-DMIRROR_CONSTANT")],
  )

- self._pad_mirror_edges_variable_rot_pos_kernel = CudaKernel(
+ self._pad_mirror_edges_variable_rot_pos_kernel = self.cuda.kernel(
  "padding",
  filename=get_cuda_srcfile("helical_padding.cu"),
  signature="PPPiiiii",
  options=[str("-DMIRROR_EDGES_VARIABLE_ROT_POS")],
  )
- self._pad_mirror_constant_variable_rot_pos_kernel = CudaKernel(
+ self._pad_mirror_constant_variable_rot_pos_kernel = self.cuda.kernel(
  "padding",
  filename=get_cuda_srcfile("helical_padding.cu"),
  signature="PPPiiiiiff",
  options=[str("-DMIRROR_CONSTANT_VARIABLE_ROT_POS")],
  )

- self.d_mirror_indexes = garray.zeros((self.sino_padded_shape[-2],), np.int32)
- self.d_variable_rot_pos = garray.zeros((self.sino_padded_shape[-2],), np.float32)
- self._pad_edges_kernel = CudaKernel(
+ self.d_mirror_indexes = self.cuda.allocate_array(
+ "d_mirror_indexes", (self.sino_padded_shape[-2],), dtype=np.int32
+ )
+ self.d_variable_rot_pos = self.cuda.allocate_array(
+ "d_variable_rot_pos", (self.sino_padded_shape[-2],), dtype=np.float32
+ )
+ self._pad_edges_kernel = self.cuda.kernel(
  "padding_edge", filename=get_cuda_srcfile("padding.cu"), signature="Piiiiiiii"
  )
  self._pad_block = (32, 32, 1)
--- a/nabu/pipeline/helical/gridded_accumulator.py
+++ b/nabu/pipeline/helical/gridded_accumulator.py
@@ -5,6 +5,8 @@ import math


  class GriddedAccumulator:
+ nominal_current = 0.2
+
  def __init__(
  self,
  gridded_radios,
@@ -12,11 +14,16 @@
  diagnostic_radios,
  diagnostic_weights,
  diagnostic_angles,
+ diagnostic_searched_angles_rad_clipped,
+ diagnostic_zpix_transl,
+ diag_zpro_run=0,
  dark=None,
  flat_indexes=None,
  flats=None,
  weights=None,
  double_flat=None,
+ radios_srcurrent=None,
+ flats_srcurrent=None,
  ):
  """
  This class creates, for a selected volume slab, a standard set of radios from an helical dataset.
@@ -33,22 +40,34 @@
  gridded_weights : 3d np.array
  same shape as gridded_radios, but it will accumulate the weights, during calls to
  extract_preprocess_with_flats
- diagnostic_radios : 3d np.array, a stack composed of two radios
+ diag_zpro_run: int
+ if > 0 then only the diagnostics are filled, and no accumulation is done
+
+ diagnostic_searched_angles_rad_clipped:
+ the angles between 0 and 2pi. The contributions to diagnostic will be searched for these angles
+ plus for the same angles + 2pi ( following turn)
+
+ diagnostic_radios : 3d np.array, a stack composed of
  each radio must have the same size as a radio of the gridded_radios argument.
  During the calls to extract_preprocess_with_flats methods,
- the first radio will collect the transformed data for angle=0 ( and the neighbouring ones
- according to angular interpolation coefficients) and this only for the first occurring turn.
- The second radio will be initialised at the second turn, if any. These array are meant to be used
- to check the translation step over one turn.
+ the radios will collect the transformed data for the angles given by diagnostic_searched_angles_rad_clipped and redundancy
  diagnostic_weights: 3d np.array a stack composed of two radios
  Same shape as diagnostic_radios. The weigths for diagnostic radios ( will be zero on pixel
  where no data is available, or where the weight is null)
  diagnostic_angles : 1D np.array
- Must have shape==(2,). The two entries will be filled with the angles at which the contributions
+ Must have shape==(2*len(diagnostic_searched_angles_rad_clipped),). The entries will be filled with the angles at which the contributions
  to diagnostic_radios have been summed.
- dark = None or 2D np.array
+ diagnostic_zpix_transl: 1D np.array
+ same as for diagnostic_angles, but for vertical translation in pixels
+ dark: None or 2D np.array
  must have the shape of the detector ( generally larger that a radio of gridded_radios)
  If given, the dark will be subtracted from data and flats.
+ radios_srcurrent: 1D np.array
+ the machine current for every radio
+ flats_srcurrent: 1D np.array
+ the machine current for every flat
+
+
  flat_indexes: None or a list of integers
  the projection index corresponding to the flats
  flats : None or 3D np.array
@@ -61,12 +80,22 @@
  If given, the double flat will be applied (division by double_flat)
  Must have the same shape as the detector.
  """
+ self.diag_zpro_run = diag_zpro_run
  self.gridded_radios = gridded_radios
  self.gridded_weights = gridded_weights
  self.diagnostic_radios = diagnostic_radios
  self.diagnostic_weights = diagnostic_weights
  self.diagnostic_angles = diagnostic_angles
+
+ self.diagnostic_zpix_transl = diagnostic_zpix_transl
+ self.diagnostic_searched_angles_rad_clipped = diagnostic_searched_angles_rad_clipped
+
  self.dark = dark
+
+ self.radios_srcurrent = radios_srcurrent
+
+ self.flats_srcurrent = flats_srcurrent
+
  self.flat_indexes = flat_indexes
  self.flat_indexes_reverse_map = dict(
  [(global_index, local_index) for (local_index, global_index) in enumerate(flat_indexes)]
@@ -76,7 +105,14 @@
  self.double_flat = double_flat

  def extract_preprocess_with_flats(
- self, subchunk_slice, subchunk_file_indexes, chunk_info, subr_start_end, dtasrc_start_end, data_raw
+ self,
+ subchunk_slice,
+ subchunk_file_indexes,
+ chunk_info,
+ subr_start_end,
+ dtasrc_start_end,
+ data_raw,
+ radios_angular_range_slicing,
  ):
  """
  This functions is meant to be called providing, each time, a subset of the data
@@ -120,6 +156,13 @@
  They indicated, vertically, the detector portion the data_raw data correspond to
  data_raw: np.array 3D
  the data which correspond to a limited detector stripe and a limited angular subset
+ radios_angular_range_slicing:
+ my_subsampled_indexes is important in order to compare the
+ radios positions with respect to the flat position, and these position
+ are given as the sequential acquisition number which counts everything ( flats, darks, radios )
+ Insteqd, in order to access array which spans only the radios, we need to have an idea of where we are.
+ this is provided by radios_angular_range_slicing which addresses the radios domain
+
  """

  # the object below is going to containing some auxiliary variable that are use to reframe the data.
@@ -141,7 +184,9 @@
  )

  # extraction of the data
- self._extract_preprocess_with_flats(data_raw, reframing_infos, chunk_info, radios_subset)
+ self._extract_preprocess_with_flats(
+ data_raw, reframing_infos, chunk_info, radios_subset, radios_angular_range_slicing
+ )

  if self.weights is not None:
  # ... and, if required, extraction of the associated weights
@@ -156,7 +201,9 @@
  # and the remaining part is a simple projection over the accumulators, for
  # the data and for the weights
  my_angles = chunk_info.angles_rad[subchunk_slice]
+
  n_gridded_angles = self.gridded_radios.shape[0]
+
  my_i_float = my_angles * (n_gridded_angles / (2 * math.pi))

  tmp_i_rounded = np.floor(my_i_float).astype(np.int32)
@@ -164,17 +211,52 @@
  my_i0 = np.mod(tmp_i_rounded, n_gridded_angles)
  my_i1 = np.mod(my_i0 + 1, n_gridded_angles)

- for i0, epsilon, i1, data, weight, original_angle in zip(
- my_i0, my_epsilon, my_i1, radios_subset, radios_weights_subset, chunk_info.angles_rad[subchunk_slice]
+ if self.diag_zpro_run:
+ # these are used only when collection the diagnostics
+
+ # an estimation of the angular step
+ my_angle_step_rad = abs(np.diff(chunk_info.angles_rad[subchunk_slice]).mean())
+ my_angles_02pi = np.mod(my_angles, 2 * np.pi)
+ # bins are delimited by ticks
+ ticks = np.empty(2 * len(self.diagnostic_searched_angles_rad_clipped), "f")
+ ticks[::2] = self.diagnostic_searched_angles_rad_clipped - my_angle_step_rad / 2
+ ticks[1::2] = ticks[::2] + my_angle_step_rad
+
+ for i0, epsilon, i1, data, weight, original_angle, original_zpix_transl in zip(
+ my_i0,
+ my_epsilon,
+ my_i1,
+ radios_subset,
+ radios_weights_subset,
+ chunk_info.angles_rad[subchunk_slice],
+ chunk_info.z_pix_per_proj[subchunk_slice],
  ):
  data_token = data * weight
- self.gridded_radios[i0] += data_token * (1 - epsilon)
- self.gridded_radios[i1] += data_token * epsilon

- self.gridded_weights[i0] += weight * (1 - epsilon)
- self.gridded_weights[i1] += weight * epsilon
+ if not self.diag_zpro_run:
+ self.gridded_radios[i0] += data_token * (1 - epsilon)
+ self.gridded_radios[i1] += data_token * epsilon
+
+ self.gridded_weights[i0] += weight * (1 - epsilon)
+ self.gridded_weights[i1] += weight * epsilon
+
+ # building the intervals around the diagnostic angles
+
+ if self.diag_zpro_run:
+ my_i0 = np.searchsorted(ticks, my_angles_02pi)
+
+ for i0, a02pi, data, weight, original_angle, original_zpix_transl in zip(
+ my_i0,
+ my_angles_02pi,
+ radios_subset,
+ radios_weights_subset,
+ chunk_info.angles_rad[subchunk_slice],
+ chunk_info.z_pix_per_proj[subchunk_slice],
+ ):
+ if i0 % 2 == 0:
+ # not in an intervals
+ continue

- if i0 == 0 or i1 == 0:
  # There is a contribution to the first regridded radio ( the one indexed by 0)
  # We build two diagnostics for the contributions to this radio.
  # The first for the first pass (i_diag=0)
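Two indexing schemes coexist in this hunk: the normal accumulation spreads each projection over the two nearest gridded angles with weights (1 - epsilon, epsilon), while the diag_zpro_run path uses np.searchsorted against the 'ticks' array, where an odd insertion index means the angle falls inside one of the small bins centred on the searched diagnostic angles. A self-contained sketch of both ideas (variable names are illustrative, not nabu's):

    import numpy as np

    n_gridded = 8
    angles = np.array([0.10, 0.80, 1.58])            # projection angles, in radians

    # Linear spreading onto a regular angular grid
    i_float = angles * n_gridded / (2 * np.pi)
    i0 = np.mod(np.floor(i_float).astype(int), n_gridded)
    eps = i_float - np.floor(i_float)                # weight eps on i1, (1 - eps) on i0
    i1 = np.mod(i0 + 1, n_gridded)

    # Diagnostic bins: ticks delimit +/- half an angular step around each searched angle
    searched = np.array([np.pi / 2, 3 * np.pi / 2])
    step = 0.05
    ticks = np.empty(2 * len(searched))
    ticks[::2] = searched - step / 2
    ticks[1::2] = ticks[::2] + step
    inside = np.searchsorted(ticks, np.mod(angles, 2 * np.pi)) % 2 == 1
    print(i0, i1, np.round(eps, 2), inside)          # only 1.58 rad lands in the bin around pi/2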
@@ -183,25 +265,49 @@
  # To discriminate we introduce
  # An angular margin beyond which we know that a possible contribution
  # is coming from another turn
- safe_angular_margin = 3.14 / 10
- for i_diag in range(2):
- if original_angle < self.diagnostic_angles[i_diag] + safe_angular_margin:
+
+ safe_angular_margin = 3.14 / 40
+
+ my_dist = abs(
+ a02pi - self.diagnostic_searched_angles_rad_clipped[(i0 - 1) // 2 : (i0 - 1) // 2 + 1].mean()
+ )
+
+ # print(" i0 ", i0, " original_angle ", original_angle, " a02pi" , a02pi, " my_dist " , my_dist)
+
+ # consider fist pass and second possible pass. There might be further passes which we dont consider here
+ i_diag_list = [(i0 - 1) // 2, (i0 - 1) // 2 + len(self.diagnostic_searched_angles_rad_clipped)]
+ for i_redundancy, i_diag in enumerate(i_diag_list):
+ # print("IRED ", i_redundancy)
+ if i_redundancy:
+ # to avoid, in z_stages with >360 range for one single stage, to fill the second items which should instead be filled by another stage.
+ if abs(original_zpix_transl - self.diagnostic_zpix_transl[i_diag_list[0]]) < 2.0:
+ # print( " >>>>>> stesso z" , i_redundancy )
+ continue
+
+ if np.isnan(self.diagnostic_angles[i_diag]) or (
+ abs(original_angle) < abs(self.diagnostic_angles[i_diag] + safe_angular_margin)
+ ):
  # we are searching for the first contributions ( the one at the lowest angle)
  # for the two diagnostics. With the constraint that the second is at an higher angle
  # than the first. So if we are here this means that we have found an occurrence with
  # lower angle and we discard what we could have previously found.
+
  self.diagnostic_radios[i_diag][:] = 0
  self.diagnostic_weights[i_diag][:] = 0
  self.diagnostic_angles[i_diag] = original_angle

- if abs(original_angle - self.diagnostic_angles[i_diag]) < safe_angular_margin:
- if i0 == 0:
- factor = 1 - epsilon
- else:
- factor = epsilon
+ self.diagnostic_zpix_transl[i_diag] = original_zpix_transl
+ else:
+ continue
+
+ if abs(my_dist) <= my_angle_step_rad * 1.1:
+ factor = 1 - abs(my_dist) / (my_angle_step_rad)
+
  self.diagnostic_radios[i_diag] += data_token * factor
  self.diagnostic_weights[i_diag] += weight * factor
  break
+ else:
+ pass

  class _ReframingInfos:
  def __init__(self, chunk_info, subchunk_slice, subr_start_end, dtasrc_start_end, subchunk_file_indexes):
@@ -220,7 +326,9 @@

  self.floating_subregion = None, None, floating_start_z, floating_end_z

- def _extract_preprocess_with_flats(self, data_raw, reframing_infos, chunk_info, output, it_is_weight=False):
+ def _extract_preprocess_with_flats(
+ self, data_raw, reframing_infos, chunk_info, output, radios_angular_range_slicing=None, it_is_weight=False
+ ):
  if not it_is_weight:
  if self.dark is not None:
  data_raw = data_raw - self.dark[reframing_infos.dtasrc_start_z : reframing_infos.dtasrc_end_z]
@@ -231,7 +339,12 @@
  if self.dark is not None:
  flat = flat - self.dark[reframing_infos.dtasrc_start_z : reframing_infos.dtasrc_end_z]

- data_raw[i] = data_raw[i] / flat
+ if self.radios_srcurrent is not None:
+ factor = self.nominal_current / self.radios_srcurrent[radios_angular_range_slicing.start + i]
+ else:
+ factor = 1
+
+ data_raw[i] = data_raw[i] * factor / flat

  if self.double_flat is not None:
  data_raw = data_raw / self.double_flat[reframing_infos.dtasrc_start_z : reframing_infos.dtasrc_end_z]
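The flat-field division now folds in a synchrotron-current correction: each radio is rescaled by nominal_current / srcurrent before being divided by the flat, so a beam refill between flats and projections no longer shows up as a global intensity jump. Reduced to its essentials (sketch with made-up numbers):

    import numpy as np

    nominal_current = 0.2                                  # reference ring current (same role as GriddedAccumulator.nominal_current)
    radios_srcurrent = np.array([0.199, 0.195, 0.190])     # current measured for each radio

    raw = np.ones((3, 4, 5), dtype=np.float32)             # (n_radios, n_rows, n_cols), dark already subtracted
    flat = np.full((4, 5), 2.0, dtype=np.float32)          # dark-subtracted flat

    normalized = np.empty_like(raw)
    for i, frame in enumerate(raw):
        factor = nominal_current / radios_srcurrent[i]     # > 1 when the beam has decayed
        normalized[i] = frame * factor / flat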
@@ -271,12 +384,35 @@
  flat_data = self.flats[self.flat_indexes_reverse_map[prev_next[0]]][slice_y, slice_x]
  else: # interpolate
  prev_idx, next_idx = prev_next
- flat_data_prev = self.flats[self.flat_indexes_reverse_map[prev_idx]][slice_y, slice_x]
- flat_data_next = self.flats[self.flat_indexes_reverse_map[next_idx]][slice_y, slice_x]
+
+ n_prev = self.flat_indexes_reverse_map[prev_idx]
+ n_next = self.flat_indexes_reverse_map[next_idx]
+
+ flat_data_prev = self.flats[n_prev][slice_y, slice_x]
+ flat_data_next = self.flats[n_next][slice_y, slice_x]
+
+ if self.flats_srcurrent is not None:
+ prev_current_factor = self.nominal_current / self.flats_srcurrent[n_prev]
+ next_current_factor = self.nominal_current / self.flats_srcurrent[n_next]
+ else:
+ prev_current_factor = 1
+ next_current_factor = 1
+
  delta = next_idx - prev_idx
  w1 = 1 - (idx - prev_idx) / delta
  w2 = 1 - (next_idx - idx) / delta
- flat_data = w1 * flat_data_prev + w2 * flat_data_next
+
+ if self.dark is not None:
+ dark = self.dark[slice_y, slice_x]
+ else:
+ dark = 0
+
+ flat_data = (
+ dark
+ + w1 * (flat_data_prev - dark) * prev_current_factor
+ + w2 * (flat_data_next - dark) * next_current_factor
+ )
+
  if flat_data.dtype != dtype:
  flat_data = np.ascontiguousarray(flat_data, dtype=dtype)
  return flat_data
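The interpolated flat is now assembled around the dark level: each neighbouring flat is dark-subtracted, rescaled to the nominal current, weighted by its distance (in acquisition index) to the requested projection, and the dark is added back. The same formula in isolation (sketch; argument names are illustrative):

    def interpolate_flat(flat_prev, flat_next, idx, prev_idx, next_idx,
                         dark=0.0, prev_factor=1.0, next_factor=1.0):
        # Linear interpolation weights from the acquisition indices of the two flats
        delta = next_idx - prev_idx
        w1 = 1 - (idx - prev_idx) / delta
        w2 = 1 - (next_idx - idx) / delta
        return (dark
                + w1 * (flat_prev - dark) * prev_factor
                + w2 * (flat_next - dark) * next_factor)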
@@ -364,7 +500,7 @@ def padding_logic(subr_start_z, subr_end_z, dtasrc_start_z, dtasrc_end_z):


  def get_reconstruction_space(span_info, min_scanwise_z, end_scanwise_z, phase_margin_pix):
- """Utility function which, given the span_info object, creates the auxiliary collection arrays
+ """Utility function, so far used only by the unit test, which, given the span_info object, creates the auxiliary collection arrays
  and initialises the my_z_min, my_z_end variable keeping into account the scan direction
  and the min_scanwise_z, end_scanwise_z input arguments
  Parameters
@@ -404,9 +540,17 @@
  # the accumulators
  gridded_radios = np.zeros([n_gridded_angles, radios_h, span_info.detector_shape_vh[1]], np.float32)
  gridded_cumulated_weights = np.zeros([n_gridded_angles, radios_h, span_info.detector_shape_vh[1]], np.float32)
- diagnostic_radios = np.zeros((2,) + gridded_radios.shape[1:], np.float32)
- diagnostic_weights = np.zeros((2,) + gridded_radios.shape[1:], np.float32)
- diagnostic_proj_angle = np.zeros([2], "f")
+
+ # this utility function is meant for testing the reconstruction only, not the diagnostic collection.
+ # However we build diagnostic targets all the same to feed something through the API
+ # which contemplates the diagnostics. So that the unit test runs correctly
+
+ diagnostic_radios = np.zeros((4,) + gridded_radios.shape[1:], np.float32)
+ diagnostic_weights = np.zeros((4,) + gridded_radios.shape[1:], np.float32)
+ diagnostic_proj_angle = np.zeros([4], "f")
+
+ diagnostic_searched_angles_rad_clipped = (0.5 + np.arange(2)) * (2 * np.pi / (2))
+ diagnostic_zpix_transl = np.zeros([4], "f")

  gridded_angles_rad = np.arange(n_gridded_angles) * 2 * np.pi / n_gridded_angles
  gridded_angles_deg = np.rad2deg(gridded_angles_rad)
@@ -422,6 +566,8 @@
  "diagnostic_radios": diagnostic_radios,
  "diagnostic_weights": diagnostic_weights,
  "diagnostic_proj_angle": diagnostic_proj_angle,
+ "diagnostic_searched_angles_rad_clipped": diagnostic_searched_angles_rad_clipped,
+ "diagnostic_zpix_transl": diagnostic_zpix_transl,
  "gridded_angles_rad": gridded_angles_rad,
  "gridded_angles_deg": gridded_angles_deg,
  },