nabu 2024.1.9-py3-none-any.whl → 2024.2.0-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (151)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap.py +2 -3
  3. nabu/app/cast_volume.py +4 -2
  4. nabu/app/cli_configs.py +5 -0
  5. nabu/app/composite_cor.py +1 -1
  6. nabu/app/create_distortion_map_from_poly.py +5 -6
  7. nabu/app/diag_to_pix.py +7 -19
  8. nabu/app/diag_to_rot.py +14 -29
  9. nabu/app/double_flatfield.py +32 -44
  10. nabu/app/parse_reconstruction_log.py +3 -0
  11. nabu/app/reconstruct.py +53 -15
  12. nabu/app/reconstruct_helical.py +2 -2
  13. nabu/app/stitching.py +27 -13
  14. nabu/app/tests/test_reduce_dark_flat.py +4 -1
  15. nabu/cuda/kernel.py +11 -2
  16. nabu/cuda/processing.py +2 -2
  17. nabu/cuda/src/cone.cu +77 -0
  18. nabu/cuda/src/hierarchical_backproj.cu +271 -0
  19. nabu/cuda/utils.py +0 -6
  20. nabu/estimation/alignment.py +5 -19
  21. nabu/estimation/cor.py +173 -599
  22. nabu/estimation/cor_sino.py +356 -26
  23. nabu/estimation/focus.py +63 -11
  24. nabu/estimation/tests/test_cor.py +124 -58
  25. nabu/estimation/tests/test_focus.py +6 -6
  26. nabu/estimation/tilt.py +2 -1
  27. nabu/estimation/utils.py +5 -33
  28. nabu/io/__init__.py +1 -1
  29. nabu/io/cast_volume.py +1 -1
  30. nabu/io/reader.py +416 -21
  31. nabu/io/tests/test_readers.py +422 -0
  32. nabu/io/tests/test_writers.py +1 -102
  33. nabu/io/writer.py +4 -433
  34. nabu/opencl/kernel.py +14 -3
  35. nabu/opencl/processing.py +8 -0
  36. nabu/pipeline/config_validators.py +5 -2
  37. nabu/pipeline/datadump.py +12 -5
  38. nabu/pipeline/estimators.py +162 -188
  39. nabu/pipeline/fullfield/chunked.py +168 -92
  40. nabu/pipeline/fullfield/chunked_cuda.py +7 -3
  41. nabu/pipeline/fullfield/computations.py +2 -7
  42. nabu/pipeline/fullfield/dataset_validator.py +0 -4
  43. nabu/pipeline/fullfield/nabu_config.py +37 -13
  44. nabu/pipeline/fullfield/processconfig.py +22 -13
  45. nabu/pipeline/fullfield/reconstruction.py +13 -9
  46. nabu/pipeline/helical/helical_chunked_regridded.py +1 -1
  47. nabu/pipeline/helical/helical_chunked_regridded_cuda.py +1 -0
  48. nabu/pipeline/helical/helical_reconstruction.py +1 -1
  49. nabu/pipeline/params.py +21 -1
  50. nabu/pipeline/processconfig.py +1 -12
  51. nabu/pipeline/reader.py +146 -0
  52. nabu/pipeline/tests/test_estimators.py +44 -72
  53. nabu/pipeline/utils.py +4 -2
  54. nabu/pipeline/writer.py +10 -2
  55. nabu/preproc/ccd_cuda.py +1 -1
  56. nabu/preproc/ctf.py +14 -7
  57. nabu/preproc/ctf_cuda.py +2 -3
  58. nabu/preproc/double_flatfield.py +5 -12
  59. nabu/preproc/double_flatfield_cuda.py +2 -2
  60. nabu/preproc/flatfield.py +5 -1
  61. nabu/preproc/flatfield_cuda.py +5 -1
  62. nabu/preproc/phase.py +24 -73
  63. nabu/preproc/phase_cuda.py +5 -8
  64. nabu/preproc/tests/test_ctf.py +11 -7
  65. nabu/preproc/tests/test_flatfield.py +67 -122
  66. nabu/preproc/tests/test_paganin.py +54 -30
  67. nabu/processing/azim.py +206 -0
  68. nabu/processing/convolution_cuda.py +1 -1
  69. nabu/processing/fft_cuda.py +15 -17
  70. nabu/processing/histogram.py +2 -0
  71. nabu/processing/histogram_cuda.py +2 -1
  72. nabu/processing/kernel_base.py +3 -0
  73. nabu/processing/muladd_cuda.py +1 -0
  74. nabu/processing/padding_opencl.py +1 -1
  75. nabu/processing/roll_opencl.py +1 -0
  76. nabu/processing/rotation_cuda.py +2 -2
  77. nabu/processing/tests/test_fft.py +17 -10
  78. nabu/processing/unsharp_cuda.py +1 -1
  79. nabu/reconstruction/cone.py +104 -40
  80. nabu/reconstruction/fbp.py +3 -0
  81. nabu/reconstruction/fbp_base.py +7 -2
  82. nabu/reconstruction/filtering.py +20 -7
  83. nabu/reconstruction/filtering_cuda.py +7 -1
  84. nabu/reconstruction/hbp.py +424 -0
  85. nabu/reconstruction/mlem.py +99 -0
  86. nabu/reconstruction/reconstructor.py +2 -0
  87. nabu/reconstruction/rings_cuda.py +19 -19
  88. nabu/reconstruction/sinogram_cuda.py +1 -0
  89. nabu/reconstruction/sinogram_opencl.py +3 -1
  90. nabu/reconstruction/tests/test_cone.py +10 -5
  91. nabu/reconstruction/tests/test_deringer.py +7 -6
  92. nabu/reconstruction/tests/test_fbp.py +124 -10
  93. nabu/reconstruction/tests/test_filtering.py +13 -11
  94. nabu/reconstruction/tests/test_halftomo.py +30 -4
  95. nabu/reconstruction/tests/test_mlem.py +91 -0
  96. nabu/reconstruction/tests/test_reconstructor.py +8 -3
  97. nabu/resources/dataset_analyzer.py +142 -92
  98. nabu/resources/gpu.py +1 -0
  99. nabu/resources/nxflatfield.py +134 -125
  100. nabu/resources/templates/id16a_fluo.conf +42 -0
  101. nabu/resources/tests/test_extract.py +10 -0
  102. nabu/resources/tests/test_nxflatfield.py +2 -2
  103. nabu/stitching/alignment.py +80 -24
  104. nabu/stitching/config.py +105 -68
  105. nabu/stitching/definitions.py +1 -0
  106. nabu/stitching/frame_composition.py +68 -60
  107. nabu/stitching/overlap.py +91 -51
  108. nabu/stitching/single_axis_stitching.py +32 -0
  109. nabu/stitching/slurm_utils.py +6 -6
  110. nabu/stitching/stitcher/__init__.py +0 -0
  111. nabu/stitching/stitcher/base.py +124 -0
  112. nabu/stitching/stitcher/dumper/__init__.py +3 -0
  113. nabu/stitching/stitcher/dumper/base.py +94 -0
  114. nabu/stitching/stitcher/dumper/postprocessing.py +356 -0
  115. nabu/stitching/stitcher/dumper/preprocessing.py +60 -0
  116. nabu/stitching/stitcher/post_processing.py +555 -0
  117. nabu/stitching/stitcher/pre_processing.py +1068 -0
  118. nabu/stitching/stitcher/single_axis.py +484 -0
  119. nabu/stitching/stitcher/stitcher.py +0 -0
  120. nabu/stitching/stitcher/y_stitcher.py +13 -0
  121. nabu/stitching/stitcher/z_stitcher.py +45 -0
  122. nabu/stitching/stitcher_2D.py +278 -0
  123. nabu/stitching/tests/test_config.py +12 -37
  124. nabu/stitching/tests/test_frame_composition.py +33 -59
  125. nabu/stitching/tests/test_overlap.py +149 -7
  126. nabu/stitching/tests/test_utils.py +1 -1
  127. nabu/stitching/tests/test_y_preprocessing_stitching.py +132 -0
  128. nabu/stitching/tests/{test_z_stitching.py → test_z_postprocessing_stitching.py} +167 -561
  129. nabu/stitching/tests/test_z_preprocessing_stitching.py +431 -0
  130. nabu/stitching/utils/__init__.py +1 -0
  131. nabu/stitching/utils/post_processing.py +281 -0
  132. nabu/stitching/utils/tests/test_post-processing.py +21 -0
  133. nabu/stitching/{utils.py → utils/utils.py} +79 -52
  134. nabu/stitching/y_stitching.py +27 -0
  135. nabu/stitching/z_stitching.py +32 -2263
  136. nabu/testutils.py +1 -152
  137. nabu/thirdparty/tomocupy_remove_stripe.py +43 -9
  138. nabu/utils.py +158 -61
  139. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/METADATA +10 -3
  140. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/RECORD +144 -121
  141. nabu/io/tiffwriter_zmm.py +0 -99
  142. nabu/pipeline/fallback_utils.py +0 -149
  143. nabu/pipeline/helical/tests/test_accumulator.py +0 -158
  144. nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -355
  145. nabu/pipeline/helical/tests/test_strategy.py +0 -61
  146. nabu/pipeline/helical/utils.py +0 -51
  147. nabu/pipeline/tests/test_chunk_reader.py +0 -74
  148. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/LICENSE +0 -0
  149. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/WHEEL +0 -0
  150. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/entry_points.txt +0 -0
  151. {nabu-2024.1.9.dist-info → nabu-2024.2.0.dist-info}/top_level.txt +0 -0
nabu/__init__.py CHANGED
@@ -1,4 +1,4 @@
-__version__ = "2024.1.9"
+__version__ = "2024.2.0"
 __nabu_modules__ = [
     "app",
     "cuda",
nabu/app/bootstrap.py CHANGED
@@ -12,15 +12,14 @@ def bootstrap():
     args = parse_params_values(BootstrapConfig, parser_description="Initialize a nabu configuration file")
 
     do_bootstrap = bool(args["bootstrap"])
-    do_convert = args["convert"] != ""
     no_comments = bool(args["nocomments"])
+    overwrite = bool(args["overwrite"])
 
     if do_bootstrap:
         print(
             "The --bootstrap option is now the default behavior of the nabu-config command. This option is therefore not needed anymore."
         )
-
-    if path.isfile(args["output"]):
+    if path.isfile(args["output"]) and not (overwrite):
         rep = input("File %s already exists. Overwrite ? [y/N]" % args["output"])
         if rep.lower() != "y":
             print("Stopping")
nabu/app/cast_volume.py CHANGED
@@ -7,6 +7,7 @@ import sys
 import logging
 from argparse import RawTextHelpFormatter
 import numpy
+from silx.io.url import DataUrl
 from tomoscan.esrf.volume.utils import guess_volumes
 from tomoscan.factory import Factory
 from tomoscan.esrf.volume import (
@@ -26,7 +27,6 @@ from nabu.pipeline.params import files_formats
 from nabu.utils import convert_str_to_tuple
 from nabu.io.cast_volume import _min_max_from_histo
 
-from silx.io.url import DataUrl
 
 _logger = logging.getLogger(__name__)
 
@@ -212,7 +212,9 @@ def main(argv=None):
     elif options.output_type is None:
         raise ValueError("'output_type' or 'output_volume' is expected")
     else:
-        output_volume = get_default_output_volume(input_volume=input_volume, output_type=output_format)
+        output_volume = get_default_output_volume(
+            input_volume=input_volume, output_type=output_format  # pylint: disable=E0606
+        )
     try:
         output_data_type = numpy.dtype(getattr(numpy, options.output_data_type))
     except Exception as e:
nabu/app/cli_configs.py CHANGED
@@ -41,6 +41,11 @@ BootstrapConfig = {
         "default": "",
     },
    "helical": {"help": "Prepare configuration file for helical", "default": 0, "required": False, "type": int},
+    "overwrite": {
+        "help": "Whether to overwrite the output file if exists",
+        "action": "store_const",
+        "const": 1,
+    },
 }
 
 # Default configuration for "zsplit" command
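The new "overwrite" entry is declared in argparse terms: presumably parse_params_values forwards these dicts to argparse, so the flag stores 1 when passed and is left at None otherwise, which bootstrap.py above then casts with bool(). A minimal stdlib sketch of that store_const behavior (the parser wiring here is illustrative, not nabu's actual code):

    import argparse

    parser = argparse.ArgumentParser(prog="nabu-config")
    # store_const with const=1: the flag yields 1 when present, None when absent
    parser.add_argument("--overwrite", action="store_const", const=1,
                        help="Whether to overwrite the output file if exists")

    print(bool(vars(parser.parse_args(["--overwrite"]))["overwrite"]))  # True
    print(bool(vars(parser.parse_args([]))["overwrite"]))               # False (bool(None))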
nabu/app/composite_cor.py CHANGED
@@ -99,7 +99,7 @@ def composite_cor_entry_point(args_dict):
     # if "near_pos" not in args.cor_options:
     #     scan = NXtomo()
     #     scan.load(file_path=nexus_name, data_path=args.entry_name)
-    #     estimated_near = scan.instrument.detector.estimated_cor_from_motor
+    #     estimated_near = scan.instrument.detector.x_rotation_axis_pixel_position
     #
     #     cor_options = args.cor_options + f" ; near_pos = {estimated_near} "
     #
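This commented-out block tracks an nxtomo/tomoscan API rename: estimated_cor_from_motor became x_rotation_axis_pixel_position (the same substitution appears in diag_to_rot.py below). Code that must run against both library generations could fall back with getattr; a hypothetical compatibility helper, not part of nabu:

    def estimated_cor(detector):
        # Try the new nxtomo attribute name first, then the legacy one
        for attr in ("x_rotation_axis_pixel_position", "estimated_cor_from_motor"):
            value = getattr(detector, attr, None)
            if value is not None:
                return value
        return None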
nabu/app/create_distortion_map_from_poly.py CHANGED
@@ -1,12 +1,11 @@
-from .. import version
+import sys
 import numpy as np
 import h5py
-import argparse
-import sys
+from .. import version
+from ..utils import DictToObj
+from ..resources.logger import Logger
 from .cli_configs import CreateDistortionMapHorizontallyMatchedFromPolyConfig
 from .utils import parse_params_values
-from ..utils import DictToObj
-from ..resources.logger import Logger, LoggerOrPrint
 
 
 def create_distortion_maps_entry_point(user_args=None):
@@ -52,7 +51,7 @@ def create_distortion_maps_entry_point(user_args=None):
 
     logger = Logger("create_distortion_maps", level=user_args["loglevel"], logfile="create_distortion_maps.log")
 
-    coords_source_x, coords_source_z, new_axis_pos = create_maps_x_and_z(args_dict)
+    coords_source_x, coords_source_z, new_axis_pos = create_maps_x_and_z(args_dict)  # pylint: disable=E0606
 
     with h5py.File(args_dict["target_file"], "w") as f:
         f["coords_source_x"] = coords_source_x
nabu/app/diag_to_pix.py CHANGED
@@ -1,28 +1,16 @@
-from .. import version
-from os import environ
-
-import shutil
 import os
+from multiprocessing import Pool
 import sys
-import re
-import h5py
 import numpy as np
-import silx.math.fft
+from scipy.ndimage import gaussian_filter
+import h5py
 from silx.io.dictdump import h5todict
-
-
-from ..resources.logger import LoggerOrPrint
-
+from nxtomo.application.nxtomo import NXtomo
+from .. import version
+from ..utils import DictToObj, get_available_threads
 from .utils import parse_params_values
 from .cli_configs import DiagToPixConfig
-
-import h5py
-from nabu.utils import DictToObj
-from nabu.pipeline.estimators import oversample
-from scipy.ndimage import gaussian_filter
-from nxtomo.application.nxtomo import NXtomo
-from multiprocessing import Pool
-from ..utils import get_available_threads
+from ..pipeline.estimators import oversample
 
 
 """
nabu/app/diag_to_rot.py CHANGED
@@ -1,30 +1,16 @@
-from .. import version
-from os import environ
-
-import argparse
-import shutil
 import os
 import sys
-import re
-import h5py
-import numpy as np
-
-
-from ..resources.logger import LoggerOrPrint
-from .utils import parse_params_values
-from .cli_configs import DiagToRotConfig
-
-from ..utils import get_available_threads
-
-from nxtomo.application.nxtomo import NXtomo
-
 from multiprocessing import Pool
-
-import h5py
-from nabu.utils import DictToObj
-from nabu.pipeline.estimators import oversample
+import numpy as np
 from scipy.ndimage import gaussian_filter
+import h5py
 from silx.io.dictdump import h5todict
+from nxtomo.application.nxtomo import NXtomo
+from .. import version
+from ..utils import get_available_threads, DictToObj
+from ..pipeline.estimators import oversample
+from .utils import parse_params_values
+from .cli_configs import DiagToRotConfig
 
 
 """
@@ -225,12 +211,11 @@ def find_best_interpolating_line(args):
 
         error = (diff_enes / (orig_enes + 1.0e-30)).astype("d").sum()
 
-        if error == error:
-            if not (error > best_error):
-                best_error = error
-                best_error_pair = index_ovlp_a, index_ovlp_b
+        if not (error > best_error):
+            best_error = error
+            best_error_pair = index_ovlp_a, index_ovlp_b
 
-    return best_error, best_error_pair
+    return best_error, best_error_pair  # pylint: disable=E0606
 
 
 def main(user_args=None):
@@ -262,7 +247,7 @@ def main(user_args=None):
 
         scan = NXtomo()
         scan.load(file_path=args.original_scan, data_path=args.entry_name)
-        args.near = scan.instrument.detector.estimated_cor_from_motor
+        args.near = scan.instrument.detector.x_rotation_axis_pixel_position
     else:
         pass
 
@@ -305,7 +290,7 @@ def main(user_args=None):
             """
             raise ValueError(message)
 
-        arguments_for_multiprocessing.append((diag, overlap_list, args))
+        arguments_for_multiprocessing.append((diag, overlap_list, args))  # pylint: disable=E0606
 
     ncpus = get_available_threads()
     with Pool(processes=ncpus) as pool:
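The deleted `if error == error:` was an idiomatic NaN filter: NaN is the only float that compares unequal to itself, so the inner block used to be skipped for NaN errors. A quick illustration of the idiom:

    import math

    x = float("nan")
    print(x == x)         # False: NaN never equals anything, itself included
    print(math.isnan(x))  # True: the explicit spelling of the same test
    print(x > 0.0)        # False: ordered comparisons with NaN are always False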
nabu/app/double_flatfield.py CHANGED
@@ -1,7 +1,6 @@
 import numpy as np
 from ..preproc.double_flatfield import DoubleFlatField
-from ..preproc.flatfield import FlatFieldDataUrls
-from ..io.reader import ChunkReader
+from ..preproc.flatfield import FlatField
 from ..io.writer import NXProcessWriter
 from ..resources.dataset_analyzer import analyze_dataset
 from ..resources.nxflatfield import update_dataset_info_flats_darks
@@ -16,44 +15,45 @@ class DoubleFlatFieldChunks:
     ):
         self.logger = LoggerOrPrint(logger)
         self.dataset_info = analyze_dataset(dataset_path, extra_options={"hdf5_entry": h5_entry}, logger=logger)
+        self.chunk_size = min(chunk_size, self.dataset_info.radio_dims[-1])
         self.do_flatfield = bool(do_flatfield)
         if self.do_flatfield:
-            update_dataset_info_flats_darks(self.dataset_info, flatfield_mode="force-compute")
+            update_dataset_info_flats_darks(self.dataset_info, flatfield_mode=True)
         self.output_file = output_file
         self.sigma = sigma if sigma is not None and abs(sigma) > 1e-5 else None
-        self._init_reader(chunk_size)
-        self._init_flatfield((None, None, 0, self.chunk_size))
-        self._init_dff()
 
-    def _init_reader(self, chunk_size, start_idx=0):
-        self.chunk_size = min(chunk_size, self.dataset_info.radio_dims[-1])
-        self.reader = ChunkReader(
-            self.dataset_info.projections,
-            sub_region=(None, None, start_idx, start_idx + self.chunk_size),
-            convert_float=True,
-        )
-        self.projections = self.reader.files_data
+    def _get_config(self):
+        conf = {
+            "dataset": self.dataset_info.location,
+            "entry": self.dataset_info.hdf5_entry or None,
+            "dff_sigma": self.sigma,
+            "do_flatfield": self.do_flatfield,
+        }
+        return conf
+
+    def _read_projections(self, chunk_size, start_idx=0):
+        reader_kwargs = {"sub_region": (slice(None), slice(start_idx, start_idx + chunk_size), slice(None))}
+        if self.dataset_info.kind == "edf":
+            reader_kwargs = {"n_reading_threads": 4}
+        self.reader = self.dataset_info.get_reader(**reader_kwargs)
+        self.projections = self.reader.load_data()
 
-    def _init_flatfield(self, subregion):
+    def _init_flatfield(self, start_z=None, end_z=None):
         if not self.do_flatfield:
             return
-        self.flatfield = FlatFieldDataUrls(
-            (self.dataset_info.n_angles, self.chunk_size, self.dataset_info.radio_dims[0]),
-            self.dataset_info.flats,
-            self.dataset_info.darks,
-            sorted(self.dataset_info.projections.keys()),
-            sub_region=subregion,
+        chunk_size = end_z - start_z if start_z is not None else self.chunk_size
+        self.flatfield = FlatField(
+            (self.dataset_info.n_angles, chunk_size, self.dataset_info.radio_dims[0]),
+            flats={k: arr[start_z:end_z, :] for k, arr in self.dataset_info.flats.items()},
+            darks={k: arr[start_z:end_z, :] for k, arr in self.dataset_info.darks.items()},
+            radios_indices=sorted(self.dataset_info.projections.keys()),
         )
 
-    def _apply_flatfield(self):
+    def _apply_flatfield(self, start_z=None, end_z=None):
         if self.do_flatfield:
+            self._init_flatfield(start_z=start_z, end_z=end_z)
             self.flatfield.normalize_radios(self.projections)
 
-    def _set_reader_subregion(self, subregion):
-        self.reader._set_subregion(subregion)
-        self.reader._init_reader()
-        self.reader._loaded = False
-
     def _init_dff(self):
         self.double_flatfield = DoubleFlatField(
             self.projections.shape,
@@ -63,15 +63,6 @@ class DoubleFlatFieldChunks:
             sigma_filter=self.sigma,
         )
 
-    def _get_config(self):
-        conf = {
-            "dataset": self.dataset_info.location,
-            "entry": self.dataset_info.hdf5_entry or None,
-            "dff_sigma": self.sigma,
-            "do_flatfield": self.do_flatfield,
-        }
-        return conf
-
     def compute_double_flatfield(self):
         """
         Compute the double flatfield for the current dataset.
@@ -84,21 +75,18 @@ class DoubleFlatFieldChunks:
         for i in range(n_steps):
             self.logger.debug("Computing DFF batch %d/%d" % (i + 1, n_steps + int(extra_step)))
             subregion = (None, None, i * chunk_size, (i + 1) * chunk_size)
-            self._set_reader_subregion(subregion)
-            self._init_flatfield(subregion)
-            self.reader.load_files()
-            self._apply_flatfield()
+            self._read_projections(chunk_size, start_idx=i * chunk_size)
+            self._apply_flatfield(start_z=i * chunk_size, end_z=(i + 1) * chunk_size)
+            self._init_dff()
             dff = self.double_flatfield.compute_double_flatfield(self.projections, recompute=True)
             res[subregion[-2] : subregion[-1]] = dff[:]
         # Need to initialize objects with a different shape
         if extra_step:
             curr_idx = (i + 1) * self.chunk_size
             self.logger.debug("Computing DFF batch %d/%d" % (i + 2, n_steps + int(extra_step)))
-            self._init_reader(n_z - curr_idx, start_idx=curr_idx)
-            self._init_flatfield(self.reader.sub_region)
+            self._read_projections(n_z - curr_idx, start_idx=curr_idx)
+            self._apply_flatfield(start_z=(i + 1) * chunk_size, end_z=n_z)
             self._init_dff()
-            self.reader.load_files()
-            self._apply_flatfield()
             dff = self.double_flatfield.compute_double_flatfield(self.projections, recompute=True)
             res[curr_idx:] = dff[:]
         return res
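The rewrite drops ChunkReader/FlatFieldDataUrls in favor of a reader obtained from dataset_info.get_reader() and an array-based FlatField rebuilt per chunk, but the slab-by-slab control flow is unchanged. A simplified sketch of that loop shape (function names invented for illustration; nabu's real code threads the flatfield and DFF objects through it):

    import numpy as np

    def compute_in_chunks(read_chunk, reduce_chunk, n_z, n_x, chunk_size):
        # Fill a (n_z, n_x) result one slab of rows at a time
        res = np.zeros((n_z, n_x), dtype="f")
        n_steps = n_z // chunk_size
        for i in range(n_steps):
            start, end = i * chunk_size, (i + 1) * chunk_size
            res[start:end] = reduce_chunk(read_chunk(start, end))
        if n_z % chunk_size:  # leftover rows: objects must be re-created with the smaller shape
            start = n_steps * chunk_size
            res[start:] = reduce_chunk(read_chunk(start, n_z))
        return res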
nabu/app/parse_reconstruction_log.py CHANGED
@@ -73,11 +73,14 @@ def parse_logfile(fname, separator=" - "):
     start_text = "Going to reconstruct slices"
     end_text = "Merging reconstructions to"
 
+    start_line = None
     rec_log_bounds = []
     for i, line in enumerate(lines):
         if start_text in line:
             start_line = i
         if end_text in line:
+            if start_line is None:
+                raise ValueError("Could not find reconstruction start string indicator")
             rec_log_bounds.append((start_line, i))
             rec_file_basename = path.basename(line.split(end_text)[-1])
nabu/app/reconstruct.py CHANGED
@@ -1,4 +1,6 @@
+from tomoscan.io import HDF5File
 from .. import version
+from ..utils import list_match_queries
 from ..pipeline.config import parse_nabu_config_file
 from ..pipeline.config_validators import convert_to_int
 from .cli_configs import ReconstructConfig
@@ -64,17 +66,6 @@ def get_reconstructor(args, overwrite_options=None):
 
     #
 
-    # A crash with scikit-cuda happens only on PPC64 platform if and nvidia-persistenced is running.
-    # On such machines, a warm-up has to be done.
-    import platform
-
-    if platform.machine() == "ppc64le":
-        try:
-            from silx.math.fft.cufft import CUFFT
-        except: # can't catch narrower - cublasNotInitialized requires cublas !
-            CUFFT = None
-    #
-
     logfile = get_log_file(args["logfile"], args["log_file"], forbidden=[args["input_file"]])
     conf_dict = parse_nabu_config_file(args["input_file"])
     update_reconstruction_start_end(conf_dict, args["slice"].strip())
@@ -101,18 +92,65 @@
     return reconstructor
 
 
+def list_hdf5_entries(fname):
+    with HDF5File(fname, "r") as f:
+        entries = list(f.keys())
+    return entries
+
+
 def main():
     args = parse_params_values(
         ReconstructConfig,
         parser_description=f"Perform a tomographic reconstruction.",
         program_version="nabu " + version,
     )
+    # Get extra options
+    extra_options = {
+        "gpu_mem_fraction": args["gpu_mem_fraction"],
+        "cpu_mem_fraction": args["cpu_mem_fraction"],
+        "chunk_size": args["max_chunk_size"] if args["max_chunk_size"] > 0 else None,
+        "margin": args["phase_margin"],
+        "force_grouped_mode": bool(args["force_use_grouped_pipeline"]),
+    }
+    #
+
+    logfile = get_log_file(args["logfile"], args["log_file"], forbidden=[args["input_file"]])
+    conf_dict = parse_nabu_config_file(args["input_file"])
+    update_reconstruction_start_end(conf_dict, args["slice"].strip())
 
-    R = get_reconstructor(args)
-    proc = R.process_config
+    # Imports are done here, otherwise "nabu --version" takes forever
+    from ..pipeline.fullfield.processconfig import ProcessConfig
+    from ..pipeline.fullfield.reconstruction import FullFieldReconstructor
+
+    #
 
-    R.reconstruct()
-    R.finalize_files_saving()
+    hdf5_entries = conf_dict["dataset"].get("hdf5_entry", "").strip(",")
+    # spit by coma and remove empty spaces
+    hdf5_entries = [e.strip() for e in hdf5_entries.split(",")]
+    # clear '/' at beginning of the entry (so both entry like 'entry0000' and '/entry0000' are handled)
+    hdf5_entries = [e.lstrip("/") for e in hdf5_entries]
+
+    if hdf5_entries != [""]:
+        file_hdf5_entries = list_hdf5_entries(conf_dict["dataset"]["location"])
+        hdf5_entries = list_match_queries(file_hdf5_entries, hdf5_entries)
+        if hdf5_entries == []:
+            raise ValueError("No entry found matching pattern '%s'" % conf_dict["dataset"]["hdf5_entry"])
+
+    for hdf5_entry in hdf5_entries:
+        if len(hdf5_entries) > 1:
+            print("-" * 80)
+            print("Processing entry: %s" % hdf5_entry)
+            print("-" * 80)
+        conf_dict["dataset"]["hdf5_entry"] = hdf5_entry
+        proc = ProcessConfig(conf_dict=conf_dict, create_logger=logfile)  # logger is in append mode
+        logger = proc.logger
+        logger.info("Going to reconstruct slices (%d, %d)" % (proc.rec_region["start_z"], proc.rec_region["end_z"]))
+
+        R = FullFieldReconstructor(proc, logger=logger, extra_options=extra_options)
+        proc = R.process_config
+
+        R.reconstruct()
+        R.finalize_files_saving()
     return 0
 
 
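nabu-reconstruct can now process several HDF5 entries in one run: hdf5_entry accepts a comma-separated list, matched against the entries actually present in the file via list_match_queries. Assuming list_match_queries performs fnmatch-style pattern matching (a guess from the "pattern" wording in the error message), the selection amounts to:

    from fnmatch import fnmatch

    def select_entries(available, query_string):
        queries = [q.strip().lstrip("/") for q in query_string.strip(",").split(",")]
        return [e for e in available if any(fnmatch(e, q) for q in queries)]

    print(select_entries(["entry0000", "entry0001", "other"], "entry*"))
    # ['entry0000', 'entry0001']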
nabu/app/reconstruct_helical.py CHANGED
@@ -49,10 +49,10 @@ def main_helical():
     proc = ProcessConfig(conf_dict=conf_dict, create_logger=logfile)
     logger = proc.logger
 
-    if "rotate_projections" in proc.processing_steps:
+    if "tilt_correction" in proc.processing_steps:
         message = """ The rotate_projections step is activated. The Helical pipelines are not yet suited for projection rotation
         it will soon be implemented. For the moment
-        you should desactivate the rotation options in nabu.conf
+        you should deactivate the rotation options in nabu.conf
         """
         raise ValueError(message)
 
nabu/app/stitching.py CHANGED
@@ -1,12 +1,12 @@
 import logging
-from nabu.utils import Progress
+from pprint import pformat
+
+from tqdm import tqdm
 from nabu.stitching.slurm_utils import split_stitching_configuration_to_slurm_job
 from .cli_configs import StitchingConfig
 from ..pipeline.config import parse_nabu_config_file
-from nabu.stitching.z_stitching import (
-    z_stitching,
-    StitchingPostProcAggregation,
-)
+from nabu.stitching.single_axis_stitching import stitching
+from nabu.stitching.utils.post_processing import StitchingPostProcAggregation
 from nabu.stitching.config import dict_to_config_obj
 from .utils import parse_params_values
 
@@ -30,11 +30,13 @@ def main():
     conf_dict = parse_nabu_config_file(args["input-file"], allow_no_value=True)
 
     stitching_config = dict_to_config_obj(conf_dict)
+    assert stitching_config.axis is not None, "axis must be defined to know how to stitch"
+    _logger.info(" when loaded axis is %s", stitching_config.axis)
     stitching_config.settle_inputs()
     if args["only_create_master_file"]:
         # option to ease creation of the master in the following cases:
-        # * user has submitted all the job but has been quicked out of the cluster
-        # * only a few slurm job for some random version (cluster update...) and user want to retriger only those job and process the aggragation only. On those cases no need to redo it all.
+        # * user has submitted all the job but has been kicked out of the cluster
+        # * only a few slurm job for some random version (cluster update...) and user want to retrigger only those job and process the aggregation only. On those cases no need to redo it all.
         tomo_objs = []
         for _, sub_config in split_stitching_configuration_to_slurm_job(stitching_config, yield_configuration=True):
             tomo_objs.append(sub_config.get_output_object().get_identifier().to_str())
@@ -47,34 +49,46 @@ def main():
 
 
     elif stitching_config.slurm_config.partition in (None, ""):
         # case 1: run locally
-        _logger.info(f"run stitching locally with {stitching_config}")
+        _logger.info("run stitching locally with: %s", pformat(stitching_config.to_dict()))
 
-        progress = Progress("z-stitching")
-        progress.set_name("initialize z-stitching")
-        progress.set_advancement(0)
-        z_stitching(stitching_config, progress=progress)
+        main_progress = tqdm(total=100, desc="stitching", leave=True)
+        stitching(stitching_config, progress=main_progress)
     else:
         if not has_sluurp:
             raise ImportError(
-                "sluurp not install. Please install it to distribute stitching on slurm (pip install sluurm)"
+                "sluurp not install. Please install it to distribute stitching on slurm (pip install slurm)"
             )
+        main_progress = tqdm(total=100, position=0, desc="stitching")
+
         # case 2: run on slurm
         # note: to speed up we could do shift research on pre processing and run it only once (if manual of course). Here it will be run for all part
         _logger.info(f"will distribute stitching")
 
         futures = {}
         # 2.1 launch jobs
+        slurm_job_progress_bars: dict = {}
         for i_job, (job, sub_config) in enumerate(
             split_stitching_configuration_to_slurm_job(stitching_config, yield_configuration=True)
         ):
             _logger.info(f"submit job nb {i_job}: handles {sub_config.slices}")
             output_volume = sub_config.get_output_object().get_identifier().to_str()
             futures[output_volume] = submit(job, timeout=999999)
+            # note on total=100: we only consider percentage in this case (providing advancement from slurm jobs)
+            slurm_job_progress_bars[job] = tqdm(
+                total=100,
+                position=i_job + 1,
+                desc=f" part {str(i_job).ljust(3)}",
+                delay=0.5,  # avoid to mess with terminal and (near) future logs
+                bar_format="{l_bar}{bar}",  # avoid using 'r_bar' as 'total' is set to 100 (percentage)
+                leave=False,
+            )
 
+        main_progress.n = 50
         # 2.2 wait for future to be done and concatenate the result
         post_processing = StitchingPostProcAggregation(
             futures=futures,
             stitching_config=stitching_config,
+            progress_bars=slurm_job_progress_bars,
         )
         post_processing.process()
 
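Progress reporting moves from nabu's home-grown Progress class to tqdm, with one bar per SLURM job stacked under a main bar via position=, and percentages pushed in by setting .n directly. A standalone sketch of that layout (job count and percentages invented):

    from tqdm import tqdm

    main = tqdm(total=100, position=0, desc="stitching")
    parts = [
        tqdm(total=100, position=i + 1, desc=f" part {i}", bar_format="{l_bar}{bar}", leave=False)
        for i in range(3)
    ]

    main.n = 50        # all jobs submitted: half of the overall work
    main.refresh()
    for bar in parts:  # pretend each remote job reported 100%
        bar.n = 100
        bar.refresh()
        bar.close()
    main.n = 100
    main.close()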
nabu/app/tests/test_reduce_dark_flat.py CHANGED
@@ -3,7 +3,10 @@ import pytest
 from nabu.app.reduce_dark_flat import reduce_dark_flat
 
 #####
-from tomoscan.test.utils import NXtomoMockContext
+try:
+    from tomoscan.tests.utils import NXtomoMockContext
+except ImportError:
+    from tomoscan.test.utils import NXtomoMockContext
 
 
 @pytest.fixture(scope="function")
nabu/cuda/kernel.py CHANGED
@@ -1,6 +1,7 @@
 import pycuda.gpuarray as garray
 from pycuda.compiler import SourceModule
 from ..processing.kernel_base import KernelBase
+from ..utils import catch_warnings  # TODO use warnings.catch_warnings once python < 3.11 is dropped
 
 
 class CudaKernel(KernelBase):
@@ -37,16 +38,24 @@ class CudaKernel(KernelBase):
         signature=None,
         texrefs=None,
         automation_params=None,
+        silent_compilation_warnings=False,
         **sourcemodule_kwargs,
     ):
-        super().__init__(kernel_name, filename=filename, src=src, automation_params=automation_params)
+        super().__init__(
+            kernel_name,
+            filename=filename,
+            src=src,
+            automation_params=automation_params,
+            silent_compilation_warnings=silent_compilation_warnings,
+        )
         self.compile_kernel_source(kernel_name, sourcemodule_kwargs)
         self.prepare(signature, texrefs)
 
     def compile_kernel_source(self, kernel_name, sourcemodule_kwargs):
         self.sourcemodule_kwargs = sourcemodule_kwargs
         self.kernel_name = kernel_name
-        self.module = SourceModule(self.src, **self.sourcemodule_kwargs)
+        with catch_warnings(action=("ignore" if self.silent_compilation_warnings else None)):  # pylint: disable=E1123
+            self.module = SourceModule(self.src, **self.sourcemodule_kwargs)
         self.func = self.module.get_function(kernel_name)
 
     def prepare(self, kernel_signature, texrefs):
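The TODO refers to Python 3.11, where warnings.catch_warnings() gained an action= keyword (equivalent to calling simplefilter inside the context). nabu.utils.catch_warnings presumably backports that; a plausible sketch of such a shim — a guess, not nabu's actual implementation:

    import warnings
    from contextlib import contextmanager

    @contextmanager
    def catch_warnings(action=None, category=Warning):
        # Emulate Python 3.11's warnings.catch_warnings(action=...) on older interpreters
        with warnings.catch_warnings():
            if action is not None:
                warnings.simplefilter(action, category=category)
            yield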
nabu/cuda/processing.py CHANGED
@@ -49,7 +49,7 @@ class CudaProcessing(ProcessingBase):
         self.stream = stream
         self.device = self.ctx.get_device()
         self.device_name = self.device.name()
-        self.device_id = self.device.get_attribute(dev_attrs.MULTI_GPU_BOARD_GROUP_ID)
+        self.device_id = self.device.get_attribute(dev_attrs.MULTI_GPU_BOARD_GROUP_ID)  # pylint: disable=E0606
 
     def push_context(self):
         self.ctx.push()
@@ -64,7 +64,7 @@ class CudaProcessing(ProcessingBase):
     def kernel(
         self, kernel_name, filename=None, src=None, signature=None, texrefs=None, automation_params=None, **build_kwargs
     ):
-        return CudaKernel(
+        return CudaKernel(  # pylint: disable=E0606
             kernel_name,
             filename=filename,
             src=src,