nabu 2024.2.4__py3-none-any.whl → 2025.1.0.dev5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (160)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap_stitching.py +4 -2
  3. nabu/app/cast_volume.py +7 -13
  4. nabu/app/cli_configs.py +0 -5
  5. nabu/app/compare_volumes.py +1 -1
  6. nabu/app/composite_cor.py +2 -4
  7. nabu/app/correct_rot.py +0 -8
  8. nabu/app/diag_to_pix.py +5 -6
  9. nabu/app/diag_to_rot.py +10 -11
  10. nabu/app/multicor.py +1 -1
  11. nabu/app/parse_reconstruction_log.py +1 -0
  12. nabu/app/prepare_weights_double.py +1 -2
  13. nabu/app/reconstruct_helical.py +1 -5
  14. nabu/app/reduce_dark_flat.py +0 -2
  15. nabu/app/rotate.py +3 -1
  16. nabu/app/tests/test_reduce_dark_flat.py +2 -2
  17. nabu/app/validator.py +1 -4
  18. nabu/cuda/convolution.py +1 -1
  19. nabu/cuda/fft.py +1 -1
  20. nabu/cuda/medfilt.py +1 -1
  21. nabu/cuda/padding.py +1 -1
  22. nabu/cuda/src/cone.cu +19 -9
  23. nabu/cuda/src/hierarchical_backproj.cu +14 -0
  24. nabu/cuda/utils.py +2 -2
  25. nabu/estimation/alignment.py +17 -31
  26. nabu/estimation/cor.py +23 -29
  27. nabu/estimation/cor_sino.py +2 -8
  28. nabu/estimation/focus.py +4 -8
  29. nabu/estimation/tests/test_alignment.py +2 -0
  30. nabu/estimation/tests/test_tilt.py +1 -1
  31. nabu/estimation/tilt.py +5 -4
  32. nabu/io/cast_volume.py +5 -5
  33. nabu/io/detector_distortion.py +5 -6
  34. nabu/io/reader.py +3 -3
  35. nabu/io/reader_helical.py +5 -4
  36. nabu/io/tests/test_cast_volume.py +2 -2
  37. nabu/io/tests/test_readers.py +4 -4
  38. nabu/io/tests/test_writers.py +2 -2
  39. nabu/io/utils.py +8 -4
  40. nabu/io/writer.py +1 -2
  41. nabu/misc/fftshift.py +1 -1
  42. nabu/misc/fourier_filters.py +1 -1
  43. nabu/misc/histogram.py +1 -1
  44. nabu/misc/histogram_cuda.py +1 -1
  45. nabu/misc/padding_base.py +1 -1
  46. nabu/misc/rotation.py +1 -1
  47. nabu/misc/rotation_cuda.py +1 -1
  48. nabu/misc/tests/test_binning.py +1 -1
  49. nabu/misc/transpose.py +1 -1
  50. nabu/misc/unsharp.py +1 -1
  51. nabu/misc/unsharp_cuda.py +1 -1
  52. nabu/misc/unsharp_opencl.py +1 -1
  53. nabu/misc/utils.py +1 -1
  54. nabu/opencl/fft.py +1 -1
  55. nabu/opencl/padding.py +1 -1
  56. nabu/opencl/utils.py +8 -8
  57. nabu/pipeline/config.py +2 -2
  58. nabu/pipeline/config_validators.py +4 -3
  59. nabu/pipeline/datadump.py +3 -3
  60. nabu/pipeline/estimators.py +6 -6
  61. nabu/pipeline/fullfield/chunked.py +4 -5
  62. nabu/pipeline/fullfield/dataset_validator.py +0 -1
  63. nabu/pipeline/fullfield/nabu_config.py +2 -1
  64. nabu/pipeline/fullfield/reconstruction.py +9 -8
  65. nabu/pipeline/helical/dataset_validator.py +3 -4
  66. nabu/pipeline/helical/fbp.py +4 -4
  67. nabu/pipeline/helical/filtering.py +5 -4
  68. nabu/pipeline/helical/gridded_accumulator.py +9 -10
  69. nabu/pipeline/helical/helical_chunked_regridded.py +1 -0
  70. nabu/pipeline/helical/helical_reconstruction.py +10 -7
  71. nabu/pipeline/helical/helical_utils.py +1 -2
  72. nabu/pipeline/helical/nabu_config.py +1 -0
  73. nabu/pipeline/helical/span_strategy.py +1 -0
  74. nabu/pipeline/helical/weight_balancer.py +1 -2
  75. nabu/pipeline/tests/__init__.py +0 -0
  76. nabu/pipeline/utils.py +1 -1
  77. nabu/pipeline/writer.py +1 -1
  78. nabu/preproc/alignment.py +0 -10
  79. nabu/preproc/ctf.py +8 -8
  80. nabu/preproc/ctf_cuda.py +1 -1
  81. nabu/preproc/double_flatfield_cuda.py +2 -2
  82. nabu/preproc/double_flatfield_variable_region.py +0 -1
  83. nabu/preproc/flatfield.py +1 -1
  84. nabu/preproc/flatfield_cuda.py +1 -2
  85. nabu/preproc/flatfield_variable_region.py +3 -3
  86. nabu/preproc/phase.py +2 -4
  87. nabu/preproc/phase_cuda.py +2 -2
  88. nabu/preproc/shift_cuda.py +0 -1
  89. nabu/preproc/tests/test_ctf.py +3 -3
  90. nabu/preproc/tests/test_double_flatfield.py +1 -1
  91. nabu/preproc/tests/test_flatfield.py +1 -1
  92. nabu/preproc/tests/test_vshift.py +4 -1
  93. nabu/processing/azim.py +2 -2
  94. nabu/processing/convolution_cuda.py +6 -4
  95. nabu/processing/fft_base.py +1 -1
  96. nabu/processing/fft_cuda.py +19 -8
  97. nabu/processing/fft_opencl.py +9 -4
  98. nabu/processing/fftshift.py +1 -1
  99. nabu/processing/histogram.py +1 -1
  100. nabu/processing/muladd.py +0 -1
  101. nabu/processing/padding_base.py +1 -1
  102. nabu/processing/padding_cuda.py +0 -1
  103. nabu/processing/processing_base.py +1 -1
  104. nabu/processing/tests/test_fft.py +1 -1
  105. nabu/processing/tests/test_fftshift.py +1 -1
  106. nabu/processing/tests/test_medfilt.py +1 -3
  107. nabu/processing/tests/test_padding.py +1 -1
  108. nabu/processing/tests/test_roll.py +1 -1
  109. nabu/processing/unsharp_opencl.py +1 -1
  110. nabu/reconstruction/cone.py +9 -4
  111. nabu/reconstruction/fbp_base.py +2 -2
  112. nabu/reconstruction/filtering_cuda.py +1 -1
  113. nabu/reconstruction/hbp.py +16 -3
  114. nabu/reconstruction/mlem.py +0 -1
  115. nabu/reconstruction/projection.py +3 -5
  116. nabu/reconstruction/sinogram.py +1 -1
  117. nabu/reconstruction/sinogram_cuda.py +0 -1
  118. nabu/reconstruction/tests/test_cone.py +76 -3
  119. nabu/reconstruction/tests/test_deringer.py +2 -2
  120. nabu/reconstruction/tests/test_fbp.py +1 -1
  121. nabu/reconstruction/tests/test_halftomo.py +27 -1
  122. nabu/reconstruction/tests/test_mlem.py +3 -2
  123. nabu/reconstruction/tests/test_projector.py +7 -2
  124. nabu/reconstruction/tests/test_sino_normalization.py +0 -1
  125. nabu/resources/dataset_analyzer.py +4 -4
  126. nabu/resources/gpu.py +4 -4
  127. nabu/resources/logger.py +4 -4
  128. nabu/resources/nxflatfield.py +2 -2
  129. nabu/resources/tests/test_nxflatfield.py +4 -4
  130. nabu/stitching/alignment.py +1 -4
  131. nabu/stitching/config.py +19 -16
  132. nabu/stitching/frame_composition.py +8 -10
  133. nabu/stitching/overlap.py +2 -2
  134. nabu/stitching/slurm_utils.py +2 -2
  135. nabu/stitching/stitcher/base.py +2 -0
  136. nabu/stitching/stitcher/dumper/base.py +0 -1
  137. nabu/stitching/stitcher/dumper/postprocessing.py +1 -1
  138. nabu/stitching/stitcher/post_processing.py +6 -6
  139. nabu/stitching/stitcher/pre_processing.py +13 -11
  140. nabu/stitching/stitcher/single_axis.py +3 -4
  141. nabu/stitching/stitcher_2D.py +2 -1
  142. nabu/stitching/tests/test_config.py +7 -8
  143. nabu/stitching/tests/test_sample_normalization.py +1 -1
  144. nabu/stitching/tests/test_slurm_utils.py +1 -2
  145. nabu/stitching/tests/test_z_postprocessing_stitching.py +1 -1
  146. nabu/stitching/tests/test_z_preprocessing_stitching.py +4 -4
  147. nabu/stitching/utils/tests/__init__.py +0 -0
  148. nabu/stitching/utils/tests/test_post-processing.py +1 -0
  149. nabu/stitching/utils/utils.py +10 -12
  150. nabu/tests.py +0 -3
  151. nabu/testutils.py +30 -8
  152. nabu/utils.py +28 -18
  153. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/METADATA +24 -25
  154. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/RECORD +159 -158
  155. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/WHEEL +1 -1
  156. nabu/io/tests/test_detector_distortion.py +0 -178
  157. /nabu/{stitching → app}/tests/__init__.py +0 -0
  158. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/LICENSE +0 -0
  159. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/entry_points.txt +0 -0
  160. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev5.dist-info}/top_level.txt +0 -0
nabu/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "2024.2.4"
+ __version__ = "2025.1.0-dev5"
  __nabu_modules__ = [
  "app",
  "cuda",
nabu/app/bootstrap_stitching.py CHANGED
@@ -23,12 +23,14 @@ def guess_tomo_objects(my_str: str) -> tuple:
  try:
  # create_tomo_object_from_identifier will raise an exception is the string does not match an identifier
  return (Factory.create_tomo_object_from_identifier(my_str),)
- except Exception:
+ except Exception as exc:
+ print("Error:", str(exc))
  pass

  try:
  volumes = guess_volumes(my_str)
- except Exception:
+ except Exception as exc:
+ print("Error:", str(exc))
  pass
  else:
  if len(volumes) > 0:
nabu/app/cast_volume.py CHANGED
@@ -1,6 +1,3 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
-
  import argparse
  import os
  import sys
@@ -49,7 +46,7 @@ def main(argv=None):

  volume_help = f"""To define a volume you can either provide: \n
  * an url (recommanded way) - see details lower \n
- * a path. For hdf5 and multitiff we expect a file path. For edf, tif and jp2k we expect a folder path. In this case we will try to deduce the Volume from it. \n
+ * a path. For hdf5 and multitiff we expect a file path. For edf, tif and jp2k we expect a folder path. In this case we will try to deduce the Volume from it. \n
  url must be defined like: \n{_volume_url_helps}
  """

@@ -150,16 +147,13 @@ def main(argv=None):
  except Exception as e:
  raise ValueError(f"Fail to build output volume from {options.output_volume}") from e

+ # ruff: noqa: SIM102
  if output_format is not None:
  if not (
- isinstance(output_volume, EDFVolume)
- and output_format == "edf"
- or isinstance(output_format, HDF5Volume)
- and output_format == "hdf5"
- or isinstance(output_format, JP2KVolume)
- and output_format == "jp2"
- or isinstance(output_format, (TIFFVolume, MultiTIFFVolume))
- and output_format == "tiff"
+ (isinstance(output_volume, EDFVolume) and output_format == "edf")
+ or (isinstance(output_format, HDF5Volume) and output_format == "hdf5")
+ or (isinstance(output_format, JP2KVolume) and output_format == "jp2")
+ or (isinstance(output_format, (TIFFVolume, MultiTIFFVolume)) and output_format == "tiff")
  ):
  raise ValueError(
  "Requested 'output_type' and output volume url are incoherent. 'output_type' is optional when url provided"
@@ -264,7 +258,7 @@ def main(argv=None):
  # update output volume from options
  output_volume.overwrite = options.overwrite
  if options.compression_ratios is not None:
- output_volume.cratios = list([int(value) for value in convert_str_to_tuple(options.compression_ratios)])
+ output_volume.cratios = [int(value) for value in convert_str_to_tuple(options.compression_ratios)]

  # do volume casting
  cast_volume(
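
Note on the condition rewrite above: Python's `and` binds tighter than `or`, so the old unparenthesized chain already grouped as `(A and B) or (C and D) or ...`; the added parentheses only make that grouping explicit. A standalone illustration (hypothetical values, not nabu code):

```python
# "and" binds tighter than "or": A and B or C and D == (A and B) or (C and D)
A, B, C, D = False, True, True, True
assert (A and B or C and D) == ((A and B) or (C and D))  # identical grouping
# A different grouping would change the result:
print(A and (B or C) and D)    # False
print((A and B) or (C and D))  # True
```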
nabu/app/cli_configs.py CHANGED
@@ -462,11 +462,6 @@ DiagToRotConfig = {
  "--diag_file": dict(
  required=True, help="The reconstruction file obtained by nabu-helical using the diag_zpro_run option", type=str
  ),
- "--entry_name": dict(
- required=False,
- help="entry_name. Defauls is entry0000",
- default="entry0000",
- ),
  "--near": dict(
  required=False,
  help="This is a relative offset respect to the center of the radios. The cor will be searched around the provided value. If not given the optinal parameter original_scan must be the original nexus file; and the estimated core will be taken there. The netry_name parameter also must be provided in this case",
nabu/app/compare_volumes.py CHANGED
@@ -61,7 +61,7 @@ def compare_volumes(fname1, fname2, h5_path, chunk_size, do_stats, stop_at_thres
  finally:
  f1.close()
  f2.close()
- return result
+ return result


  def compare_volumes_cli():
nabu/app/composite_cor.py CHANGED
@@ -1,14 +1,12 @@
- import logging
  import os
  import sys
  import numpy as np
  import re

  from nabu.resources.dataset_analyzer import HDF5DatasetAnalyzer
- from nabu.pipeline.estimators import CompositeCOREstimator, estimate_cor
+ from nabu.pipeline.estimators import CompositeCOREstimator
  from nabu.resources.nxflatfield import update_dataset_info_flats_darks
  from nabu.resources.utils import extract_parameters
- from nxtomo.application.nxtomo import NXtomo
  from .. import version
  from .cli_configs import CompositeCorConfig
  from .utils import parse_params_values
@@ -24,7 +22,7 @@ class NumpyArrayEncoder(json.JSONEncoder):


  def main(user_args=None):
- "Application to extract with the composite cor finder the center of rotation for a scan or a series of scans"
+ """Application to extract with the composite cor finder the center of rotation for a scan or a series of scans"""

  if user_args is None:
  user_args = sys.argv[1:]
nabu/app/correct_rot.py CHANGED
@@ -1,23 +1,15 @@
  from .. import version
- from os import environ

- import argparse
- import shutil
- import os
  import sys
- import re
- import h5py
  import numpy as np


- from ..resources.logger import LoggerOrPrint
  from .utils import parse_params_values
  from .cli_configs import CorrectRotConfig
  from silx.io.dictdump import h5todict

  from nxtomo.application.nxtomo import NXtomo

- import h5py
  from nabu.utils import DictToObj


nabu/app/diag_to_pix.py CHANGED
@@ -31,7 +31,7 @@ from ..pipeline.estimators import oversample
  we must bring to the pixel size

  An example of collection is this :
-
+
  |_____ diagnostics
  | |
  |__ 0
@@ -72,10 +72,10 @@ def transform_images(diag, ovs):

  def detailed_merit(diag, shift):
  # res will become the merit summed over all the pairs theta, theta+180
- res = 0.0
+ # res = 0.0

  # need to account for the weight also. So this will become the used weight for the pairs theta, theta+180
- res_w = 0.0
+ # res_w = 0.0

  ## The following two variables are very important information to be collected.
  ## On the the z translation over a 360 turn
@@ -149,7 +149,7 @@ def build_total_merit_list(diag, oversample_factor, args):
  # calculats the merit at all the tested extra adjustment shifts.

  transform_images(diag, [oversample_factor, 1])
- h_ima = diag.radios[0].shape[0]
+ # h_ima = diag.radios[0].shape[0]
  # search_radius_v = min(oversample_factor * args.search_radius_v, h_ima - 1)
  search_radius_v = oversample_factor * args.search_radius_v

@@ -233,7 +233,6 @@ def main(user_args=None):
  # For each key there is a sequence of radio, the corresponding sequence of weights map, the corresponding z translation, and angles

  zpix_mm = None
- observed_oneturn_total_shift_zpix = None

  argument_list = [
  (DictToObj(h5todict(args.diag_file, os.path.join(diag_url, my_key))), oversample_factor, args)
@@ -244,7 +243,7 @@ def main(user_args=None):
  with Pool(processes=ncpus) as pool:
  all_res_plus_infos = pool.starmap(build_total_merit_list, argument_list)

- observed_oneturn_total_shift_zpix, zpix_mm = None, None
+ _, zpix_mm = None, None

  # needs to flatten the result of pool.map
  for_all_pairs_detailed_merit_lists = []
nabu/app/diag_to_rot.py CHANGED
@@ -31,7 +31,7 @@ from .cli_configs import DiagToRotConfig
  The collection is not done here. Here we exploit the result of a previous collection to deduce, looking at the correlations, the cor

  An example of collection is this :
-
+
  |_____ diagnostics
  | |
  |__ 0
@@ -68,7 +68,8 @@ def transform_images(diag, args):
  But beforehand it is beneficial to remove low spatial frequencies.
  And we do oversampling on the fly.

- Parameters:
+ Parameters
+ ----------
  diag: object
  used member of diag are radios and weights
  args: object
@@ -196,7 +197,6 @@ def find_best_interpolating_line(args):
  z_b = np.nanmax(all_z_transl)

  best_error = np.nan
- best_off_pair = None

  for index_ovlp_a in index_overlap_list_a:
  for index_ovlp_b in index_overlap_list_b:
@@ -204,7 +204,7 @@ def find_best_interpolating_line(args):
  indexes = (np.arange(all_energies.shape[0]))[~np.isnan(index_ovlps)].astype("i")

  index_ovlps = index_ovlps[~np.isnan(index_ovlps)]
- index_ovlps = np.round_(index_ovlps).astype("i")
+ index_ovlps = np.round(index_ovlps).astype("i")

  diff_enes = all_res[(indexes, index_ovlps)]
  orig_enes = all_energies[(indexes, index_ovlps)]
@@ -313,13 +313,12 @@ def main(user_args=None):
  and this is not enough to do correlation + interpolation between sections
  """
  raise RuntimeError(message)
- else:
- if n_pairings_with_data < 1:
- message = f""" The diagnostics collection has probably been run over a too thin section of the scan
+ elif n_pairings_with_data < 1:
+ message = f""" The diagnostics collection has probably been run over a too thin section of the scan
  or you scan does not allow to form pairs of theta, theta+360. I only found {n_pairings_with_data}
  pairings
  """
- raise RuntimeError(message)
+ raise RuntimeError(message)

  # all_merits, all_energies, all_z_transls = zip( result_list )

@@ -345,15 +344,15 @@ def main(user_args=None):

  def do_height_by_height(args, overlap_list, all_diff, all_energies, all_z_transl):
  # now we find the best cor for each chunk, or nan if no overlap is found
- z_a = np.min(all_z_transl)
- z_b = np.max(all_z_transl)
+ # z_a = np.min(all_z_transl)
+ # z_b = np.max(all_z_transl)

  grouped_diff = {}
  grouped_energy = {}

  for diff, energy, z in zip(all_diff, all_energies, all_z_transl):
  found = z
- for key in grouped_diff.keys():
+ for key in grouped_diff:
  if abs(key - z) < 2.0: # these are in pixel units
  found = key
  break
nabu/app/multicor.py CHANGED
@@ -51,6 +51,7 @@ def main():
  #####
  # Remove the first reconstructed file (not used here)
  last_file = list(pipeline.writer.writer.browse_data_files())[-1]
+ # ruff: noqa: SIM105, S110
  try:
  remove(last_file)
  except:
@@ -59,7 +60,6 @@ def main():

  cors = get_user_cors(args["cor"])

- all_recs = []
  rec_instance = pipeline.reconstruction

  for cor in cors:
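
The `# ruff: noqa: SIM105, S110` comment added above silences ruff's suggestion to replace the try/except/pass around `remove(last_file)` with `contextlib.suppress`. For reference, the pattern SIM105 recommends looks like this (illustrative sketch, not part of the diff):

```python
# Equivalent of: try: os.remove(...) / except OSError: pass
import contextlib
import os

with contextlib.suppress(OSError):
    os.remove("/tmp/file_that_may_not_exist")  # hypothetical path
```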
nabu/app/parse_reconstruction_log.py CHANGED
@@ -1,3 +1,4 @@
+ # ruff: noqa
  import numpy as np
  from os import path
  from datetime import datetime
nabu/app/prepare_weights_double.py CHANGED
@@ -4,7 +4,6 @@ from scipy.special import erf # pylint: disable=all
  import sys
  import os
  from scipy.ndimage import gaussian_filter
- from nxtomo.nxobject.nxdetector import ImageKey
  from nabu.resources.nxflatfield import update_dataset_info_flats_darks
  from nabu.resources.dataset_analyzer import HDF5DatasetAnalyzer
  from ..io.reader import load_images_from_dataurl_dict
@@ -57,7 +56,7 @@ def main(argv=None):
  beam_profile = 0
  my_flats = load_images_from_dataurl_dict(dataset_info.flats)

- for key, flat in my_flats.items():
+ for flat in my_flats.values():
  beam_profile += flat
  beam_profile = beam_profile / len(list(dataset_info.flats.keys()))

nabu/app/reconstruct_helical.py CHANGED
@@ -1,7 +1,6 @@
  from .. import version
  from ..resources.utils import is_hdf5_extension
  from ..pipeline.config import parse_nabu_config_file
- from ..pipeline.config_validators import convert_to_int
  from .cli_configs import ReconstructConfig
  from .utils import parse_params_values
  from .reconstruct import update_reconstruction_start_end, get_log_file
@@ -39,7 +38,7 @@ def main_helical():
  try:
  from silx.math.fft.cufft import CUFFT
  except: # can't catch narrower - cublasNotInitialized requires cublas !
- CUFFT = None
+ CUFFT = None # noqa: F841
  #

  logfile = get_log_file(args["logfile"], args["log_file"], forbidden=[args["input_file"]])
@@ -58,9 +57,6 @@ def main_helical():

  # Determine which reconstructor to use
  reconstructor_cls = None
- phase_method = None
- if "phase" in proc.processing_steps:
- phase_method = proc.processing_options["phase"]["method"]

  # fix the reconstruction roi if not given
  if "reconstruction" in proc.processing_steps:
nabu/app/reduce_dark_flat.py CHANGED
@@ -1,6 +1,4 @@
  import sys
- import logging
- import argparse
  from typing import Optional

  from nabu.app.cli_configs import ReduceDarkFlatConfig
nabu/app/rotate.py CHANGED
@@ -7,6 +7,8 @@ from multiprocessing.pool import ThreadPool
  import numpy as np
  from tomoscan.io import HDF5File
  from tomoscan.esrf.scan.nxtomoscan import NXtomoScan
+
+ from nabu.utils import first_generator_item
  from ..io.utils import get_first_hdf5_entry
  from ..processing.rotation import Rotation
  from ..resources.logger import Logger, LoggerOrPrint
@@ -61,7 +63,7 @@ class HDF5ImagesStackRotation:
  self.output_file = output_file
  copy(self.input_file, output_file)

- first_proj_url = self.dataset_info.projections[list(self.dataset_info.projections.keys())[0]]
+ first_proj_url = self.dataset_info.projections[first_generator_item(self.dataset_info.projections.keys())]
  self.data_path = first_proj_url.data_path()
  dirname, basename = posixpath.split(self.data_path)
  self._data_path_dirname = dirname
nabu/app/tests/test_reduce_dark_flat.py CHANGED
@@ -9,7 +9,7 @@ except ImportError:
  from tomoscan.test.utils import NXtomoMockContext


- @pytest.fixture(scope="function")
+ @pytest.fixture
  def hdf5_scan(tmp_path):
  """simple fixture to create a scan and provide it to another function"""
  test_dir = tmp_path / "my_hdf5_scan"
@@ -26,7 +26,7 @@ def hdf5_scan(tmp_path):

  @pytest.mark.parametrize("dark_method", (None, "first", "mean"))
  @pytest.mark.parametrize("flat_method", (None, "last", "median"))
- def test_reduce_dark_flat_hdf5(tmp_path, hdf5_scan, dark_method, flat_method): # noqa F811
+ def test_reduce_dark_flat_hdf5(tmp_path, hdf5_scan, dark_method, flat_method):
  """simply test output - processing is tested at tomoscan side"""
  # test with default url
  default_darks_path = os.path.join(hdf5_scan.path, hdf5_scan.get_dataset_basename() + "_darks.hdf5")
nabu/app/validator.py CHANGED
@@ -1,6 +1,3 @@
- #!/usr/bin/env python
- # -*- coding: utf-8 -*-
-
  import argparse
  import sys
  import os
@@ -19,7 +16,7 @@ def get_scans(path, entries: str):
  if entries == "__all__":
  entries = NXtomoScan.get_valid_entries(path)
  for entry in entries:
- res.append(NXtomoScan(path, entry))
+ res.append(NXtomoScan(path, entry)) # noqa: PERF401
  else:
  raise TypeError(f"{path} does not looks like a folder containing .EDF or a valid nexus file ")
  return res
nabu/cuda/convolution.py CHANGED
@@ -1,4 +1,4 @@
- from ..processing.convolution_cuda import *
+ from ..processing.convolution_cuda import * # noqa: F403
  from ..utils import deprecation_warning

  deprecation_warning(
nabu/cuda/fft.py CHANGED
@@ -1,4 +1,4 @@
- from ..processing.fft_cuda import *
+ from ..processing.fft_cuda import * # noqa: F403
  from ..utils import deprecation_warning

  deprecation_warning("nabu.cuda.fft has been moved to nabu.processing.fft_cuda", do_print=True, func_name="fft_cuda")
nabu/cuda/medfilt.py CHANGED
@@ -1,4 +1,4 @@
- from ..processing.medfilt_cuda import *
+ from ..processing.medfilt_cuda import * # noqa: F403
  from ..utils import deprecation_warning

  deprecation_warning(
nabu/cuda/padding.py CHANGED
@@ -1,4 +1,4 @@
- from ..processing.padding_cuda import *
+ from ..processing.padding_cuda import * # noqa: F403
  from ..utils import deprecation_warning

  deprecation_warning(
nabu/cuda/src/cone.cu CHANGED
@@ -26,23 +26,33 @@ along with the ASTRA Toolbox. If not, see <http://www.gnu.org/licenses/>.
  */


- static const unsigned int g_anglesPerWeightBlock = 16;
- static const unsigned int g_detBlockU = 32;
- static const unsigned int g_detBlockV = 32;
+ // static const unsigned int g_anglesPerWeightBlock = 16;
+ // static const unsigned int g_detBlockU = 32;
+ // static const unsigned int g_detBlockV = 32;


  __global__ void devFDK_preweight(void* D_projData, unsigned int projPitch, unsigned int startAngle, unsigned int endAngle, float fSrcOrigin, float fDetOrigin, float fZShift, float fDetUSize, float fDetVSize, unsigned int iProjAngles, unsigned int iProjU, unsigned int iProjV)
  {
  float* projData = (float*)D_projData;
- int angle = startAngle + blockIdx.y * g_anglesPerWeightBlock + threadIdx.y;
+
+ const uint angle = blockDim.y * blockIdx.y + threadIdx.y + startAngle;
+
  if (angle >= endAngle)
  return;

- const int detectorU = (blockIdx.x%((iProjU+g_detBlockU-1)/g_detBlockU)) * g_detBlockU + threadIdx.x;
- const int startDetectorV = (blockIdx.x/((iProjU+g_detBlockU-1)/g_detBlockU)) * g_detBlockV;
- int endDetectorV = startDetectorV + g_detBlockV;
- if (endDetectorV > iProjV)
- endDetectorV = iProjV;
+ // Astra FDK kernel used this indexing (with the appropriate grid)
+ // const int detectorU = (blockIdx.x%((iProjU+g_detBlockU-1)/g_detBlockU)) * g_detBlockU + threadIdx.x;
+ // const int startDetectorV = (blockIdx.x/((iProjU+g_detBlockU-1)/g_detBlockU)) * g_detBlockV;
+ // Instead we choose a simpler scheme which does not assume pitch memory allocation
+
+ const uint detectorU = blockDim.x * blockIdx.x + threadIdx.x;
+ const int startDetectorV = 0;
+
+ int endDetectorV = iProjV; // startDetectorV + g_detBlockV;
+ // if (endDetectorV > iProjV)
+ // endDetectorV = iProjV;
+ if (detectorU >= iProjU) return;
+

  // We need the length of the central ray and the length of the ray(s) to
  // our detector pixel(s).
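
With the rewritten indexing, each thread handles one `(detectorU, angle)` pair and sweeps all of `detectorV` internally, so the host-side grid only has to tile the detector width and the angle range. A minimal sketch of the implied launch geometry (hypothetical helper, not from the diff):

```python
# Grid dimensions implied by the flat indexing in devFDK_preweight:
# blockDim.x * gridDim.x must cover iProjU, blockDim.y * gridDim.y the angles.
def fdk_preweight_grid(n_proj_u, start_angle, end_angle, block=(32, 16)):
    bx, by = block
    grid_x = (n_proj_u + bx - 1) // bx                 # ceil(iProjU / bx)
    grid_y = (end_angle - start_angle + by - 1) // by  # ceil(n_angles / by)
    return grid_x, grid_y

print(fdk_preweight_grid(2048, 0, 1000))  # -> (64, 63)
```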
nabu/cuda/src/hierarchical_backproj.cu CHANGED
@@ -6,6 +6,20 @@
  Please cite :
  reference to be added...

+
+
+ # Generalized Hierarchical Backprojection (GHBP)
+ # for fast tomographic reconstruction from ultra high resolution images at non-negligible fan angles.
+ #
+ # Authors/Contributions:
+ # - Jonas Graetz, Fraunhofer IIS / Universitat Wurzburg: Algorithm Design and original OpenCL/Python implementation.
+ # - Alessandro Mirone, ESRF: CUDA translation, ESRF / BM18 integration, testing <mirone@esrf.fr>
+ # - Pierre Paleo, ESRF: ESRF / BM18 integration, testing <pierre.paleo@esrf.fr>
+ #
+ # JG was funded by the German Federal Ministry of Education and Research (BMBF), grant 05E2019,
+ # funding the development of BM18 at ESRF in collaboration with the Fraunhofer Gesellschaft,
+ # the Julius-Maximilians-Universitat Wurzburg, and the University of Passau
+
  """
  */
nabu/cuda/utils.py CHANGED
@@ -197,12 +197,12 @@ def cuarray_shape_dtype(cuarray):


  def get_shape_dtype(arr):
- if isinstance(arr, garray.GPUArray) or isinstance(arr, np.ndarray):
+ if isinstance(arr, garray.GPUArray | np.ndarray):
  return arr.shape, arr.dtype
  elif isinstance(arr, cuda.Array):
  return cuarray_shape_dtype(arr)
  else:
- raise ValueError("Unknown array type %s" % str(type(arr)))
+ raise TypeError("Unknown array type %s" % str(type(arr)))


  def copy_array(dst, src, check=False, src_dtype=None, dst_x_in_bytes=0, dst_y=0):
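
The `garray.GPUArray | np.ndarray` form above relies on Python 3.10+, where `isinstance()` accepts PEP 604 union types as the class argument; it is equivalent to the older tuple form. A quick standalone check:

```python
# isinstance() accepts X | Y unions on Python >= 3.10
import numpy as np

x = np.zeros(3)
assert isinstance(x, np.ndarray | list)   # union form
assert isinstance(x, (np.ndarray, list))  # equivalent tuple form
assert not isinstance("text", np.ndarray | list)
```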
nabu/estimation/alignment.py CHANGED
@@ -87,20 +87,20 @@ class AlignmentBase:
  if not len(shape_stack) == 3:
  raise ValueError(
  "A stack of 2-dimensional images is required. Shape of stack: %s"
- % (" ".join(("%d" % x for x in shape_stack)))
+ % (" ".join("%d" % x for x in shape_stack))
  )
  if not len(shape_pos) == 1:
  raise ValueError(
  "Positions need to be a 1-dimensional array. Shape of the positions variable: %s"
- % (" ".join(("%d" % x for x in shape_pos)))
+ % (" ".join("%d" % x for x in shape_pos))
  )
  if not shape_stack[0] == shape_pos[0]:
  raise ValueError(
  "The same number of images and positions is required."
  + " Shape of stack: %s, shape of positions variable: %s"
  % (
- " ".join(("%d" % x for x in shape_stack)),
- " ".join(("%d" % x for x in shape_pos)),
+ " ".join("%d" % x for x in shape_stack),
+ " ".join("%d" % x for x in shape_pos),
  )
  )

@@ -110,18 +110,18 @@ class AlignmentBase:
  shape_2 = np.squeeze(img_2).shape
  if not len(shape_1) == 2:
  raise ValueError(
- "Images need to be 2-dimensional. Shape of image #1: %s" % (" ".join(("%d" % x for x in shape_1)))
+ "Images need to be 2-dimensional. Shape of image #1: %s" % (" ".join("%d" % x for x in shape_1))
  )
  if not len(shape_2) == 2:
  raise ValueError(
- "Images need to be 2-dimensional. Shape of image #2: %s" % (" ".join(("%d" % x for x in shape_2)))
+ "Images need to be 2-dimensional. Shape of image #2: %s" % (" ".join("%d" % x for x in shape_2))
  )
  if not np.all(shape_1 == shape_2):
  raise ValueError(
  "Images need to be of the same shape. Shape of image #1: %s, image #2: %s"
  % (
- " ".join(("%d" % x for x in shape_1)),
- " ".join(("%d" % x for x in shape_2)),
+ " ".join("%d" % x for x in shape_1),
+ " ".join("%d" % x for x in shape_2),
  )
  )

@@ -153,7 +153,7 @@
  if not (len(f_vals.shape) == 2):
  raise ValueError(
  "The fitted values should form a 2-dimensional array. Array of shape: [%s] was given."
- % (" ".join(("%d" % s for s in f_vals.shape)))
+ % (" ".join("%d" % s for s in f_vals.shape))
  )
  if fy is None:
  fy_half_size = (f_vals.shape[0] - 1) / 2
@@ -161,7 +161,7 @@
  elif not (len(fy.shape) == 1 and np.all(fy.size == f_vals.shape[0])):
  raise ValueError(
  "Vertical coordinates should have the same length as values matrix. Sizes of fy: %d, f_vals: [%s]"
- % (fy.size, " ".join(("%d" % s for s in f_vals.shape)))
+ % (fy.size, " ".join("%d" % s for s in f_vals.shape))
  )
  if fx is None:
  fx_half_size = (f_vals.shape[1] - 1) / 2
@@ -169,7 +169,7 @@
  elif not (len(fx.shape) == 1 and np.all(fx.size == f_vals.shape[1])):
  raise ValueError(
  "Horizontal coordinates should have the same length as values matrix. Sizes of fx: %d, f_vals: [%s]"
- % (fx.size, " ".join(("%d" % s for s in f_vals.shape)))
+ % (fx.size, " ".join("%d" % s for s in f_vals.shape))
  )

  fy, fx = np.meshgrid(fy, fx, indexing="ij")
@@ -190,14 +190,7 @@
  vertex_max_yx = [np.max(fy), np.max(fx)]
  if np.any(vertex_yx < vertex_min_yx) or np.any(vertex_yx > vertex_max_yx):
  raise ValueError(
- "Fitted (y: {}, x: {}) positions are outside the input margins y: [{}, {}], and x: [{}, {}]".format(
- vertex_yx[0],
- vertex_yx[1],
- vertex_min_yx[0],
- vertex_max_yx[0],
- vertex_min_yx[1],
- vertex_max_yx[1],
- )
+ f"Fitted (y: {vertex_yx[0]}, x: {vertex_yx[1]}) positions are outside the input margins y: [{vertex_min_yx[0]}, {vertex_max_yx[0]}], and x: [{vertex_min_yx[1]}, {vertex_max_yx[1]}]"
  )
  return vertex_yx

@@ -225,10 +218,10 @@
  float
  Estimated function max, according to the coordinates in fx.
  """
- if not len(f_vals.shape) in (1, 2):
+ if len(f_vals.shape) not in (1, 2):
  raise ValueError(
  "The fitted values should be either one or a collection of 1-dimensional arrays. Array of shape: [%s] was given."
- % (" ".join(("%d" % s for s in f_vals.shape)))
+ % (" ".join("%d" % s for s in f_vals.shape))
  )
  num_vals = f_vals.shape[0]

@@ -264,16 +257,9 @@
  upper_bound_ok = vertex_x < vertex_max_x
  if not np.all(lower_bound_ok * upper_bound_ok):
  if len(f_vals.shape) == 1:
- message = "Fitted position {} is outide the input margins [{}, {}]".format(
- vertex_x, vertex_min_x, vertex_max_x
- )
+ message = f"Fitted position {vertex_x} is outide the input margins [{vertex_min_x}, {vertex_max_x}]"
  else:
- message = "Fitted positions outside the input margins [{}, {}]: {} below and {} above".format(
- vertex_min_x,
- vertex_max_x,
- np.sum(1 - lower_bound_ok),
- np.sum(1 - upper_bound_ok),
- )
+ message = f"Fitted positions outside the input margins [{vertex_min_x}, {vertex_max_x}]: {np.sum(1 - lower_bound_ok)} below and {np.sum(1 - upper_bound_ok)} above"
  raise ValueError(message)
  if return_vertex_val:
  vertex_val = coeffs[0, :] + vertex_x * coeffs[1, :] / 2
@@ -354,7 +340,7 @@
  if not (len(img_shape) == 2):
  raise ValueError(
  "The input image should be either a 1 or 2-dimensional array. Array of shape: [%s] was given."
- % (" ".join(("%d" % s for s in cc.shape)))
+ % (" ".join("%d" % s for s in cc.shape))
  )
  other_axis = (axis + 1) % 2
  # get pixel having the maximum value of the correlation array