nabu 2024.2.4__py3-none-any.whl → 2025.1.0.dev4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (164)
  1. doc/doc_config.py +32 -0
  2. nabu/__init__.py +1 -1
  3. nabu/app/bootstrap_stitching.py +4 -2
  4. nabu/app/cast_volume.py +7 -13
  5. nabu/app/cli_configs.py +0 -5
  6. nabu/app/compare_volumes.py +1 -1
  7. nabu/app/composite_cor.py +2 -4
  8. nabu/app/correct_rot.py +0 -8
  9. nabu/app/diag_to_pix.py +5 -6
  10. nabu/app/diag_to_rot.py +10 -11
  11. nabu/app/multicor.py +1 -1
  12. nabu/app/parse_reconstruction_log.py +1 -0
  13. nabu/app/prepare_weights_double.py +1 -2
  14. nabu/app/reconstruct_helical.py +1 -5
  15. nabu/app/reduce_dark_flat.py +0 -2
  16. nabu/app/rotate.py +3 -1
  17. nabu/app/tests/test_reduce_dark_flat.py +2 -2
  18. nabu/app/validator.py +1 -4
  19. nabu/cuda/convolution.py +1 -1
  20. nabu/cuda/fft.py +1 -1
  21. nabu/cuda/medfilt.py +1 -1
  22. nabu/cuda/padding.py +1 -1
  23. nabu/cuda/src/cone.cu +19 -9
  24. nabu/cuda/src/hierarchical_backproj.cu +16 -0
  25. nabu/cuda/utils.py +2 -2
  26. nabu/estimation/alignment.py +17 -31
  27. nabu/estimation/cor.py +23 -29
  28. nabu/estimation/cor_sino.py +2 -8
  29. nabu/estimation/focus.py +4 -8
  30. nabu/estimation/tests/test_alignment.py +2 -0
  31. nabu/estimation/tests/test_tilt.py +1 -1
  32. nabu/estimation/tilt.py +5 -4
  33. nabu/io/cast_volume.py +5 -5
  34. nabu/io/detector_distortion.py +5 -6
  35. nabu/io/reader.py +3 -3
  36. nabu/io/reader_helical.py +5 -4
  37. nabu/io/tests/test_cast_volume.py +2 -2
  38. nabu/io/tests/test_readers.py +4 -4
  39. nabu/io/tests/test_writers.py +2 -2
  40. nabu/io/utils.py +8 -4
  41. nabu/io/writer.py +1 -2
  42. nabu/misc/fftshift.py +1 -1
  43. nabu/misc/fourier_filters.py +1 -1
  44. nabu/misc/histogram.py +1 -1
  45. nabu/misc/histogram_cuda.py +1 -1
  46. nabu/misc/padding_base.py +1 -1
  47. nabu/misc/rotation.py +1 -1
  48. nabu/misc/rotation_cuda.py +1 -1
  49. nabu/misc/tests/test_binning.py +1 -1
  50. nabu/misc/transpose.py +1 -1
  51. nabu/misc/unsharp.py +1 -1
  52. nabu/misc/unsharp_cuda.py +1 -1
  53. nabu/misc/unsharp_opencl.py +1 -1
  54. nabu/misc/utils.py +1 -1
  55. nabu/opencl/fft.py +1 -1
  56. nabu/opencl/padding.py +1 -1
  57. nabu/opencl/utils.py +8 -8
  58. nabu/pipeline/config.py +2 -2
  59. nabu/pipeline/config_validators.py +4 -3
  60. nabu/pipeline/datadump.py +3 -3
  61. nabu/pipeline/estimators.py +6 -6
  62. nabu/pipeline/fullfield/chunked.py +4 -5
  63. nabu/pipeline/fullfield/dataset_validator.py +0 -1
  64. nabu/pipeline/fullfield/nabu_config.py +2 -1
  65. nabu/pipeline/fullfield/reconstruction.py +9 -8
  66. nabu/pipeline/helical/dataset_validator.py +3 -4
  67. nabu/pipeline/helical/fbp.py +4 -4
  68. nabu/pipeline/helical/filtering.py +5 -4
  69. nabu/pipeline/helical/gridded_accumulator.py +9 -10
  70. nabu/pipeline/helical/helical_chunked_regridded.py +1 -0
  71. nabu/pipeline/helical/helical_reconstruction.py +10 -7
  72. nabu/pipeline/helical/helical_utils.py +1 -2
  73. nabu/pipeline/helical/nabu_config.py +1 -0
  74. nabu/pipeline/helical/span_strategy.py +1 -0
  75. nabu/pipeline/helical/weight_balancer.py +1 -2
  76. nabu/pipeline/tests/__init__.py +0 -0
  77. nabu/pipeline/utils.py +1 -1
  78. nabu/pipeline/writer.py +1 -1
  79. nabu/preproc/alignment.py +0 -10
  80. nabu/preproc/ctf.py +8 -8
  81. nabu/preproc/ctf_cuda.py +1 -1
  82. nabu/preproc/double_flatfield_cuda.py +2 -2
  83. nabu/preproc/double_flatfield_variable_region.py +0 -1
  84. nabu/preproc/flatfield.py +1 -1
  85. nabu/preproc/flatfield_cuda.py +1 -2
  86. nabu/preproc/flatfield_variable_region.py +3 -3
  87. nabu/preproc/phase.py +2 -4
  88. nabu/preproc/phase_cuda.py +2 -2
  89. nabu/preproc/shift_cuda.py +0 -1
  90. nabu/preproc/tests/test_ctf.py +3 -3
  91. nabu/preproc/tests/test_double_flatfield.py +1 -1
  92. nabu/preproc/tests/test_flatfield.py +1 -1
  93. nabu/preproc/tests/test_vshift.py +4 -1
  94. nabu/processing/azim.py +2 -2
  95. nabu/processing/convolution_cuda.py +6 -4
  96. nabu/processing/fft_base.py +1 -1
  97. nabu/processing/fft_cuda.py +19 -8
  98. nabu/processing/fft_opencl.py +9 -4
  99. nabu/processing/fftshift.py +1 -1
  100. nabu/processing/histogram.py +1 -1
  101. nabu/processing/muladd.py +0 -1
  102. nabu/processing/padding_base.py +1 -1
  103. nabu/processing/padding_cuda.py +0 -1
  104. nabu/processing/processing_base.py +1 -1
  105. nabu/processing/tests/test_fft.py +1 -1
  106. nabu/processing/tests/test_fftshift.py +1 -1
  107. nabu/processing/tests/test_medfilt.py +1 -3
  108. nabu/processing/tests/test_padding.py +1 -1
  109. nabu/processing/tests/test_roll.py +1 -1
  110. nabu/processing/unsharp_opencl.py +1 -1
  111. nabu/reconstruction/astra.py +245 -0
  112. nabu/reconstruction/cone.py +9 -4
  113. nabu/reconstruction/fbp_base.py +2 -2
  114. nabu/reconstruction/filtering_cuda.py +1 -1
  115. nabu/reconstruction/hbp.py +16 -3
  116. nabu/reconstruction/mlem.py +0 -1
  117. nabu/reconstruction/projection.py +3 -5
  118. nabu/reconstruction/sinogram.py +1 -1
  119. nabu/reconstruction/sinogram_cuda.py +0 -1
  120. nabu/reconstruction/tests/test_cone.py +76 -3
  121. nabu/reconstruction/tests/test_deringer.py +2 -2
  122. nabu/reconstruction/tests/test_fbp.py +1 -1
  123. nabu/reconstruction/tests/test_halftomo.py +27 -1
  124. nabu/reconstruction/tests/test_mlem.py +3 -2
  125. nabu/reconstruction/tests/test_projector.py +7 -2
  126. nabu/reconstruction/tests/test_sino_normalization.py +0 -1
  127. nabu/resources/dataset_analyzer.py +4 -4
  128. nabu/resources/gpu.py +4 -4
  129. nabu/resources/logger.py +4 -4
  130. nabu/resources/nxflatfield.py +2 -2
  131. nabu/resources/tests/test_nxflatfield.py +4 -4
  132. nabu/stitching/alignment.py +1 -4
  133. nabu/stitching/config.py +19 -16
  134. nabu/stitching/frame_composition.py +8 -10
  135. nabu/stitching/overlap.py +2 -2
  136. nabu/stitching/slurm_utils.py +2 -2
  137. nabu/stitching/stitcher/base.py +2 -0
  138. nabu/stitching/stitcher/dumper/base.py +0 -1
  139. nabu/stitching/stitcher/dumper/postprocessing.py +1 -1
  140. nabu/stitching/stitcher/post_processing.py +6 -6
  141. nabu/stitching/stitcher/pre_processing.py +13 -11
  142. nabu/stitching/stitcher/single_axis.py +3 -4
  143. nabu/stitching/stitcher_2D.py +2 -1
  144. nabu/stitching/tests/test_config.py +7 -8
  145. nabu/stitching/tests/test_sample_normalization.py +1 -1
  146. nabu/stitching/tests/test_slurm_utils.py +1 -2
  147. nabu/stitching/tests/test_z_postprocessing_stitching.py +1 -1
  148. nabu/stitching/tests/test_z_preprocessing_stitching.py +4 -4
  149. nabu/stitching/utils/tests/__init__.py +0 -0
  150. nabu/stitching/utils/tests/test_post-processing.py +1 -0
  151. nabu/stitching/utils/utils.py +10 -12
  152. nabu/tests.py +0 -3
  153. nabu/testutils.py +30 -8
  154. nabu/utils.py +28 -18
  155. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev4.dist-info}/METADATA +25 -25
  156. nabu-2025.1.0.dev4.dist-info/RECORD +320 -0
  157. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev4.dist-info}/WHEEL +1 -1
  158. nabu/io/tests/test_detector_distortion.py +0 -178
  159. nabu/resources/tests/test_extract.py +0 -9
  160. nabu-2024.2.4.dist-info/RECORD +0 -318
  161. /nabu/{stitching → app}/tests/__init__.py +0 -0
  162. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev4.dist-info}/LICENSE +0 -0
  163. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev4.dist-info}/entry_points.txt +0 -0
  164. {nabu-2024.2.4.dist-info → nabu-2025.1.0.dev4.dist-info}/top_level.txt +0 -0
@@ -87,20 +87,20 @@ class AlignmentBase:
87
87
  if not len(shape_stack) == 3:
88
88
  raise ValueError(
89
89
  "A stack of 2-dimensional images is required. Shape of stack: %s"
90
- % (" ".join(("%d" % x for x in shape_stack)))
90
+ % (" ".join("%d" % x for x in shape_stack))
91
91
  )
92
92
  if not len(shape_pos) == 1:
93
93
  raise ValueError(
94
94
  "Positions need to be a 1-dimensional array. Shape of the positions variable: %s"
95
- % (" ".join(("%d" % x for x in shape_pos)))
95
+ % (" ".join("%d" % x for x in shape_pos))
96
96
  )
97
97
  if not shape_stack[0] == shape_pos[0]:
98
98
  raise ValueError(
99
99
  "The same number of images and positions is required."
100
100
  + " Shape of stack: %s, shape of positions variable: %s"
101
101
  % (
102
- " ".join(("%d" % x for x in shape_stack)),
103
- " ".join(("%d" % x for x in shape_pos)),
102
+ " ".join("%d" % x for x in shape_stack),
103
+ " ".join("%d" % x for x in shape_pos),
104
104
  )
105
105
  )
106
106
 
@@ -110,18 +110,18 @@ class AlignmentBase:
110
110
  shape_2 = np.squeeze(img_2).shape
111
111
  if not len(shape_1) == 2:
112
112
  raise ValueError(
113
- "Images need to be 2-dimensional. Shape of image #1: %s" % (" ".join(("%d" % x for x in shape_1)))
113
+ "Images need to be 2-dimensional. Shape of image #1: %s" % (" ".join("%d" % x for x in shape_1))
114
114
  )
115
115
  if not len(shape_2) == 2:
116
116
  raise ValueError(
117
- "Images need to be 2-dimensional. Shape of image #2: %s" % (" ".join(("%d" % x for x in shape_2)))
117
+ "Images need to be 2-dimensional. Shape of image #2: %s" % (" ".join("%d" % x for x in shape_2))
118
118
  )
119
119
  if not np.all(shape_1 == shape_2):
120
120
  raise ValueError(
121
121
  "Images need to be of the same shape. Shape of image #1: %s, image #2: %s"
122
122
  % (
123
- " ".join(("%d" % x for x in shape_1)),
124
- " ".join(("%d" % x for x in shape_2)),
123
+ " ".join("%d" % x for x in shape_1),
124
+ " ".join("%d" % x for x in shape_2),
125
125
  )
126
126
  )
127
127
 
@@ -153,7 +153,7 @@ class AlignmentBase:
153
153
  if not (len(f_vals.shape) == 2):
154
154
  raise ValueError(
155
155
  "The fitted values should form a 2-dimensional array. Array of shape: [%s] was given."
156
- % (" ".join(("%d" % s for s in f_vals.shape)))
156
+ % (" ".join("%d" % s for s in f_vals.shape))
157
157
  )
158
158
  if fy is None:
159
159
  fy_half_size = (f_vals.shape[0] - 1) / 2
@@ -161,7 +161,7 @@ class AlignmentBase:
161
161
  elif not (len(fy.shape) == 1 and np.all(fy.size == f_vals.shape[0])):
162
162
  raise ValueError(
163
163
  "Vertical coordinates should have the same length as values matrix. Sizes of fy: %d, f_vals: [%s]"
164
- % (fy.size, " ".join(("%d" % s for s in f_vals.shape)))
164
+ % (fy.size, " ".join("%d" % s for s in f_vals.shape))
165
165
  )
166
166
  if fx is None:
167
167
  fx_half_size = (f_vals.shape[1] - 1) / 2
@@ -169,7 +169,7 @@ class AlignmentBase:
169
169
  elif not (len(fx.shape) == 1 and np.all(fx.size == f_vals.shape[1])):
170
170
  raise ValueError(
171
171
  "Horizontal coordinates should have the same length as values matrix. Sizes of fx: %d, f_vals: [%s]"
172
- % (fx.size, " ".join(("%d" % s for s in f_vals.shape)))
172
+ % (fx.size, " ".join("%d" % s for s in f_vals.shape))
173
173
  )
174
174
 
175
175
  fy, fx = np.meshgrid(fy, fx, indexing="ij")
@@ -190,14 +190,7 @@ class AlignmentBase:
190
190
  vertex_max_yx = [np.max(fy), np.max(fx)]
191
191
  if np.any(vertex_yx < vertex_min_yx) or np.any(vertex_yx > vertex_max_yx):
192
192
  raise ValueError(
193
- "Fitted (y: {}, x: {}) positions are outside the input margins y: [{}, {}], and x: [{}, {}]".format(
194
- vertex_yx[0],
195
- vertex_yx[1],
196
- vertex_min_yx[0],
197
- vertex_max_yx[0],
198
- vertex_min_yx[1],
199
- vertex_max_yx[1],
200
- )
193
+ f"Fitted (y: {vertex_yx[0]}, x: {vertex_yx[1]}) positions are outside the input margins y: [{vertex_min_yx[0]}, {vertex_max_yx[0]}], and x: [{vertex_min_yx[1]}, {vertex_max_yx[1]}]"
201
194
  )
202
195
  return vertex_yx
203
196
 
@@ -225,10 +218,10 @@ class AlignmentBase:
225
218
  float
226
219
  Estimated function max, according to the coordinates in fx.
227
220
  """
228
- if not len(f_vals.shape) in (1, 2):
221
+ if len(f_vals.shape) not in (1, 2):
229
222
  raise ValueError(
230
223
  "The fitted values should be either one or a collection of 1-dimensional arrays. Array of shape: [%s] was given."
231
- % (" ".join(("%d" % s for s in f_vals.shape)))
224
+ % (" ".join("%d" % s for s in f_vals.shape))
232
225
  )
233
226
  num_vals = f_vals.shape[0]
234
227
 
@@ -264,16 +257,9 @@ class AlignmentBase:
264
257
  upper_bound_ok = vertex_x < vertex_max_x
265
258
  if not np.all(lower_bound_ok * upper_bound_ok):
266
259
  if len(f_vals.shape) == 1:
267
- message = "Fitted position {} is outide the input margins [{}, {}]".format(
268
- vertex_x, vertex_min_x, vertex_max_x
269
- )
260
+ message = f"Fitted position {vertex_x} is outide the input margins [{vertex_min_x}, {vertex_max_x}]"
270
261
  else:
271
- message = "Fitted positions outside the input margins [{}, {}]: {} below and {} above".format(
272
- vertex_min_x,
273
- vertex_max_x,
274
- np.sum(1 - lower_bound_ok),
275
- np.sum(1 - upper_bound_ok),
276
- )
262
+ message = f"Fitted positions outside the input margins [{vertex_min_x}, {vertex_max_x}]: {np.sum(1 - lower_bound_ok)} below and {np.sum(1 - upper_bound_ok)} above"
277
263
  raise ValueError(message)
278
264
  if return_vertex_val:
279
265
  vertex_val = coeffs[0, :] + vertex_x * coeffs[1, :] / 2
@@ -354,7 +340,7 @@ class AlignmentBase:
354
340
  if not (len(img_shape) == 2):
355
341
  raise ValueError(
356
342
  "The input image should be either a 1 or 2-dimensional array. Array of shape: [%s] was given."
357
- % (" ".join(("%d" % s for s in cc.shape)))
343
+ % (" ".join("%d" % s for s in cc.shape))
358
344
  )
359
345
  other_axis = (axis + 1) % 2
360
346
  # get pixel having the maximum value of the correlation array
nabu/estimation/cor.py CHANGED
@@ -269,10 +269,9 @@ class CenterOfRotationSlidingWindow(CenterOfRotation):
269
269
  win_ind_max = np.argmax(diffs_mean)
270
270
 
271
271
  diffs_std = diffs_std.min() - diffs_std
272
- if not win_ind_max == np.argmax(diffs_std):
272
+ if win_ind_max != np.argmax(diffs_std):
273
273
  self.logger.warning(
274
- "Minimum mean difference and minimum std-dev of differences do not coincide. "
275
- + "This means that the validity of the found solution might be questionable."
274
+ "Minimum mean difference and minimum std-dev of differences do not coincide. This means that the validity of the found solution might be questionable."
276
275
  )
277
276
  validity_check_result = cor_result_validity["questionable"]
278
277
  else:
@@ -435,10 +434,9 @@ class CenterOfRotationGrowingWindow(CenterOfRotation):
435
434
  win_ind_max = np.argmax(diffs_mean)
436
435
 
437
436
  diffs_std = diffs_std.min() - diffs_std
438
- if not win_ind_max == np.argmax(diffs_std):
437
+ if win_ind_max != np.argmax(diffs_std):
439
438
  self.logger.warning(
440
- "Minimum mean difference and minimum std-dev of differences do not coincide. "
441
- + "This means that the validity of the found solution might be questionable."
439
+ "Minimum mean difference and minimum std-dev of differences do not coincide. This means that the validity of the found solution might be questionable."
442
440
  )
443
441
  validity_check_result = cor_result_validity["questionable"]
444
442
  else:
@@ -578,8 +576,7 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
578
576
 
579
577
  if lim_2 <= lim_1:
580
578
  message = (
581
- "Image shape or cropped selection too small for global search."
582
- + " After removal of the margins the search limits collide."
579
+ "Image shape or cropped selection too small for global search. After removal of the margins the search limits collide."
583
580
  + " The cropped size is %d\n" % (dim_radio)
584
581
  )
585
582
  raise ValueError(message)
@@ -616,11 +613,11 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
616
613
  if "positions are outside the input margins" in str(err):
617
614
  x_cor = min(x_cor + x_cor * self.step_fraction, x_cor + (dim_radio - x_cor) * self.step_fraction)
618
615
  continue
619
- except:
620
- message = "Unexpected error from base class CenterOfRotation.find_shift in CenterOfRotationAdaptiveSearch.find_shift : {err}".format(
621
- err=err
616
+ except Exception as err:
617
+ self.logger.error(
618
+ "Unexpected error from base class CenterOfRotation.find_shift in CenterOfRotationAdaptiveSearch.find_shift: %s"
619
+ % (str(err))
622
620
  )
623
- self.logger.error(message)
624
621
  raise
625
622
 
626
623
  p_1 = cor_position * 2
@@ -656,16 +653,13 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
656
653
  diff_energy = np.array((piece_1 - piece_2) * (piece_1 - piece_2), "d").sum()
657
654
  cost = diff_energy / energy
658
655
 
659
- if not np.isnan(cost):
660
- if tmp_sigma * 2 > abs(x_cor_rel - cor_position):
661
- found_centers.append([cost, abs(x_cor_rel - cor_position), cor_position, energy])
656
+ if not np.isnan(cost) and tmp_sigma * 2 > abs(x_cor_rel - cor_position):
657
+ found_centers.append([cost, abs(x_cor_rel - cor_position), cor_position, energy])
662
658
 
663
659
  x_cor = min(x_cor + x_cor * self.step_fraction, x_cor + (dim_radio - x_cor) * self.step_fraction)
664
660
 
665
661
  if len(found_centers) == 0:
666
- message = "Unable to find any valid CoR candidate in {my_class}.find_shift ".format(
667
- my_class=self.__class__.__name__
668
- )
662
+ message = f"Unable to find any valid CoR candidate in {self.__class__.__name__}.find_shift "
669
663
  raise ValueError(message)
670
664
 
671
665
  # ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -674,6 +668,7 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
674
668
 
675
669
  filtered_found_centers = []
676
670
  for i in range(len(found_centers)):
671
+ # ruff: noqa: SIM102
677
672
  if i > 0:
678
673
  if abs(found_centers[i][2] - found_centers[i - 1][2]) < 0.5:
679
674
  filtered_found_centers.append(found_centers[i])
@@ -711,8 +706,7 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
711
706
  validity_check_result = cor_result_validity["sound"]
712
707
  else:
713
708
  self.logger.warning(
714
- "Minimum mean difference and minimum std-dev of differences do not coincide. "
715
- + "This means that the validity of the found solution might be questionable."
709
+ "Minimum mean difference and minimum std-dev of differences do not coincide. This means that the validity of the found solution might be questionable."
716
710
  )
717
711
  validity_check_result = cor_result_validity["questionable"]
718
712
 
@@ -802,7 +796,7 @@ class CenterOfRotationOctaveAccurate(CenterOfRotation):
802
796
  return res
803
797
  return res
804
798
 
805
- def _interpolate(self, input, shift, mode="mean", interpolation_method="linear"):
799
+ def _interpolate(self, input_, shift, mode="mean", interpolation_method="linear"):
806
800
  """Applies to the input a translation by a vector `shift`. Based on
807
801
  `scipy.ndimage.affine_transform` function.
808
802
  JL: This Octave function was initially used in the refine clause of the local_correlation (Octave find_shift).
@@ -811,7 +805,7 @@ class CenterOfRotationOctaveAccurate(CenterOfRotation):
811
805
 
812
806
  Parameters
813
807
  ----------
814
- input : array
808
+ input_ : array
815
809
  Array to which the translation is applied.
816
810
  shift : tuple, list or array of length 2.
817
811
  mode : str
@@ -862,19 +856,19 @@ class CenterOfRotationOctaveAccurate(CenterOfRotation):
862
856
 
863
857
  if mode == "mean":
864
858
  mode = "constant"
865
- cval = input.mean()
866
- return affine_transform(input, matrix, mode=mode, order=order, cval=cval)
859
+ cval = input_.mean()
860
+ return affine_transform(input_, matrix, mode=mode, order=order, cval=cval)
867
861
  elif mode not in admissible_modes:
868
862
  raise ValueError(f"Pad method is {mode} and should be in {admissible_modes}.")
869
863
 
870
- return affine_transform(input, matrix, mode=mode, order=order)
864
+ return affine_transform(input_, matrix, mode=mode, order=order)
871
865
 
872
866
  def _local_correlation(
873
867
  self,
874
868
  z1,
875
869
  z2,
876
- maxsize=[5, 5],
877
- cor_estimate=[0, 0],
870
+ maxsize=(5, 5),
871
+ cor_estimate=(0, 0),
878
872
  refine=None,
879
873
  pmcc=False,
880
874
  normalize=True,
@@ -1005,7 +999,7 @@ class CenterOfRotationOctaveAccurate(CenterOfRotation):
1005
999
  z1p /= z1p.mean()
1006
1000
 
1007
1001
  for k in range(cc.shape[0]):
1008
- for l in range(cc.shape[1]):
1002
+ for l in range(cc.shape[1]): # noqa: E741
1009
1003
  if pmcc:
1010
1004
  z2p = z2[z2beg[0] - k : z2end[0] - k, z2beg[1] - l : z2end[1] - l].flatten()
1011
1005
  std_z2p = z2p.std()
@@ -1096,7 +1090,7 @@ class CenterOfRotationOctaveAccurate(CenterOfRotation):
1096
1090
  padding_mode=None,
1097
1091
  low_pass=0.01,
1098
1092
  high_pass=None,
1099
- maxsize=[5, 5],
1093
+ maxsize=(5, 5),
1100
1094
  refine=None,
1101
1095
  pmcc=False,
1102
1096
  normalize=True,
@@ -21,12 +21,10 @@ class SinoCor:
21
21
  """
22
22
 
23
23
  def __init__(self, img_1, img_2, logger=None):
24
- """ """
25
24
  self.logger = LoggerOrPrint(logger)
26
25
  self.sx = img_1.shape[1]
27
26
 
28
27
  # algorithm cannot accept odd number of projs. This is handled in the SinoCORFinder class.
29
- nproj2 = img_1.shape[0]
30
28
 
31
29
  # extract upper and lower part of sinogram, flipping H the upper part
32
30
  self.data1 = img_1
@@ -141,9 +139,6 @@ class SinoCor:
141
139
  xc1 = self.rcor_abs - int(xwin / 2)
142
140
  xc2 = self.sx - self.rcor_abs - int(xwin / 2) - 1
143
141
 
144
- im1 = self.data1[:, xc1 : xc1 + xwin]
145
- im2 = self.data2[:, xc2 : xc2 + xwin]
146
-
147
142
  pixs = p_sign * (np.arange(neighborhood) - ng2)
148
143
  diff0 = 1000000000.0
149
144
 
@@ -289,9 +284,8 @@ class CenterOfRotationFourierAngles:
289
284
 
290
285
  def _px(self, detector_width, abs_pos, near_width, near_std, crop_around_cor, near_step):
291
286
  sym_range = None
292
- if abs_pos is not None:
293
- if crop_around_cor:
294
- sym_range = int(abs_pos - near_std * 2), int(abs_pos + near_std * 2)
287
+ if abs_pos is not None and crop_around_cor:
288
+ sym_range = int(abs_pos - near_std * 2), int(abs_pos + near_std * 2)
295
289
 
296
290
  window = near_width
297
291
  if sym_range is not None:
nabu/estimation/focus.py CHANGED
@@ -159,10 +159,8 @@ class CameraFocus(CenterOfRotation):
159
159
 
160
160
  if self.verbose:
161
161
  self.logger.info(
162
- "Fitted focus motor position:",
163
- focus_pos,
164
- "and corresponding image position:",
165
- focus_ind,
162
+ "Fitted focus motor position: %s and corresponding image position: %s"
163
+ % (str(focus_pos), str(focus_ind))
166
164
  )
167
165
  f, ax = plt.subplots(1, 1)
168
166
  self._add_plot_window(f, ax=ax)
@@ -383,10 +381,8 @@ class CameraFocus(CenterOfRotation):
383
381
 
384
382
  if self.verbose:
385
383
  self.logger.info(
386
- "Fitted focus motor position:",
387
- focus_pos,
388
- "and corresponding image position:",
389
- focus_ind,
384
+ "Fitted focus motor position: %s and corresponding image position: %s"
385
+ % (str(focus_pos), str(focus_ind))
390
386
  )
391
387
  self.logger.info("Fitted tilts (to be divided by pixel size, and converted to deg): (v, h) %s" % tilts_vh)
392
388
  fig = plt.figure()
@@ -2,6 +2,8 @@ import numpy as np
2
2
  import pytest
3
3
  from nabu.estimation.alignment import AlignmentBase
4
4
 
5
+ # ruff: noqa: NPY002
6
+
5
7
 
6
8
  @pytest.fixture(scope="class")
7
9
  def bootstrap_base(request):
@@ -1,7 +1,7 @@
1
1
  import pytest
2
2
  import numpy as np
3
3
  from nabu.estimation.tilt import CameraTilt
4
- from nabu.estimation.tests.test_cor import bootstrap_cor
4
+ from nabu.estimation.tests.test_cor import bootstrap_cor # noqa: F401
5
5
 
6
6
  try:
7
7
  import skimage.transform as skt # noqa: F401
nabu/estimation/tilt.py CHANGED
@@ -150,10 +150,11 @@ class CameraTilt(CenterOfRotation):
150
150
 
151
151
  if self.verbose:
152
152
  self.logger.info(
153
- "Fitted center of rotation (pixels):",
154
- cor_offset_pix,
155
- "and camera tilt (degrees):",
156
- tilt_deg,
153
+ "Fitted center of rotation (pixels): %s and camera tilt (degrees): %s"
154
+ % (
155
+ str(cor_offset_pix),
156
+ str(tilt_deg),
157
+ )
157
158
  )
158
159
  f, ax = plt.subplots(1, 1)
159
160
  self._add_plot_window(f, ax=ax)
nabu/io/cast_volume.py CHANGED
@@ -12,7 +12,6 @@ from tomoscan.esrf.volume import (
12
12
  )
13
13
  from tomoscan.io import HDF5File
14
14
  from silx.io.utils import get_data
15
- from silx.utils.enum import Enum as _Enum
16
15
  import numpy
17
16
  from silx.io.url import DataUrl
18
17
  from typing import Optional
@@ -21,7 +20,7 @@ import logging
21
20
  _logger = logging.getLogger(__name__)
22
21
 
23
22
 
24
- __all__ = ["get_default_output_volume", "cast_volume"]
23
+ __all__ = ["cast_volume", "get_default_output_volume"]
25
24
 
26
25
  _DEFAULT_OUTPUT_DIR = "vol_cast"
27
26
 
@@ -43,7 +42,7 @@ def get_default_output_volume(
43
42
  if not isinstance(input_volume, VolumeBase):
44
43
  raise TypeError(f"input_volume is expected to be an instance of {VolumeBase}")
45
44
  valid_file_formats = set(files_formats.values())
46
- if not output_type in valid_file_formats:
45
+ if output_type not in valid_file_formats:
47
46
  raise ValueError(f"output_type is not a valid value ({output_type}). Valid values are {valid_file_formats}")
48
47
 
49
48
  if isinstance(input_volume, (EDFVolume, TIFFVolume, JP2KVolume)):
@@ -167,6 +166,7 @@ def cast_volume(
167
166
  if not isinstance(output_volume, VolumeBase):
168
167
  raise TypeError(f"output_volume is expected to be a {VolumeBase}. {type(output_volume)} provided")
169
168
 
169
+ # ruff: noqa: SIM105, S110
170
170
  try:
171
171
  output_data_type = numpy.dtype(
172
172
  output_data_type
@@ -285,7 +285,7 @@ def find_histogram(volume: VolumeBase, scan: Optional[TomoScanBase] = None) -> O
285
285
  ]
286
286
  )
287
287
  else:
288
- data_path = "/".join((volume.url.data_path(), "histogram/results/data"))
288
+ data_path = f"{volume.url.data_path()}/histogram/results/data"
289
289
  else:
290
290
  # TODO: FIXME: in some case (if the users provides the full data_url and if the 'DATA_DATASET_NAME' is not used we
291
291
  # will endup with an invalid data_path. Hope this case will not happen. Anyway this is a case that we can't handle.)
@@ -343,7 +343,7 @@ def find_histogram(volume: VolumeBase, scan: Optional[TomoScanBase] = None) -> O
343
343
  return None
344
344
 
345
345
  with HDF5File(histogram_file, mode="r") as h5f:
346
- if not data_path in h5f:
346
+ if data_path not in h5f:
347
347
  _logger.info(f"{data_path} in {histogram_file} not found")
348
348
  return None
349
349
  else:
@@ -3,7 +3,6 @@ from scipy import sparse
3
3
 
4
4
 
5
5
  class DetectorDistortionBase:
6
- """ """
7
6
 
8
7
  def __init__(self, detector_full_shape_vh=(0, 0)):
9
8
  """This is the basis class.
@@ -110,7 +109,7 @@ class DetectorDistortionBase:
110
109
  The sub_region contained (x_start, x_end)={(x_start, x_end)} which would slice the
111
110
  full horizontal size which is {self.detector_full_shape_vh[1]}
112
111
  """
113
- raise ValueError()
112
+ raise ValueError(message)
114
113
 
115
114
  x_start, x_end = 0, self.detector_full_shape_vh[1]
116
115
 
@@ -165,7 +164,8 @@ class DetectorDistortionMapsXZ(DetectorDistortionBase):
165
164
  Pixel (i,j) of the corrected image is obtained by interpolating the raw data at position
166
165
  ( map_z(i,j), map_x(i,j) ).
167
166
 
168
- Parameters:
167
+ Parameters
168
+ ----------
169
169
  map_x : float 2D array
170
170
  map_z : float 2D array
171
171
  """
@@ -173,7 +173,6 @@ class DetectorDistortionMapsXZ(DetectorDistortionBase):
173
173
  self._build_full_transformation(map_x, map_z)
174
174
 
175
175
  def _build_full_transformation(self, map_x, map_z):
176
- """ """
177
176
  detector_full_shape_vh = map_x.shape
178
177
  if detector_full_shape_vh != map_z.shape:
179
178
  message = f""" map_x and map_z must have the same shape
@@ -185,7 +184,7 @@ class DetectorDistortionMapsXZ(DetectorDistortionBase):
185
184
 
186
185
  # padding
187
186
  sz, sx = detector_full_shape_vh
188
- total_detector_npixs = sz * sx
187
+ # total_detector_npixs = sz * sx
189
188
  xs = np.clip(np.array(coordinates[1].flat), [[0]], [[sx - 1]])
190
189
  zs = np.clip(np.array(coordinates[0].flat), [[0]], [[sz - 1]])
191
190
 
@@ -253,7 +252,7 @@ class DetectorDistortionMapsXZ(DetectorDistortionBase):
253
252
  The sub_region contained (x_start, x_end)={(x_start, x_end)} which would slice the
254
253
  full horizontal size which is {self.detector_full_shape_vh[1]}
255
254
  """
256
- raise ValueError()
255
+ raise ValueError(message)
257
256
 
258
257
  x_start, x_end = 0, self.detector_full_shape_vh[1]
259
258
 
nabu/io/reader.py CHANGED
@@ -619,7 +619,7 @@ class NXTomoReader(VolReaderBase):
619
619
  If provided, this function first argument must be the source buffer (3D array: stack of raw images),
620
620
  and the second argument must be the destination buffer (3D array, stack of output images). It can be None.
621
621
 
622
- Other parameters
622
+ Other Parameters
623
623
  ----------------
624
624
  The other parameters are passed to "processing_func" if this parameter is not None.
625
625
 
@@ -769,7 +769,7 @@ class NXDarksFlats:
769
769
  reduced_frames = [self._reduce_func[method](frames, axis=0) for frames in raw_frames]
770
770
  reader = getattr(self, "%s_reader" % what)
771
771
  if as_dict:
772
- return {k: v for k, v in zip([s.start for s in reader._image_key_slices], reduced_frames)}
772
+ return {k: v for k, v in zip([s.start for s in reader._image_key_slices], reduced_frames)} # noqa: C416
773
773
  return reduced_frames
774
774
 
775
775
  def get_raw_darks(self, force_reload=False, as_multiple_array=True):
@@ -792,7 +792,7 @@ class NXDarksFlats:
792
792
 
793
793
  def get_reduced_current(self, h5_path="{entry}/control/data", method="median"):
794
794
  current = self.get_raw_current(h5_path=h5_path)
795
- return {k: self._reduce_func[method](current[k]) for k in current.keys()}
795
+ return {k: self._reduce_func[method](current[k]) for k in current}
796
796
 
797
797
 
798
798
  class EDFStackReader(VolReaderBase):
nabu/io/reader_helical.py CHANGED
@@ -1,4 +1,5 @@
1
- from .reader import *
1
+ import numpy as np
2
+ from .reader import ChunkReader, get_compacted_dataslices
2
3
 
3
4
 
4
5
  class ChunkReaderHelical(ChunkReader):
@@ -101,9 +102,9 @@ class ChunkReaderHelical(ChunkReader):
101
102
  self._load_multi(sub_total_prange_slice)
102
103
  else:
103
104
  if self.dataset_subsampling > 1:
104
- assert (
105
- False
106
- ), " in helica pipeline, load file _load_single has not yet been adapted to angular subsampling "
105
+ raise ValueError(
106
+ "in helical pipeline, load file _load_single has not yet been adapted to angular subsampling"
107
+ )
107
108
  self._load_single(sub_total_prange_slice)
108
109
  self._loaded = True
109
110
 
@@ -12,7 +12,7 @@ from tomoscan.esrf.volume import (
12
12
  MultiTIFFVolume,
13
13
  TIFFVolume,
14
14
  )
15
- from nabu.io.writer import __have_jp2k__
15
+ from tomoscan.esrf.volume.jp2kvolume import has_glymur as __have_jp2k__
16
16
  from tomoscan.esrf.scan.edfscan import EDFTomoScan
17
17
  from tomoscan.esrf.scan.nxtomoscan import NXtomoScan
18
18
  import pytest
@@ -105,7 +105,7 @@ def test_find_histogram_hdf5_volume(tmp_path):
105
105
  scheme="silx",
106
106
  )
107
107
 
108
- assert find_histogram(volume=HDF5Volume(file_path=h5_file, data_path="entry")) == None
108
+ assert find_histogram(volume=HDF5Volume(file_path=h5_file, data_path="entry")) is None
109
109
 
110
110
 
111
111
  def test_find_histogram_single_frame_volume(tmp_path):
@@ -47,7 +47,7 @@ class TestNXReader:
47
47
  with pytest.raises(FileNotFoundError):
48
48
  reader = NXTomoReader("/invalid/path", self.nx_data_path)
49
49
  with pytest.raises(KeyError):
50
- reader = NXTomoReader(self.nx_fname, "/bad/data/path")
50
+ reader = NXTomoReader(self.nx_fname, "/bad/data/path") # noqa: F841
51
51
 
52
52
  def test_simple_reads(self):
53
53
  """
@@ -120,7 +120,7 @@ class TestNXReader:
120
120
 
121
121
  for test_case in test_cases:
122
122
  for wrong_shape in test_case["wrong_shapes"]:
123
- with pytest.raises(ValueError):
123
+ with pytest.raises(ValueError): # noqa: PT012
124
124
  data_buffer_wrong_shape = np.zeros(wrong_shape, dtype="f")
125
125
  reader = NXTomoReader(
126
126
  self.nx_fname,
@@ -171,7 +171,7 @@ class TestNXReader:
171
171
 
172
172
  distortion_corrector = DetectorDistortionBase(detector_full_shape_vh=data_desc.frame_shape)
173
173
  distortion_corrector.set_sub_region_transformation(target_sub_region=sub_region_xy)
174
- adapted_subregion = distortion_corrector.get_adapted_subregion(sub_region_xy)
174
+ # adapted_subregion = distortion_corrector.get_adapted_subregion(sub_region_xy)
175
175
  sub_region = (slice(None, None), slice(*sub_region_xy[2:]), slice(*sub_region_xy[:2]))
176
176
 
177
177
  reader_distortion_corr = NXTomoReaderDistortionCorrection(
@@ -328,7 +328,7 @@ class TestEDFReader:
328
328
 
329
329
  distortion_corrector = DetectorDistortionBase(detector_full_shape_vh=self.frame_shape)
330
330
  distortion_corrector.set_sub_region_transformation(target_sub_region=sub_region_xy)
331
- adapted_subregion = distortion_corrector.get_adapted_subregion(sub_region_xy)
331
+ # adapted_subregion = distortion_corrector.get_adapted_subregion(sub_region_xy)
332
332
  sub_region = (slice(None, None), slice(*sub_region_xy[2:]), slice(*sub_region_xy[:2]))
333
333
 
334
334
  reader_distortion_corr = EDFStackReaderDistortionCorrection(
@@ -80,7 +80,7 @@ class TestNXWriter:
80
80
  writer.write(self.data, "test_no_overwrite")
81
81
 
82
82
  writer2 = NXProcessWriter(fname, entry="entry0000", overwrite=False)
83
- with pytest.raises((RuntimeError, OSError)) as ex:
83
+ with pytest.raises((RuntimeError, OSError)):
84
84
  writer2.write(self.data, "test_no_overwrite")
85
85
 
86
- message = "Error should have been raised for trying to overwrite, but got the following: %s" % str(ex.value)
86
+ # message = "Error should have been raised for trying to overwrite, but got the following: %s" % str(ex.value)
nabu/io/utils.py CHANGED
@@ -8,6 +8,8 @@ from tomoscan.volumebase import VolumeBase
8
8
  from tomoscan.esrf import EDFVolume, HDF5Volume, TIFFVolume, JP2KVolume, MultiTIFFVolume
9
9
  from tomoscan.io import HDF5File
10
10
 
11
+ from nabu.utils import first_generator_item
12
+
11
13
 
12
14
  # This function might be moved elsewhere
13
15
  def get_compacted_dataslices(urls, subsampling=None, begin=0):
@@ -100,7 +102,7 @@ def get_compacted_dataslices(urls, subsampling=None, begin=0):
100
102
 
101
103
  def get_first_hdf5_entry(fname):
102
104
  with HDF5File(fname, "r") as fid:
103
- entry = list(fid.keys())[0]
105
+ entry = first_generator_item(fid.keys())
104
106
  return entry
105
107
 
106
108
 
@@ -189,7 +191,7 @@ class _BaseReader(contextlib.AbstractContextManager):
189
191
  if url.scheme() not in ("silx", "h5py"):
190
192
  raise ValueError("Valid scheme are silx and h5py")
191
193
  if url.data_slice() is not None:
192
- raise ValueError("Data slices are not managed. Data path should " "point to a bliss node (h5py.Group)")
194
+ raise ValueError("Data slices are not managed. Data path should point to a bliss node (h5py.Group)")
193
195
  self._url = url
194
196
  self._file_handler = None
195
197
 
@@ -207,7 +209,7 @@ class EntryReader(_BaseReader):
207
209
  else:
208
210
  entry = self._file_handler[self._url.data_path()]
209
211
  if not isinstance(entry, h5py.Group):
210
- raise ValueError("Data path should point to a bliss node (h5py.Group)")
212
+ raise TypeError("Data path should point to a bliss node (h5py.Group)")
211
213
  return entry
212
214
 
213
215
 
@@ -218,7 +220,7 @@ class DatasetReader(_BaseReader):
218
220
  self._file_handler = HDF5File(self._url.file_path(), mode="r")
219
221
  entry = self._file_handler[self._url.data_path()]
220
222
  if not isinstance(entry, h5py.Dataset):
221
- raise ValueError("Data path ({}) should point to a dataset (h5py.Dataset)".format(self._url.path()))
223
+ raise TypeError(f"Data path ({self._url.path()}) should point to a dataset (h5py.Dataset)")
222
224
  return entry
223
225
 
224
226
 
@@ -261,3 +263,5 @@ def get_output_volume(location: str, file_prefix: Optional[str], file_format: st
261
263
  return MultiTIFFVolume(file_path=location)
262
264
  else:
263
265
  return TIFFVolume(folder=location, volume_basename=file_prefix)
266
+ else:
267
+ raise ValueError