nabu 2025.1.0.dev5__py3-none-any.whl → 2025.1.0.dev13__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (66)
  1. nabu/__init__.py +1 -1
  2. nabu/app/double_flatfield.py +18 -5
  3. nabu/app/multicor.py +25 -10
  4. nabu/app/reconstruct_helical.py +4 -4
  5. nabu/app/stitching.py +7 -2
  6. nabu/cuda/src/backproj.cu +10 -10
  7. nabu/cuda/src/cone.cu +4 -0
  8. nabu/cuda/utils.py +1 -1
  9. nabu/estimation/cor.py +3 -3
  10. nabu/io/cast_volume.py +16 -0
  11. nabu/io/reader.py +3 -2
  12. nabu/opencl/src/backproj.cl +10 -10
  13. nabu/pipeline/estimators.py +6 -6
  14. nabu/pipeline/fullfield/chunked.py +13 -13
  15. nabu/pipeline/fullfield/computations.py +4 -1
  16. nabu/pipeline/fullfield/get_double_flatfield.py +147 -0
  17. nabu/pipeline/fullfield/nabu_config.py +16 -4
  18. nabu/pipeline/fullfield/processconfig.py +25 -4
  19. nabu/pipeline/fullfield/reconstruction.py +9 -4
  20. nabu/pipeline/helical/gridded_accumulator.py +1 -1
  21. nabu/pipeline/helical/helical_reconstruction.py +2 -2
  22. nabu/pipeline/helical/nabu_config.py +1 -1
  23. nabu/pipeline/helical/weight_balancer.py +1 -1
  24. nabu/pipeline/params.py +8 -3
  25. nabu/preproc/shift.py +1 -1
  26. nabu/preproc/tests/test_ctf.py +1 -1
  27. nabu/preproc/tests/test_paganin.py +1 -3
  28. nabu/processing/fft_base.py +6 -2
  29. nabu/processing/fft_cuda.py +17 -167
  30. nabu/processing/fft_opencl.py +19 -2
  31. nabu/processing/padding_cuda.py +0 -1
  32. nabu/processing/processing_base.py +11 -5
  33. nabu/processing/tests/test_fft.py +1 -63
  34. nabu/reconstruction/cone.py +39 -9
  35. nabu/reconstruction/fbp.py +7 -0
  36. nabu/reconstruction/fbp_base.py +8 -0
  37. nabu/reconstruction/filtering.py +59 -25
  38. nabu/reconstruction/filtering_cuda.py +21 -20
  39. nabu/reconstruction/filtering_opencl.py +8 -14
  40. nabu/reconstruction/hbp.py +10 -10
  41. nabu/reconstruction/mlem.py +3 -0
  42. nabu/reconstruction/rings_cuda.py +41 -13
  43. nabu/reconstruction/tests/test_cone.py +35 -0
  44. nabu/reconstruction/tests/test_deringer.py +2 -2
  45. nabu/reconstruction/tests/test_fbp.py +35 -14
  46. nabu/reconstruction/tests/test_filtering.py +14 -5
  47. nabu/reconstruction/tests/test_halftomo.py +1 -1
  48. nabu/reconstruction/tests/test_reconstructor.py +1 -1
  49. nabu/resources/dataset_analyzer.py +34 -2
  50. nabu/resources/tests/test_extract.py +4 -2
  51. nabu/stitching/config.py +6 -1
  52. nabu/stitching/stitcher/dumper/__init__.py +1 -0
  53. nabu/stitching/stitcher/dumper/postprocessing.py +105 -1
  54. nabu/stitching/stitcher/post_processing.py +14 -4
  55. nabu/stitching/stitcher/pre_processing.py +1 -1
  56. nabu/stitching/stitcher/single_axis.py +8 -7
  57. nabu/stitching/stitcher/z_stitcher.py +8 -4
  58. nabu/stitching/utils/utils.py +2 -2
  59. nabu/testutils.py +2 -2
  60. nabu/utils.py +9 -2
  61. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info}/METADATA +9 -28
  62. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info}/RECORD +66 -65
  63. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info}/WHEEL +1 -1
  64. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info}/entry_points.txt +0 -0
  65. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info/licenses}/LICENSE +0 -0
  66. {nabu-2025.1.0.dev5.dist-info → nabu-2025.1.0.dev13.dist-info}/top_level.txt +0 -0
nabu/__init__.py CHANGED
@@ -1,4 +1,4 @@
- __version__ = "2025.1.0-dev5"
+ __version__ = "2025.1.0-dev13"
  __nabu_modules__ = [
      "app",
      "cuda",
nabu/app/double_flatfield.py CHANGED
@@ -11,14 +11,26 @@ from .utils import parse_params_values

  class DoubleFlatFieldChunks:
      def __init__(
-         self, dataset_path, output_file, chunk_size=100, sigma=None, do_flatfield=True, h5_entry=None, logger=None
+         self,
+         dataset_path,
+         output_file,
+         dataset_info=None,
+         chunk_size=100,
+         sigma=None,
+         do_flatfield=True,
+         h5_entry=None,
+         logger=None,
      ):
          self.logger = LoggerOrPrint(logger)
-         self.dataset_info = analyze_dataset(dataset_path, extra_options={"hdf5_entry": h5_entry}, logger=logger)
-         self.chunk_size = min(chunk_size, self.dataset_info.radio_dims[-1])
          self.do_flatfield = bool(do_flatfield)
-         if self.do_flatfield:
-             update_dataset_info_flats_darks(self.dataset_info, flatfield_mode=True)
+         if dataset_info is not None:
+             self.dataset_info = dataset_info
+         else:
+             self.dataset_info = analyze_dataset(dataset_path, extra_options={"hdf5_entry": h5_entry}, logger=logger)
+             if self.do_flatfield:
+                 update_dataset_info_flats_darks(self.dataset_info, flatfield_mode=True)
+
+         self.chunk_size = min(chunk_size, self.dataset_info.radio_dims[-1])
          self.output_file = output_file
          self.sigma = sigma if sigma is not None and abs(sigma) > 1e-5 else None

@@ -103,6 +115,7 @@ class DoubleFlatFieldChunks:
          )
          writer.write(arr, "double_flatfield", config=self._get_config())
          self.logger.info("Wrote %s" % writer.fname)
+         return writer.fname


  def dff_cli():
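Note: the new `dataset_info` argument lets callers pass an already-analyzed dataset instead of having `DoubleFlatFieldChunks` re-run `analyze_dataset`, and `write_double_flatfield` now returns the written file name (both are used by the new `get_double_flatfield.py` module further down). A minimal usage sketch, assuming the import paths shown in this diff; the scan and output paths are hypothetical:

    from nabu.resources.dataset_analyzer import analyze_dataset
    from nabu.app.double_flatfield import DoubleFlatFieldChunks

    # Analyze the dataset once, then reuse the result (skips a second analyze_dataset call)
    dataset_info = analyze_dataset("/path/to/scan.nx")  # hypothetical path
    dff = DoubleFlatFieldChunks(
        None,  # dataset_path is not needed when dataset_info is provided
        "/path/to/scan_dff.h5",
        dataset_info=dataset_info,
        chunk_size=100,
        sigma=None,
        do_flatfield=True,  # note: with dataset_info given, flats/darks are assumed to be already set on it
    )
    dff_image = dff.compute_double_flatfield()
    fname = dff.write_double_flatfield(dff_image)  # now returns the output file name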
nabu/app/multicor.py CHANGED
@@ -59,23 +59,34 @@ def main():
      ######

      cors = get_user_cors(args["cor"])
+     options = reconstructor.process_config.processing_options["reconstruction"]
+     reconstruct_from_sinos_stack = (options["method"].lower() == "cone") or (
+         options["method"].lower() == "mlem" and options["implementation"].lower() == "corrct"
+     )
+     do_halftomo = pipeline.process_config.do_halftomo

      rec_instance = pipeline.reconstruction

+     # Get sinogram
+     if reconstruct_from_sinos_stack:
+         sino = pipeline._d_radios.transpose(axes=(1, 0, 2))
+     else:
+         # Get sinogram into contiguous array
+         # TODO Can't do memcpy2D ?! It used to work in cuda 11.
+         # For now: transfer to host... not optimal
+         sino = pipeline._d_radios[:, pipeline._d_radios.shape[1] // 2, :].get()  # pylint: disable=E1136
+
      for cor in cors:
          # Re-configure with new CoR
          pipeline.processing_options["reconstruction"]["rotation_axis_position"] = cor
          pipeline.processing_options["save"]["file_prefix"] = file_prefix + "_%.03f" % cor
          pipeline._init_writer(create_subfolder=False, single_output_file_initialized=False)

-         # Get sinogram into contiguous array
-         # TODO Can't do memcpy2D ?! It used to work in cuda 11.
-         # For now: transfer to host... not optimal
-         sino = pipeline._d_radios[:, pipeline._d_radios.shape[1] // 2, :].get()  # pylint: disable=E1136
-
-         if pipeline.process_config.do_halftomo:
+         # Reconfigure center of rotation
+         if not (do_halftomo):
+             pipeline.reconstruction.reset_rot_center(cor)
+         else:
              # re-initialize FBP object, because in half-tomography the output slice size is a function of CoR
-             options = pipeline.processing_options["reconstruction"]
              rec_instance = pipeline.FBPClass(
                  sino.shape,
                  angles=options["angles"],
@@ -92,11 +103,15 @@ def main():
                      "filter_cutoff": options["fbp_filter_cutoff"],
                  },
              )
-         else:
-             pipeline.reconstruction.reset_rot_center(cor)

          # Run reconstruction
-         rec = rec_instance.fbp(sino)
+         if reconstruct_from_sinos_stack:
+             # Need to copy the sino each time, as it is modified by FDK
+             rec = rec_instance.reconstruct(sino.copy())
+             # take the middle slice
+             rec = rec[rec.shape[0] // 2]
+         else:
+             rec = rec_instance.fbp(sino)
          # if return_all_recs:
          #     all_recs.append(rec)
          rec_3D = view_as_images_stack(rec)  # writer wants 3D data
nabu/app/reconstruct_helical.py CHANGED
@@ -67,11 +67,11 @@ def main_helical():

      if proc.nabu_config["reconstruction"]["auto_size"]:
          if 2 * rot_center > Nx:
-             w = int(round(2 * rot_center))
+             w = round(2 * rot_center)
          else:
-             w = int(round(2 * Nx - 2 * rot_center))
-         rec_config["start_x"] = int(round(rot_center - w / 2))
-         rec_config["end_x"] = int(round(rot_center + w / 2))
+             w = round(2 * Nx - 2 * rot_center)
+         rec_config["start_x"] = round(rot_center - w / 2)
+         rec_config["end_x"] = round(rot_center + w / 2)

          rec_config["start_y"] = rec_config["start_x"]
          rec_config["end_y"] = rec_config["end_x"]
nabu/app/stitching.py CHANGED
@@ -67,12 +67,17 @@ def main():
      futures = {}
      # 2.1 launch jobs
      slurm_job_progress_bars: dict = {}
+
+     # set job name
+     final_output_object_identifier = stitching_config.get_output_object().get_identifier().to_str()
+     stitching_config.slurm_config.job_name = f"stitching-{final_output_object_identifier}"
+
      for i_job, (job, sub_config) in enumerate(
          split_stitching_configuration_to_slurm_job(stitching_config, yield_configuration=True)
      ):
          _logger.info(f"submit job nb {i_job}: handles {sub_config.slices}")
-         output_volume = sub_config.get_output_object().get_identifier().to_str()
-         futures[output_volume] = submit(job, timeout=999999)
+         output_object = sub_config.get_output_object().get_identifier().to_str()
+         futures[output_object] = submit(job, timeout=999999)
          # note on total=100: we only consider percentage in this case (providing advancement from slurm jobs)
          slurm_job_progress_bars[job] = tqdm(
              total=100,
nabu/cuda/src/backproj.cu CHANGED
@@ -18,8 +18,8 @@ inline __device__ int is_in_circle(int x, int y, float center_x, float center_y,
      This will return arr[y][x] where y is an int (exact access) and x is a float (linear interp horizontally)
  */
  static inline __device__ float linear_interpolation(float* arr, int Nx, float x, int y) {
-     // check commented to gain a bit of speed - the check was done before function call
-     // if (x < 0 || x >= Nx) return 0.0f; // texture address mode CLAMP_TO_EDGE
+     // if (x < 0 || x > Nx-1) return 0.0f; // texture address mode BORDER (CLAMP_TO_EDGE continues with edge)
+     if (x <= -0.5f || x >= Nx - 0.5f) return 0.0f; // texture address mode BORDER (CLAMP_TO_EDGE continues with edge)
      int xm = (int) floorf(x);
      int xp = (int) ceilf(x);
      if ((xm == xp) || (xp >= Nx)) return arr[y*Nx+xm];
@@ -127,15 +127,15 @@ __global__ void backproj(
      #endif

      #ifdef USE_TEXTURES
-     if (h1 >= 0 && h1 < num_bins) sum1 += tex2D(tex_projections, h1 + 0.5f, proj + 0.5f);
-     if (h2 >= 0 && h2 < num_bins) sum2 += tex2D(tex_projections, h2 + 0.5f, proj + 0.5f);
-     if (h3 >= 0 && h3 < num_bins) sum3 += tex2D(tex_projections, h3 + 0.5f, proj + 0.5f);
-     if (h4 >= 0 && h4 < num_bins) sum4 += tex2D(tex_projections, h4 + 0.5f, proj + 0.5f);
+     sum1 += tex2D(tex_projections, h1 + 0.5f, proj + 0.5f);
+     sum2 += tex2D(tex_projections, h2 + 0.5f, proj + 0.5f);
+     sum3 += tex2D(tex_projections, h3 + 0.5f, proj + 0.5f);
+     sum4 += tex2D(tex_projections, h4 + 0.5f, proj + 0.5f);
      #else
-     if (h1 >= 0 && h1 < num_bins) sum1 += linear_interpolation(d_sino, num_bins, h1, proj);
-     if (h2 >= 0 && h2 < num_bins) sum2 += linear_interpolation(d_sino, num_bins, h2, proj);
-     if (h3 >= 0 && h3 < num_bins) sum3 += linear_interpolation(d_sino, num_bins, h3, proj);
-     if (h4 >= 0 && h4 < num_bins) sum4 += linear_interpolation(d_sino, num_bins, h4, proj);
+     sum1 += linear_interpolation(d_sino, num_bins, h1, proj);
+     sum2 += linear_interpolation(d_sino, num_bins, h2, proj);
+     sum3 += linear_interpolation(d_sino, num_bins, h3, proj);
+     sum4 += linear_interpolation(d_sino, num_bins, h4, proj);
      #endif
  }
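Note: boundary handling now lives inside `linear_interpolation` (and the texture sampler) with BORDER semantics, where out-of-range reads contribute zero, instead of per-sample `h >= 0 && h < num_bins` guards at each accumulation. A NumPy reference of the sampling rule as read from the kernel; the interpolation tail past the lines shown above is not in the hunk and is assumed to be a standard lerp:

    import numpy as np

    def linear_interpolation_border(arr, nx, x, y):
        # BORDER addressing: outside (-0.5, nx - 0.5) the sample is zero,
        # whereas CLAMP_TO_EDGE would keep returning the edge value
        if x <= -0.5 or x >= nx - 0.5:
            return 0.0
        xm = int(np.floor(x))
        xp = int(np.ceil(x))
        if xm == xp or xp >= nx:  # exact integer coordinate, or right neighbor out of range
            return arr[y, xm]
        # standard linear interpolation (assumed: this part is not shown in the hunk)
        return (xp - x) * arr[y, xm] + (x - xm) * arr[y, xp]

    sino = np.arange(8, dtype=np.float32).reshape(2, 4)  # toy 2x4 "sinogram"
    print(linear_interpolation_border(sino, 4, 1.5, 0))   # 1.5: between bins 1 and 2
    print(linear_interpolation_border(sino, 4, 3.6, 0))   # 0.0: BORDER; clamping would give 3.0
    print(linear_interpolation_border(sino, 4, -1.0, 0))  # 0.0: left of the detector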
nabu/cuda/src/cone.cu CHANGED
@@ -80,7 +80,11 @@ __global__ void devFDK_preweight(void* D_projData, unsigned int projPitch, unsig
      const float fWeight = fW / fRayLength;

+     #ifndef RADIOS_LAYOUT
      projData[(detectorV*iProjAngles+angle)*projPitch+detectorU] *= fWeight;
+     #else
+     projData[(angle*iProjV+detectorV)*projPitch+detectorU] *= fWeight;
+     #endif

      fV += fDetVSize;
  }
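Note: the new `RADIOS_LAYOUT` compile flag selects between two memory layouts of the projection stack: the historical sinogram order `(detectorV, angle, detectorU)` and the radios order `(angle, detectorV, detectorU)` in which nabu holds its chunks (see the `chunked.py` hunk further down). A quick NumPy check that both indexing formulas address the same element under the matching layout, with toy sizes and `projPitch == n_u` (no padding):

    import numpy as np

    n_angles, n_v, n_u = 3, 4, 5
    radios = np.arange(n_angles * n_v * n_u, dtype=np.float32).reshape(n_angles, n_v, n_u)
    sinos = np.ascontiguousarray(radios.transpose(1, 0, 2))  # (detectorV, angle, detectorU)

    angle, v, u = 2, 1, 3
    # default layout: projData[(detectorV*iProjAngles + angle)*projPitch + detectorU]
    from_sinos = sinos.ravel()[(v * n_angles + angle) * n_u + u]
    # RADIOS_LAYOUT: projData[(angle*iProjV + detectorV)*projPitch + detectorU]
    from_radios = radios.ravel()[(angle * n_v + v) * n_u + u]
    assert from_sinos == from_radios == radios[angle, v, u]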
nabu/cuda/utils.py CHANGED
@@ -197,7 +197,7 @@ def cuarray_shape_dtype(cuarray):


  def get_shape_dtype(arr):
-     if isinstance(arr, garray.GPUArray | np.ndarray):
+     if isinstance(arr, (garray.GPUArray, np.ndarray)):
          return arr.shape, arr.dtype
      elif isinstance(arr, cuda.Array):
          return cuarray_shape_dtype(arr)
nabu/estimation/cor.py CHANGED
@@ -640,8 +640,8 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
              * self.sigma_fraction
          )

-         M1 = int(round(cor_position + img_1.shape[1] // 2)) - int(round(tmp_sigma))
-         M2 = int(round(cor_position + img_1.shape[1] // 2)) + int(round(tmp_sigma))
+         M1 = round(cor_position + img_1.shape[1] // 2) - round(tmp_sigma)
+         M2 = round(cor_position + img_1.shape[1] // 2) + round(tmp_sigma)

          piece_1 = img_filtered_1[:, M1:M2]
          piece_2 = img_filtered_2[:, img_1.shape[1] - M2 : img_1.shape[1] - M1]
@@ -678,7 +678,7 @@ class CenterOfRotationAdaptiveSearch(CenterOfRotation):
                  filtered_found_centers.append(found_centers[i])
                  continue

-         if len(filtered_found_centers):
+         if len(filtered_found_centers) > 0:
              found_centers = filtered_found_centers

          min_choice = min(found_centers)
nabu/io/cast_volume.py CHANGED
@@ -1,4 +1,6 @@
  import os
+
+ from tomoscan.esrf.volume.singleframebase import VolumeSingleFrameBase
  from nabu.misc.utils import rescale_data
  from nabu.pipeline.params import files_formats
  from tomoscan.volumebase import VolumeBase
@@ -176,6 +178,17 @@ def cast_volume(
      if not isinstance(output_data_type, numpy.dtype):
          raise TypeError(f"output_data_type is expected to be a {numpy.dtype}. {type(output_data_type)} provided")

+     # Make sure the output volume has the same "start_index" as input volume, if relevant
+     if isinstance(input_volume, VolumeSingleFrameBase) and isinstance(output_volume, VolumeSingleFrameBase):
+         try:
+             first_file_name = next(input_volume.browse_data_files())
+             start_idx = int(first_file_name.split(".")[0].split("_")[-1])
+         except (StopIteration, ValueError, TypeError):
+             # StopIteration: Input volume has no file - should not happen
+             # ValueError / TypeError: fail to convert to int, something wrong when extracting slice number (non-default file name scheme)
+             start_idx = 0
+         output_volume.start_index = start_idx
+
      # start processing
      # check for data_min and data_max
      if data_min is None or data_max is None:
@@ -193,6 +206,9 @@ def cast_volume(
      data_min = data_min if data_min is not None else found_data_min
      data_max = data_max if data_max is not None else found_data_max

+     if isinstance(output_volume, JP2KVolume):
+         output_volume.rescale_data = False
+
      data = []
      for input_slice, frame_dumper in zip(
          input_volume.browse_slices(),
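Note: the `start_index` propagation assumes the default single-frame naming scheme, where the slice number is the last underscore-separated token of the file name before the extension; anything else falls back to 0. A toy illustration of that parsing rule (file names are made up):

    for name in ("rec_sample_0042.tiff", "vol_0000.jp2"):
        start_idx = int(name.split(".")[0].split("_")[-1])
        print(name, "->", start_idx)  # 42, then 0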
nabu/io/reader.py CHANGED
@@ -688,8 +688,9 @@ class NXTomoReader(VolReaderBase):
          user_selection_dim0 = self.sub_region[0]
          indices = np.arange(self.data_shape_total[0])
          data_selection_indices_axis0 = np.hstack(
-             [indices[image_key_slice][user_selection_dim0] for image_key_slice in self._image_key_slices]
-         )
+             [indices[image_key_slice] for image_key_slice in self._image_key_slices]
+         )[user_selection_dim0]
+
          self._source_selection = (data_selection_indices_axis0,) + self.sub_region[1:]

      def _get_temporary_buffer(self, convert_after_reading):
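Note: this fix changes where the user selection is applied: previously it indexed inside each image-key slice and concatenated the results; now the slices are concatenated first and the selection is applied once to the combined index array. With more than one projection block the two orders differ, which a small NumPy example (made-up shapes) makes visible:

    import numpy as np

    indices = np.arange(10)
    image_key_slices = [slice(0, 3), slice(5, 8)]  # two disjoint projection blocks
    user_selection_dim0 = slice(2, 5)              # user asks for frames 2..4

    # old behavior: selection applied per block, then concatenated
    old = np.hstack([indices[s][user_selection_dim0] for s in image_key_slices])
    # new behavior: blocks concatenated, then selection applied once
    new = np.hstack([indices[s] for s in image_key_slices])[user_selection_dim0]

    print(old)  # [2 7]   -- frame 2 of each block
    print(new)  # [2 5 6] -- frames 2..4 of the whole projection series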
nabu/opencl/src/backproj.cl CHANGED
@@ -14,7 +14,7 @@ static inline int is_in_circle(float x, float y, float center_x, float center_y,
      This will return arr[y][x] where y is an int (exact access) and x is a float (linear interp horizontally)
  */
  static inline float linear_interpolation(global float* arr, int Nx, float x, int y) {
-     if (x < 0 || x >= Nx) return 0.0f; // texture address mode CLAMP_TO_EDGE
+     if (x < -0.5f || x > Nx - 0.5f) return 0.0f; // texture address mode BORDER (CLAMP_TO_EDGE continues with edge)
      int xm = (int) floor(x);
      int xp = (int) ceil(x);
      if ((xm == xp) || (xp >= Nx)) return arr[y*Nx+xm];
@@ -53,7 +53,7 @@ kernel void backproj(
      uint Gy = get_global_size(1);

      #ifdef USE_TEXTURES
-     const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP_TO_EDGE | CLK_FILTER_LINEAR;
+     const sampler_t sampler = CLK_NORMALIZED_COORDS_FALSE | CLK_ADDRESS_CLAMP | CLK_FILTER_LINEAR;
      #endif

      // (xr, yr) (xrp, yr)
@@ -111,15 +111,15 @@ kernel void backproj(
      #endif

      #ifdef USE_TEXTURES
-     if (h1 >= 0 && h1 < num_bins) sum1 += read_imagef(d_sino, sampler, (float2) (h1 +0.5f,proj +0.5f)).x;
-     if (h2 >= 0 && h2 < num_bins) sum2 += read_imagef(d_sino, sampler, (float2) (h2 +0.5f,proj +0.5f)).x;
-     if (h3 >= 0 && h3 < num_bins) sum3 += read_imagef(d_sino, sampler, (float2) (h3 +0.5f,proj +0.5f)).x;
-     if (h4 >= 0 && h4 < num_bins) sum4 += read_imagef(d_sino, sampler, (float2) (h4 +0.5f,proj +0.5f)).x;
+     sum1 += read_imagef(d_sino, sampler, (float2) (h1 +0.5f,proj +0.5f)).x;
+     sum2 += read_imagef(d_sino, sampler, (float2) (h2 +0.5f,proj +0.5f)).x;
+     sum3 += read_imagef(d_sino, sampler, (float2) (h3 +0.5f,proj +0.5f)).x;
+     sum4 += read_imagef(d_sino, sampler, (float2) (h4 +0.5f,proj +0.5f)).x;
      #else
-     if (h1 >= 0 && h1 < num_bins) sum1 += linear_interpolation(d_sino, num_bins, h1, proj);
-     if (h2 >= 0 && h2 < num_bins) sum2 += linear_interpolation(d_sino, num_bins, h2, proj);
-     if (h3 >= 0 && h3 < num_bins) sum3 += linear_interpolation(d_sino, num_bins, h3, proj);
-     if (h4 >= 0 && h4 < num_bins) sum4 += linear_interpolation(d_sino, num_bins, h4, proj);
+     sum1 += linear_interpolation(d_sino, num_bins, h1, proj);
+     sum2 += linear_interpolation(d_sino, num_bins, h2, proj);
+     sum3 += linear_interpolation(d_sino, num_bins, h3, proj);
+     sum4 += linear_interpolation(d_sino, num_bins, h4, proj);
      #endif
  }
nabu/pipeline/estimators.py CHANGED
@@ -150,7 +150,7 @@ class CORFinderBase:
              lookup_side = default_lookup_side
          else:
              lookup_side = initial_cor_pos
-         self._lookup_side = initial_cor_pos
+         self._lookup_side = lookup_side

      def _init_cor_finder(self):
          cor_finder_cls = self.search_methods[self.method]["class"]
@@ -542,7 +542,7 @@ class CompositeCORFinder(CORFinderBase):
          else:
              my_flats = None

-         if my_flats is not None and len(list(my_flats.keys())):
+         if my_flats is not None and len(list(my_flats.keys())) > 0:
              self.use_flat = True
              self.flatfield = FlatField(
                  (len(self.absolute_indices), self.sy, self.sx),
@@ -750,15 +750,15 @@ class CompositeCORFinder(CORFinderBase):
          my_blurred_radio1 = np.fliplr(blurred_radio1)
          my_blurred_radio2 = np.fliplr(blurred_radio2)

-         common_left = np.fliplr(my_radio1[:, ovsd_sx - my_z :])[:, : -int(math.ceil(self.ovs * self.high_pass * 2))]
+         common_left = np.fliplr(my_radio1[:, ovsd_sx - my_z :])[:, : -math.ceil(self.ovs * self.high_pass * 2)]
          # adopt a 'safe' margin considering high_pass value (possibly float)
-         common_right = my_radio2[:, ovsd_sx - my_z : -int(math.ceil(self.ovs * self.high_pass * 2))]
+         common_right = my_radio2[:, ovsd_sx - my_z : -math.ceil(self.ovs * self.high_pass * 2)]

          common_blurred_left = np.fliplr(my_blurred_radio1[:, ovsd_sx - my_z :])[
-             :, : -int(math.ceil(self.ovs * self.high_pass * 2))
+             :, : -math.ceil(self.ovs * self.high_pass * 2)
          ]
          # adopt a 'safe' margin considering high_pass value (possibly float)
-         common_blurred_right = my_blurred_radio2[:, ovsd_sx - my_z : -int(math.ceil(self.ovs * self.high_pass * 2))]
+         common_blurred_right = my_blurred_radio2[:, ovsd_sx - my_z : -math.ceil(self.ovs * self.high_pass * 2)]

          if common_right.size == 0:
              continue
nabu/pipeline/fullfield/chunked.py CHANGED
@@ -4,7 +4,7 @@ from math import ceil
  import numpy as np
  from silx.io.url import DataUrl

- from ...utils import get_num_threads, remove_items_from_list
+ from ...utils import get_num_threads, remove_items_from_list, get_subregion as get_subregion_xy
  from ...resources.logger import LoggerOrPrint
  from ...resources.utils import extract_parameters
  from ...misc.binning import binning as image_binning
@@ -456,7 +456,8 @@ class ChunkedPipeline:
          self.double_flatfield = self.DoubleFlatFieldClass(
              self.radios_shape,
              result_url=result_url,
-             sub_region=self.sub_region[1:],
+             # DoubleFlatField expects sub_region as (start_x, end_x, start_y, end_y)
+             sub_region=get_subregion_xy(self.sub_region[1:][::-1]),
              input_is_mlog=False,
              output_is_mlog=False,
              average_is_on_log=avg_is_on_log,
@@ -628,6 +629,7 @@ class ChunkedPipeline:
                  "clip_outer_circle": options["clip_outer_circle"],
                  "outer_circle_value": options["outer_circle_value"],
                  "filter_cutoff": options["fbp_filter_cutoff"],
+                 "crop_filtered_data": options["crop_filtered_data"],
              },
          )

@@ -648,6 +650,11 @@ class ChunkedPipeline:
              },
          )

+         if options.get("crop_filtered_data", True) is False:
+             self.logger.warning(
+                 "Using [reconstruction] crop_filtered_data = False. This will use a large amount of memory."
+             )
+
          self._allocate_recs(*self.process_config.rec_shape, n_slices=n_slices)
          n_a, _, n_x = self.radios_cropped_shape
          self._tmp_sino = self._allocate_array((n_a, n_x), "f", name="tmp_sino")
@@ -779,7 +786,7 @@ class ChunkedPipeline:
      @pipeline_step("sino_deringer", "Removing rings on sinograms")
      def _destripe_sinos(self):
          sinos = np.rollaxis(self.radios, 1, 0)  # view
-         self.sino_deringer.remove_rings(sinos)  # TODO check it works with non-contiguous view
+         self.sino_deringer.remove_rings(sinos)

      @pipeline_step("reconstruction", "Reconstruction")
      def _reconstruct(self):
@@ -804,15 +811,7 @@ class ChunkedPipeline:
          """
          This reconstructs the entire sinograms stack at once
          """
-
-         n_angles, n_z, n_x = self.radios.shape
-
-         # FIXME
-         # can't do a discontiguous single copy...
-         sinos_contig = self._allocate_array((n_z, n_angles, n_x), np.float32, "sinos_cone")
-         for i in range(n_z):
-             sinos_contig[i] = self.radios[:, i, :]
-         # ---
+         sinos_discontig = self.radios.transpose(axes=(1, 0, 2))  # view

          # In principle radios are not cropped at this stage,
          # so self.sub_region[2][0] can be used instead of self.get_slice_start_index() instead of self.sub_region[2][0]
@@ -820,7 +819,8 @@ class ChunkedPipeline:
          n_z_tot = self.process_config.radio_shape(binning=True)[0]

          self.reconstruction.reconstruct(  # pylint: disable=E1101
-             sinos_contig,
+             # sinos_contig,
+             sinos_discontig,
              output=self.recs,
              relative_z_position=((z_min + z_max) / self.process_config.binning_z / 2) - n_z_tot / 2,
          )
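Note: the cone-beam path no longer builds a contiguous `(n_z, n_angles, n_x)` copy of the radios chunk: `transpose(axes=(1, 0, 2))` only swaps strides and returns a view, which the reconstructor can now consume directly (presumably what the `RADIOS_LAYOUT` branch in `cone.cu` above caters for). The NumPy analogue of what the transpose does; the GPU array in the pipeline is transposed the same way:

    import numpy as np

    n_angles, n_z, n_x = 500, 8, 2048
    radios = np.zeros((n_angles, n_z, n_x), dtype=np.float32)

    sinos = radios.transpose(1, 0, 2)       # (n_z, n_angles, n_x) view, no copy
    assert sinos.base is radios             # same underlying buffer
    assert not sinos.flags["C_CONTIGUOUS"]  # only the strides changed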
nabu/pipeline/fullfield/computations.py CHANGED
@@ -128,7 +128,10 @@ def estimate_required_memory(
      if process_config.rec_params["method"] == "cone":
          # In cone-beam reconstruction, need both sinograms and reconstruction inside GPU.
          # That's big!
-         total_memory_needed += 2 * data_volume_size
+         mult_factor = 2
+         if rec_config["crop_filtered_data"] is False:
+             mult_factor = 4
+         total_memory_needed += mult_factor * data_volume_size

      if debug:
          print(
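Note: a rough sense of the new factor, assuming `data_volume_size` is the size of the float32 sinogram stack (the dimensions below are made up):

    n_angles, n_z, n_x = 3600, 100, 2560
    data_volume_size_gb = n_angles * n_z * n_x * 4 / 1e9      # ~3.7 GB of float32
    print(2 * data_volume_size_gb)  # crop_filtered_data=True (default): ~7.4 GB on GPU
    print(4 * data_volume_size_gb)  # crop_filtered_data=False: ~14.7 GB on GPU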
nabu/pipeline/fullfield/get_double_flatfield.py ADDED
@@ -0,0 +1,147 @@
+ """
+ Double-flatfield:
+   - Compute the average of all projections, which gives one resulting image
+   - Apply some filter to this image (DFF)
+   - Subtract or divide this image from all the projections
+ """
+
+ from os import path
+ from silx.io.url import DataUrl
+ from silx.io.dictdump import h5todict
+
+ from nabu.io.utils import get_first_hdf5_entry
+
+ from ...utils import is_writeable
+ from ...app.double_flatfield import DoubleFlatFieldChunks
+ from ...resources.nxflatfield import data_url_exists
+
+ rel_file_path_template = "{scan_name}_dff.h5"
+ data_path_template = "{entry}/double_flatfield"
+
+
+ def get_possible_dff_urls(dataset_info, user_dir, output_dir):
+     """
+     See nabu.resources.nxflatfield.get_frame_possible_urls
+     """
+     entry = dataset_info.hdf5_entry or ""
+
+     def make_dataurl(dirname):
+         file_path = path.join(
+             dirname,
+             rel_file_path_template.format(scan_name=dataset_info.scan_basename),
+         )
+         return DataUrl(
+             file_path=file_path,
+             data_path=data_path_template.format(entry=entry),
+             scheme="silx",
+         )
+
+     urls = {"user": None, "dataset": None, "output": None}
+
+     if user_dir is not None:
+         urls["user"] = make_dataurl(user_dir)
+     urls["dataset"] = make_dataurl(dataset_info.scan_dirname)
+     if output_dir is not None:
+         urls["output"] = make_dataurl(output_dir)
+
+     return urls
+
+
+ def compute_and_save_dff(dataset_info, possible_urls, dff_options):
+     if possible_urls["user"] is not None:
+         dff_output_file = possible_urls["user"].file_path()
+     elif is_writeable(path.dirname(possible_urls["dataset"].file_path())):
+         dff_output_file = possible_urls["dataset"].file_path()
+     else:
+         dff_output_file = possible_urls["output"].file_path()
+
+     dataset_info.logger.info("Computing double flatfield")
+     dff = DoubleFlatFieldChunks(
+         None,
+         dff_output_file,
+         dataset_info=dataset_info,
+         chunk_size=dff_options.get("chunk_size", 100),
+         sigma=dff_options.get("dff_sigma", None),
+         do_flatfield=dff_options.get("do_flatfield", True),
+         logger=dataset_info.logger,
+     )
+     dff_image = dff.compute_double_flatfield()
+     return dff.write_double_flatfield(dff_image)
+
+
+ def check_existing_dff(dff_url, dff_options, logger):
+     # Check that the DFF exists at the given DataUrl, and that its configuration matches the wanted config
+     # Return the DFF file path
+     if not (data_url_exists(dff_url)):
+         raise ValueError("DFF file not found:", dff_url)
+
+     fname = dff_url.file_path()
+     entry = get_first_hdf5_entry(fname)
+     dff_file_options = h5todict(fname, path=entry + "/double_flatfield/configuration", asarray=False)
+
+     ff_file = dff_file_options.get("do_flatfield", True)
+     ff_user = dff_options.get("do_flatfield", True)
+     # Use "==" instead of "is" here, as h5todict() will return something like numpy.True_ instead of True
+     if ff_file != ff_user:
+         msg = "DFF was computed with flatfield=%s, but you asked flatfield=%s" % (ff_file, ff_user)
+         logger.error(msg)
+         return False
+
+     # Use this because h5todict() returns str("None") instead of None
+     def _correct_none(x):
+         if x in [None, "None"]:
+             return None
+         return x
+
+     sigma_file = _correct_none(dff_file_options.get("dff_sigma", None))
+     sigma_user = _correct_none(dff_options.get("dff_sigma", None))
+     if sigma_file != sigma_user:
+         msg = "DFF was computed with dff_sigma=%s, but you asked dff_sigma=%s" % (sigma_file, sigma_user)
+         logger.error(msg)
+         return False
+
+     return fname
+
+
+ # pylint: disable=E1136
+ def get_double_flatfield(dataset_info, mode, output_dir=None, darks_flats_dir=None, dff_options=None):
+     """
+     See nabu.resources.nxflatfield.update_dataset_info_flats_darks for the logic
+     """
+     if mode is False:
+         return
+     dff_options = dff_options or {}
+
+     possible_urls = get_possible_dff_urls(dataset_info, darks_flats_dir, output_dir)
+
+     if mode == "force-compute":
+         return compute_and_save_dff(dataset_info, possible_urls, dff_options)
+
+     def _can_load_from(folder_type):
+         if possible_urls.get(folder_type, None) is None:
+             return False
+         return data_url_exists(possible_urls[folder_type])
+
+     where_to_load_from = None
+     if possible_urls["user"] is not None and _can_load_from("user"):
+         where_to_load_from = "user"
+     elif _can_load_from("dataset"):
+         where_to_load_from = "dataset"
+     elif _can_load_from("output"):
+         where_to_load_from = "output"
+
+     if where_to_load_from is None:
+         if mode == "force-load":
+             raise ValueError("Could not load double-flatfield file (using 'force-load')")
+         else:
+             return compute_and_save_dff(dataset_info, possible_urls, dff_options)
+
+     fname = check_existing_dff(possible_urls[where_to_load_from], dff_options, dataset_info.logger)
+     if fname is False:
+         if mode == "force-load":
+             raise ValueError("Could not load double-flatfield file (using 'force-load'): wrong configuration")
+         return compute_and_save_dff(dataset_info, possible_urls, dff_options)
+     return fname
+
+ # One possible corner case: if mode == "force-load" and darks_flats_dir is not None (but the actual folder is empty)
+ # then nabu will load a DFF found elsewhere (if any). We might want to raise an error instead.
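Note: a hedged sketch of how this module is meant to be driven; the actual call site (in `processconfig.py`, per the files-changed list) is not shown in this diff, so the argument values below are illustrative:

    # mode mirrors the [preproc] double_flatfield option:
    #   True            -> load a pre-computed DFF if one is found, else compute and save it
    #   "force-load"    -> raise if no valid pre-computed DFF can be found
    #   "force-compute" -> always recompute, ignoring existing DFF files
    dff_file = get_double_flatfield(
        dataset_info,                  # a previously analyzed dataset
        True,
        output_dir="/path/to/output",  # hypothetical
        darks_flats_dir=None,          # no user-provided darks/flats directory
        dff_options={"dff_sigma": None, "do_flatfield": True},
    )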
nabu/pipeline/fullfield/nabu_config.py CHANGED
@@ -16,7 +16,7 @@ nabu_config = {
          "type": "advanced",
      },
      "nexus_version": {
-         "default": "1.0",
+         "default": "1.4",
          "help": "Nexus version to use when browsing the HDF5 dataset. Default is 1.0.",
          "validator": float_validator,
          "type": "advanced",
@@ -110,10 +110,10 @@ nabu_config = {
          "validator": generic_options_validator,
          "type": "advanced",
      },
-     "double_flatfield_enabled": {
+     "double_flatfield": {
          "default": "0",
-         "help": "Whether to enable the 'double flat-field' filetering for correcting rings artefacts.",
-         "validator": boolean_validator,
+         "help": "Whether to perform 'double flat-field' filtering (this can help to remove rings artefacts). Possible values:\n - 1 or True: enabled.\n - 0 or False: disabled\n - force-load: use an existing DFF file regardless of the dataset\n - force-compute: re-compute the DFF, ignore all existing .h5 files containing already computed DFF",
+         "validator": flatfield_enabled_validator,
          "type": "optional",
      },
      "dff_sigma": {
@@ -402,6 +402,12 @@ nabu_config = {
          "validator": nonnegative_integer_validator,
          "type": "advanced",
      },
+     "crop_filtered_data": {
+         "default": "1",
+         "help": "Whether to crop the data after the filtering step in FBP/FDK. This parameter should be always 1 unless you know what you are doing.",
+         "validator": boolean_validator,
+         "type": "advanced",
+     },
      "optim_algorithm": {
          "default": "chambolle-pock",
          "help": "Optimization algorithm for iterative methods",
@@ -606,6 +612,12 @@ renamed_keys = {
          "since": "2021.2.0",
          "message": "Option 'flatfield_enabled' has been renamed 'flatfield' in [preproc]",
      },
+     "double_flatfield_enabled": {
+         "section": "preproc",
+         "new_name": "double_flatfield",
+         "since": "2025.1.0",
+         "message": "Option 'double_flatfield_enabled' has been renamed 'double_flatfield' in [preproc]",
+     },
      "rotate_projections": {
          "section": "preproc",
          "new_name": "",