nabu 2022.3.0a1__py3-none-any.whl → 2023.1.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nabu/__init__.py +1 -1
- nabu/app/bootstrap.py +7 -1
- nabu/app/cast_volume.py +8 -2
- nabu/app/cli_configs.py +69 -0
- nabu/app/composite_cor.py +97 -0
- nabu/app/create_distortion_map_from_poly.py +118 -0
- nabu/app/nx_z_splitter.py +1 -1
- nabu/app/prepare_weights_double.py +21 -16
- nabu/app/reconstruct_helical.py +0 -1
- nabu/app/utils.py +10 -5
- nabu/cuda/processing.py +1 -0
- nabu/cuda/tests/test_padding.py +1 -0
- nabu/cuda/utils.py +1 -0
- nabu/distributed/__init__.py +0 -0
- nabu/distributed/utils.py +57 -0
- nabu/distributed/worker.py +543 -0
- nabu/estimation/cor.py +3 -7
- nabu/estimation/cor_sino.py +2 -1
- nabu/estimation/distortion.py +6 -4
- nabu/io/cast_volume.py +10 -1
- nabu/io/detector_distortion.py +305 -0
- nabu/io/reader.py +37 -7
- nabu/io/reader_helical.py +0 -3
- nabu/io/tests/test_cast_volume.py +16 -4
- nabu/io/tests/test_detector_distortion.py +178 -0
- nabu/io/tests/test_writers.py +2 -2
- nabu/io/tiffwriter_zmm.py +2 -3
- nabu/io/writer.py +84 -1
- nabu/io/writer_BACKUP_193259.py +556 -0
- nabu/io/writer_BACKUP_193381.py +556 -0
- nabu/io/writer_BASE_193259.py +548 -0
- nabu/io/writer_BASE_193381.py +548 -0
- nabu/io/writer_LOCAL_193259.py +550 -0
- nabu/io/writer_LOCAL_193381.py +550 -0
- nabu/io/writer_REMOTE_193259.py +557 -0
- nabu/io/writer_REMOTE_193381.py +557 -0
- nabu/misc/fourier_filters.py +2 -0
- nabu/misc/rotation.py +0 -1
- nabu/misc/tests/test_rotation.py +1 -0
- nabu/pipeline/config_validators.py +10 -0
- nabu/pipeline/datadump.py +1 -1
- nabu/pipeline/dataset_validator.py +0 -1
- nabu/pipeline/detector_distortion_provider.py +20 -0
- nabu/pipeline/estimators.py +35 -21
- nabu/pipeline/fallback_utils.py +1 -1
- nabu/pipeline/fullfield/chunked.py +30 -15
- nabu/pipeline/fullfield/chunked_black.py +881 -0
- nabu/pipeline/fullfield/chunked_cuda.py +34 -4
- nabu/pipeline/fullfield/chunked_fb.py +966 -0
- nabu/pipeline/fullfield/chunked_google.py +921 -0
- nabu/pipeline/fullfield/chunked_pep8.py +920 -0
- nabu/pipeline/fullfield/computations.py +7 -6
- nabu/pipeline/fullfield/dataset_validator.py +1 -1
- nabu/pipeline/fullfield/grouped_cuda.py +6 -0
- nabu/pipeline/fullfield/nabu_config.py +15 -3
- nabu/pipeline/fullfield/processconfig.py +5 -0
- nabu/pipeline/fullfield/reconstruction.py +1 -2
- nabu/pipeline/helical/gridded_accumulator.py +1 -8
- nabu/pipeline/helical/helical_chunked_regridded.py +48 -33
- nabu/pipeline/helical/helical_reconstruction.py +1 -9
- nabu/pipeline/helical/nabu_config.py +11 -14
- nabu/pipeline/helical/span_strategy.py +11 -4
- nabu/pipeline/helical/tests/test_accumulator.py +0 -3
- nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -6
- nabu/pipeline/helical/tests/test_strategy.py +0 -1
- nabu/pipeline/helical/weight_balancer.py +0 -1
- nabu/pipeline/params.py +4 -0
- nabu/pipeline/processconfig.py +6 -2
- nabu/pipeline/writer.py +9 -4
- nabu/preproc/distortion.py +4 -3
- nabu/preproc/double_flatfield.py +16 -4
- nabu/preproc/double_flatfield_cuda.py +3 -2
- nabu/preproc/double_flatfield_variable_region.py +13 -4
- nabu/preproc/flatfield.py +29 -7
- nabu/preproc/flatfield_cuda.py +0 -1
- nabu/preproc/flatfield_variable_region.py +5 -2
- nabu/preproc/phase.py +0 -1
- nabu/preproc/phase_cuda.py +0 -1
- nabu/preproc/tests/test_ctf.py +4 -3
- nabu/preproc/tests/test_flatfield.py +6 -7
- nabu/reconstruction/fbp_opencl.py +1 -1
- nabu/reconstruction/filtering.py +0 -1
- nabu/reconstruction/tests/test_fbp.py +1 -0
- nabu/resources/dataset_analyzer.py +0 -1
- nabu/resources/templates/bm05_pag.conf +34 -0
- nabu/resources/templates/id16_ctf.conf +2 -1
- nabu/resources/tests/test_nxflatfield.py +0 -1
- nabu/resources/tests/test_units.py +0 -1
- nabu/stitching/frame_composition.py +7 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/METADATA +2 -7
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/RECORD +96 -75
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/WHEEL +1 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/entry_points.txt +2 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/LICENSE +0 -0
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/top_level.txt +0 -0
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/zip-safe +0 -0
nabu/pipeline/fullfield/computations.py

@@ -3,7 +3,7 @@ from silx.image.tomography import get_next_power
 from ...utils import check_supported
 
 
-def estimate_required_memory(process_config, delta_z=None, delta_a=None, max_mem_allocation_GB=None):
+def estimate_required_memory(process_config, delta_z=None, delta_a=None, max_mem_allocation_GB=None, debug=False):
     """
     Estimate the memory (RAM) needed for a reconstruction.
 
@@ -117,16 +117,17 @@ def estimate_required_memory(process_config, delta_z=None, delta_a=None, max_mem
     # That's big!
     total_memory_needed += 2 * data_volume_size
 
-
-
-
-
+    if debug:
+        print(
+            "Mem for (delta_z=%s, delta_a=%s) ==> (Na=%d, Nz=%d, Nx=%d) : %.3f GB"
+            % (delta_z, delta_a, Na, Nz, Nx, total_memory_needed / 1e9)
+        )
 
     return total_memory_needed
 
 
 def estimate_max_chunk_size(
-    available_memory_GB, process_config, pipeline_part="
+    available_memory_GB, process_config, pipeline_part="all", n_rows=None, step=10, max_mem_allocation_GB=None
 ):
     """
     Estimate the maximum size of the data chunk that can be loaded in memory.
nabu/pipeline/fullfield/dataset_validator.py

@@ -32,7 +32,7 @@ class FullFieldDatasetValidator(DatasetValidatorBase):
         if rec_params["enable_halftomo"]:
             ny, nx = self._get_nx_ny()
             what = (("start_x", "end_x", nx), ("start_y", "end_y", nx), ("start_z", "end_z", nz))
-            for
+            for start_name, end_name, numels in what:
                 self._check_start_end_idx(
                     rec_params[start_name], rec_params[end_name], numels, start_name=start_name, end_name=end_name
                 )
nabu/pipeline/fullfield/grouped_cuda.py

@@ -101,6 +101,8 @@ class CudaGroupedPipeline(GroupedPipeline):
         self._d_radios.get(ary=self._h_radios)
         self.radios = self._h_radios
         super()._write_data(data=self.radios)
+        self.process_config.single_tiff_initialized = False
+        self.process_config.hst_vol_initialized = False
 
 
 
@@ -138,3 +140,7 @@ class CudaSinoStackPipeline(SinoStackPipeline):
     def _write_data(self, data=None):
         recs = self._d_recs.get()  # not ideal - use self.recs ?
         super()._write_data(data=recs)
+        # ?!
+        self.process_config.single_tiff_initialized = True
+        self.process_config.hst_vol_initialized = True
+        #
nabu/pipeline/fullfield/nabu_config.py

@@ -94,6 +94,18 @@ nabu_config = {
            "validator": float_validator,
            "type": "optional",
        },
+        "detector_distortion_correction": {
+            "default": "",
+            "help": "Apply coordinate transformation on the raw data, at the reading stage. Default (empty) is None. Available are: None, identity(for testing the pipeline), map_xz. This latter method requires two URLs being passet by detector_distortion_correction_options: map_x and map_z pointing to two 2D arrays containing the position where each pixel can be interpolated at in the raw data",
+            "validator": detector_distortion_correction_validator,
+            "type": "advanced",
+        },
+        "detector_distortion_correction_options": {
+            "default": "",
+            "help": "Options for sinogram rings correction methods. The parameters are separated by commas and passed as 'name=value', for example: center_xz=(1000,100); angle_deg=5. Mind the semicolon separator (;).",
+            "validator": generic_options_validator,
+            "type": "advanced",
+        },
        "double_flatfield_enabled": {
            "default": "0",
            "help": "Whether to enable the 'double flat-field' filetering for correcting rings artefacts.",
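Note on the new `map_xz` method above: the help text says it expects two URLs (`map_x`, `map_z`) pointing to 2D arrays giving, for every pixel, the raw-data position at which it should be interpolated. The exact URL syntax and storage layout nabu expects are not shown in this diff; the sketch below only illustrates, under those stated assumptions, how such a pair of maps could be built (here a pure identity mapping, analogous to the `identity` test mode) and saved to an HDF5 file.

```python
# Hedged sketch: build identity map_x / map_z arrays for the "map_xz" method
# described above. The file name and dataset paths are assumptions for
# illustration, not taken from this diff.
import numpy as np
import h5py

detector_shape_vh = (2048, 2560)  # (rows "z", columns "x") of the raw detector
map_z, map_x = np.meshgrid(
    np.arange(detector_shape_vh[0], dtype="f"),
    np.arange(detector_shape_vh[1], dtype="f"),
    indexing="ij",
)
# Identity mapping: each corrected pixel is interpolated at its own raw position.
# A real correction would offset these arrays according to the measured distortion.

with h5py.File("distortion_maps.h5", "w") as f:
    f["map_x"] = map_x
    f["map_z"] = map_z
```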
@@ -151,7 +163,7 @@ nabu_config = {
         "sino_rings_options": {
             "default": "sigma=1.0 ; levels=10",
             "help": "Options for sinogram rings correction methods. The parameters are separated by commas and passed as 'name=value', for example: sigma=1.0;levels=10. Mind the semicolon separator (;).",
-            "validator":
+            "validator": generic_options_validator,
             "type": "advanced",
         },
         "rotate_projections": {
@@ -251,7 +263,7 @@ nabu_config = {
         "cor_options": {
             "default": "",
             "help": "Options for methods finding automatically the rotation axis position. The parameters are separated by commas and passed as 'name=value', for example: low_pass=1; high_pass=20. Mind the semicolon separator (;).",
-            "validator":
+            "validator": generic_options_validator,
             "type": "advanced",
         },
         "cor_slice": {
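The help strings above all describe the same "name=value" option syntax with entries separated by semicolons. The diff imports `extract_parameters` from `nabu.resources.utils` for this purpose in the helical pipeline (see the import hunk further below), but its implementation is not part of this diff; the snippet below is only a hypothetical stand-in that demonstrates the documented syntax.

```python
# Illustrative sketch only: nabu's own parser (extract_parameters) is referenced
# in this diff but not shown; this hypothetical helper just demonstrates the
# "name=value; name=value" syntax described in the help strings above.
from ast import literal_eval

def parse_options_string(opts: str) -> dict:
    """Parse e.g. "sigma=1.0; levels=10" into {"sigma": 1.0, "levels": 10}."""
    result = {}
    for item in opts.split(";"):
        item = item.strip()
        if not item:
            continue
        name, _, value = item.partition("=")
        try:
            result[name.strip()] = literal_eval(value.strip())
        except (ValueError, SyntaxError):
            result[name.strip()] = value.strip()  # keep non-literal values as strings
    return result

print(parse_options_string("sigma=1.0; levels=10"))
print(parse_options_string("center_xz=(1000,100); angle_deg=5"))
```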
@@ -408,7 +420,7 @@ nabu_config = {
         },
         "file_format": {
             "default": "hdf5",
-            "help": "Output file format. Available are: hdf5, tiff, jp2, edf",
+            "help": "Output file format. Available are: hdf5, tiff, jp2, edf, vol",
             "validator": output_file_format_validator,
             "type": "optional",
         },
nabu/pipeline/fullfield/processconfig.py

@@ -560,6 +560,7 @@ class ProcessConfig(ProcessConfigBase):
         )
         rec_options = options["reconstruction"]
         rec_options["rotation_axis_position"] = self.rotation_axis_position(binning=True)
+        rec_options["fbp_rotation_axis_position"] = rec_options["rotation_axis_position"]
         rec_options["enable_halftomo"] = self.do_halftomo
         rec_options["axis_correction"] = dataset_info.axis_correction
         if dataset_info.axis_correction is not None:
@@ -572,6 +573,10 @@ class ProcessConfig(ProcessConfigBase):
         # TODO improve halftomo handling
         if self.do_halftomo:
             rec_options["angles"] = rec_options["angles"][: (rec_options["angles"].size + 1) // 2]
+            if rec_options["rotation_axis_position"] < (self.radio_shape(binning=True)[-1] - 1) / 2.0:
+                rec_options["fbp_rotation_axis_position"] = (
+                    self.radio_shape(binning=True)[-1] - rec_options["rotation_axis_position"]
+                )
         # ---
         # New key
         rec_options["cor_estimated_auto"] = isinstance(nabu_config["reconstruction"]["rotation_axis_position"], str)
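The half-tomography branch above mirrors the rotation axis position when it falls on the left half of the detector, so the FBP backprojector always receives a centre of rotation on the right side. A minimal, self-contained sketch of that arithmetic follows (the function and variable names are illustrative, not nabu API).

```python
# Standalone illustration of the mirroring logic added in the hunk above.
def fbp_rotation_axis_position(rotation_axis_position: float, radio_width: int) -> float:
    """Mirror the CoR to the right half of the detector, as in the diff above."""
    if rotation_axis_position < (radio_width - 1) / 2.0:
        return radio_width - rotation_axis_position
    return rotation_axis_position

# A 2048-pixel-wide detector with the CoR at column 300 (left side):
print(fbp_rotation_axis_position(300.0, 2048))   # -> 1748.0
# A CoR already on the right side is left unchanged:
print(fbp_rotation_axis_position(1748.0, 2048))  # -> 1748.0
```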
nabu/pipeline/fullfield/reconstruction.py

@@ -496,13 +496,11 @@ class FullFieldReconstructor:
         )
 
     def _get_chunk_length(self, task):
-
         if self._pipeline_mode == "helical":
             (start_z, end_z) = task["sub_region"]
             return end_z - start_z
 
         else:
-
             (start_angle, end_angle), (start_z, end_z), _ = task["sub_region"]
 
             if self._pipeline_mode == "chunked":
@@ -524,6 +522,7 @@ class FullFieldReconstructor:
         kwargs = {}
         if self.backend == "cuda":
             kwargs["cuda_options"] = self.cuda_options
+        kwargs["use_grouped_mode"] = self._pipeline_mode == "grouped"
         pipeline = self._pipeline_cls(*args, logger=self.logger, margin=task["margin"], **kwargs)
         self.pipeline = pipeline
 
nabu/pipeline/helical/gridded_accumulator.py

@@ -167,7 +167,6 @@ class GriddedAccumulator:
         for i0, epsilon, i1, data, weight, original_angle in zip(
             my_i0, my_epsilon, my_i1, radios_subset, radios_weights_subset, chunk_info.angles_rad[subchunk_slice]
         ):
-
             data_token = data * weight
             self.gridded_radios[i0] += data_token * (1 - epsilon)
             self.gridded_radios[i1] += data_token * epsilon
@@ -186,7 +185,6 @@ class GriddedAccumulator:
             # is coming from another turn
             safe_angular_margin = 3.14 / 10
             for i_diag in range(2):
-
                 if original_angle < self.diagnostic_angles[i_diag] + safe_angular_margin:
                     # we are searching for the first contributions ( the one at the lowest angle)
                     # for the two diagnostics. With the constraint that the second is at an higher angle
@@ -197,7 +195,6 @@ class GriddedAccumulator:
                     self.diagnostic_angles[i_diag] = original_angle
 
                 if abs(original_angle - self.diagnostic_angles[i_diag]) < safe_angular_margin:
-
                     if i0 == 0:
                         factor = 1 - epsilon
                     else:
@@ -224,13 +221,11 @@ class GriddedAccumulator:
         self.floating_subregion = None, None, floating_start_z, floating_end_z
 
     def _extract_preprocess_with_flats(self, data_raw, reframing_infos, chunk_info, output, it_is_weight=False):
-
         if not it_is_weight:
             if self.dark is not None:
                 data_raw = data_raw - self.dark[reframing_infos.dtasrc_start_z : reframing_infos.dtasrc_end_z]
 
             if self.flats is not None:
-
                 for i, idx in enumerate(reframing_infos.subchunk_file_indexes):
                     flat = self._get_flat(idx, slice(reframing_infos.dtasrc_start_z, reframing_infos.dtasrc_end_z))
                     if self.dark is not None:
@@ -249,7 +244,7 @@ class GriddedAccumulator:
         else:
             take_data_from_this = data_raw
 
-        for
+        for data_read, list_subr_start_z, list_subr_end_z, fract_shift, x_shift, data_target in zip(
             take_data_from_this,
             reframing_infos.subr_start_z_list,
             reframing_infos.subr_end_z_list,
@@ -257,7 +252,6 @@ class GriddedAccumulator:
             reframing_infos.x_shifts_list,
             output,
         ):
-
             _fill_in_chunk_by_shift_crop_data(
                 data_target,
                 data_read,
@@ -299,7 +293,6 @@ def _fill_in_chunk_by_shift_crop_data(
     x_shift=0.0,
     extension_padding=True,
 ):
-
     data_read_precisely_shifted = nd.shift(data_read, (-fract_shift, x_shift), order=1, mode="nearest")[:-1]
 
     target_central_slicer, dtasrc_central_slicer = overlap_logic(
nabu/pipeline/helical/helical_chunked_regridded.py

@@ -22,6 +22,10 @@ from ...reconstruction.sinogram import SinoBuilder, SinoNormalization
 from ...misc.unsharp import UnsharpMask
 from ...misc.histogram import PartialHistogram, hist_as_2Darray
 from ..utils import use_options, pipeline_step
+
+from ...resources.utils import extract_parameters
+from ..detector_distortion_provider import DetectorDistortionProvider
+
 from .utils import (
     WriterConfiguratorHelical as WriterConfigurator,
 )  # .utils is the same as ..utils but internally we retouch the key associated to "tiffwriter" of Writers to
@@ -120,7 +124,6 @@ class HelicalChunkedRegriddedPipeline:
         self._init_pipeline()
 
     def _set_params(self, process_config, sub_region, extra_options, phase_margin):
-
         self.process_config = process_config
         self.dataset_info = self.process_config.dataset_info
 
@@ -131,6 +134,9 @@ class HelicalChunkedRegriddedPipeline:
 
         self.chunk_size = sub_region[-1] - sub_region[-2]
         self.radios_buffer = None
+
+        self._set_detector_distortion_correction()
+
         self.set_subregion(sub_region)
 
         self._set_phase_margin(phase_margin)
@@ -240,7 +246,7 @@ class HelicalChunkedRegriddedPipeline:
         fname, ext = path.splitext(fname_full)
         dirname, file_prefix = path.split(fname)
         output_dir = path.join(dirname, file_prefix)
-        file_prefix += str("_%
+        file_prefix += str("_%06d" % self._get_image_start_index())
 
         self.logger.info("omitting config in data_dump because of too slow nexus writer ")
 
@@ -474,7 +480,6 @@ class HelicalChunkedRegriddedPipeline:
     #
 
     def _init_pipeline(self):
-
         self._get_size_of_a_raw_radio()
 
         self._init_reader()
@@ -498,8 +503,17 @@ class HelicalChunkedRegriddedPipeline:
 
         self._configure_regular_accumulator()
 
-    def
+    def _set_detector_distortion_correction(self):
+        if self.process_config.nabu_config["preproc"]["detector_distortion_correction"] is None:
+            self.detector_corrector = None
+        else:
+            self.detector_corrector = DetectorDistortionProvider(
+                detector_full_shape_vh=self.process_config.dataset_info.radio_dims[::-1],
+                correction_type=self.process_config.nabu_config["preproc"]["detector_distortion_correction"],
+                options=self.process_config.nabu_config["preproc"]["detector_distortion_correction_options"],
+            )
 
+    def _configure_regular_accumulator(self):
         accumulator_cls = gridded_accumulator.GriddedAccumulator
 
         self.regular_accumulator = accumulator_cls(
@@ -549,7 +563,8 @@ class HelicalChunkedRegriddedPipeline:
         # dummy initialisation, it will be _set_subregion'ed and set_data_buffer'ed in the loops
         self.chunk_reader = ChunkReaderHelical(
             options["files"],
-            sub_region=
+            sub_region=None,  # setting of subregion will be already done by calls to set_subregion
+            detector_corrector=self.detector_corrector,
             convert_float=True,
             binning=options["binning"],
             dataset_subsampling=options["dataset_subsampling"],
@@ -584,6 +599,7 @@ class HelicalChunkedRegriddedPipeline:
             distortion_correction=distortion_correction,
             radios_srcurrent=options["radios_srcurrent"],
             flats_srcurrent=options["flats_srcurrent"],
+            detector_corrector=self.detector_corrector,
             ## every flat will be read at a different heigth
             ### sub_region=self.sub_region,
             binning=options["binning"],
@@ -608,17 +624,19 @@ class HelicalChunkedRegriddedPipeline:
         self.double_flatfield = None
 
         if options["processes_file"] not in (None, ""):
-
             file_path = options["processes_file"]
             data_path = (self.dataset_info.hdf5_entry or "entry") + "/double_flatfield/results/data"
 
             if path.exists(file_path) and (data_path in h5py.File(file_path, "r")):
-
                 result_url = DataUrl(file_path=file_path, data_path=data_path)
                 self.logger.info("Loading double flatfield from %s" % result_url.file_path())
 
                 self.double_flatfield = self.DoubleFlatFieldClass(
-                    self._get_shape("double_flatfield"),
+                    self._get_shape("double_flatfield"),
+                    result_url=result_url,
+                    binning_x=binning_x,
+                    binning_z=binning_z,
+                    detector_corrector=self.detector_corrector,
                 )
 
     def _init_weights_field(self):
@@ -630,21 +648,18 @@ class HelicalChunkedRegriddedPipeline:
         self.weights_field = None
 
         if options["processes_file"] not in (None, ""):
-
             file_path = options["processes_file"]
             data_path = (self.dataset_info.hdf5_entry or "entry") + "/weights_field/results/data"
 
             if path.exists(file_path) and (data_path in h5py.File(file_path, "r")):
-
                 result_url = DataUrl(file_path=file_path, data_path=data_path)
-                self.logger.info("Loading
+                self.logger.info("Loading weights_field from %s" % result_url.file_path())
 
                 self.weights_field = self.DoubleFlatFieldClass(
                     self._get_shape("double_flatfield"), result_url=result_url, binning_x=binning_x, binning_z=binning_z
                 )
 
     def _init_ccd_corrections(self):
-
         if "ccd_correction" not in self.processing_steps:
             return
 
@@ -690,11 +705,11 @@ class HelicalChunkedRegriddedPipeline:
             self.unsharp_coeff = 0.0
             self.unsharp_method = "log"
         else:
+            options = self.processing_options["unsharp_mask"]
             self.unsharp_sigma = options["unsharp_sigma"]
             self.unsharp_coeff = options["unsharp_coeff"]
             self.unsharp_method = options["unsharp_method"]
 
-        options = self.processing_options["unsharp_mask"]
         self.unsharp_mask = self.UnsharpMaskClass(
             self._get_shape("unsharp_mask"),
             options["unsharp_sigma"],
@@ -755,9 +770,7 @@ class HelicalChunkedRegriddedPipeline:
         start_y, end_y, start_x, end_x = self._rec_roi
 
         if self.HBPClass is not None:
-
             fan_source_distance_meters = self.process_config.nabu_config["reconstruction"]["fan_source_distance_meters"]
-            fan_voxels_size_micron = self.process_config.nabu_config["reconstruction"]["fan_voxels_size_micron"]
 
             self.reconstruction_hbp = self.HBPClass(
                 self._get_shape("one_sino_slim"),
@@ -766,7 +779,7 @@ class HelicalChunkedRegriddedPipeline:
                 rot_center=rot_center,
                 extra_options={"axis_correction": np.zeros(self.radios.shape[0], "f")},
                 axis_source_meters=fan_source_distance_meters,
-                voxel_size_microns=
+                voxel_size_microns=options["pixel_size_cm"] * 1.0e4,
                 scale_factor=1.0 / options["pixel_size_cm"],
             )
 
@@ -785,6 +798,7 @@ class HelicalChunkedRegriddedPipeline:
             extra_options={
                 "scale_factor": 1.0 / options["pixel_size_cm"],
                 "axis_correction": np.zeros(self.radios.shape[0], "f"),
+                "clip_outer_circle": options["clip_outer_circle"],
             },  # "padding_mode": options["padding_type"], },
         )
 
@@ -823,7 +837,7 @@ class HelicalChunkedRegriddedPipeline:
 
         if self._hdf5_output:
             fname_start_index = None
-            file_prefix += str("_%
+            file_prefix += str("_%06d" % self._get_slice_start_index())
             entry = getattr(self.dataset_info.dataset_scanner, "entry", None)
             nx_info = {
                 "process_name": self._get_process_name(),
@@ -861,7 +875,6 @@ class HelicalChunkedRegriddedPipeline:
         return t
 
     def _expand_slice(self, subchunk_slice):
-
         start, stop, step = subchunk_slice.start, subchunk_slice.stop, subchunk_slice.step
         if step is None:
             step = 1
@@ -901,7 +914,6 @@ class HelicalChunkedRegriddedPipeline:
         data_raw = self.chunk_reader.data[: len(my_indexes)]
 
         if (self.flatfield is not None) or (self.double_flatfield is not None):
-
             sub_regions_per_radio = [self.trimmed_floating_subregion] * len(my_indexes)
 
             if self.flatfield is not None:
@@ -920,7 +932,6 @@ class HelicalChunkedRegriddedPipeline:
         for data_read, list_subr_start_z, list_subr_end_z, fract_shit, x_shift, data_target in zip(
             data_raw, subr_start_z_list, subr_end_z_list, fract_complement_shifts_v, x_shifts_list, output
         ):
-
             _fill_in_chunk_by_shift_crop_data(
                 data_target,
                 data_read,
@@ -933,7 +944,6 @@ class HelicalChunkedRegriddedPipeline:
             )
 
     def _read_data_and_apply_flats(self, sub_total_prange_slice, subchunk_slice, chunk_info):
-
         my_integer_shifts_v = chunk_info.integer_shift_v[subchunk_slice]
         fract_complement_shifts_v = chunk_info.fract_complement_to_integer_shift_v[subchunk_slice]
         x_shifts_list = chunk_info.x_pix_per_proj[subchunk_slice]
@@ -1015,7 +1025,6 @@ class HelicalChunkedRegriddedPipeline:
             radios[i][:] = _tmp_radio[:]
 
     def _retrieve_phase(self):
-
         if "unsharp_mask" in self.processing_steps:
             for i in range(self.gridded_radios.shape[0]):
                 self.gridded_radios[i] = self.phase_retrieval.apply_filter(self.gridded_radios[i])
@@ -1024,7 +1033,6 @@ class HelicalChunkedRegriddedPipeline:
                 self.radios[i] = self.phase_retrieval.apply_filter(self.gridded_radios[i])
 
     def _nophase_put_to_radios(self, target, source):
-
         ((up_margin, down_margin), (left_margin, right_margin)) = self.phase_margin
 
         zslice = slice(up_margin or None, -down_margin or None)
@@ -1034,7 +1042,6 @@ class HelicalChunkedRegriddedPipeline:
             target[i] = source[i][zslice, xslice]
 
     def _apply_unsharp():
-
         ((up_margin, down_margin), (left_margin, right_margin)) = self._phase_margin
 
         zslice = slice(up_margin or None, -down_margin or None)
@@ -1075,7 +1082,6 @@ class HelicalChunkedRegriddedPipeline:
         self.sinos = self.radios_slim
 
     def _reconstruct(self, sinos=None, chunk_info=None, i_slice=0):
-
         if sinos is None:
             sinos = self.sinos
 
@@ -1115,7 +1121,6 @@ class HelicalChunkedRegriddedPipeline:
         data = self.recs_stack
         my_kw_args = copy.copy(self._writer_exec_kwargs)
         if "config" in my_kw_args:
-
             self.logger.info("omitting config in writer because of too slow nexus writer ")
             my_kw_args["config"] = {"test": counter[0]}
             counter[0] += 1
@@ -1148,13 +1153,29 @@ class HelicalChunkedRegriddedPipeline:
         writer.write_data(data)
 
     def balance_weights(self):
-
         options = self.processing_options["reconstruction"]
 
         rot_center = options["rotation_axis_position"]
 
         self.radios_weights[:] = rebalance(self.radios_weights, self.my_angles_rad, rot_center)
 
+        # When standard scans are incomplete, due to motors errors, some angular range
+        # is missing short of 360 degrees.
+        # The weight accounting correctly deal with it, but still the padding
+        # procedure with theta+180 data may fall on empty data
+        # and this may cause problems, coming from the ramp filter,
+        # in half tomo.
+        # To correct this we complete with what we have at hand from the nearest
+        # non empty data
+        #
+        to_be_filled = []
+        for i in range(len(self.radios_weights) - 1, 0, -1):
+            if self.radios_weights[i].sum():
+                break
+            to_be_filled.append(i)
+        for i in to_be_filled:
+            self.radios[i] = self.radios[to_be_filled[-1] - 1]
+
     def _post_primary_data_reduction(self, i_slice):
         """This will be used in the derived class to transfer data to gpu"""
         self.radios_slim[:] = self.radios[:, i_slice, :]
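The comment block added in balance_weights above explains the fix for incomplete scans: trailing projections whose weights are entirely zero are overwritten with the nearest non-empty projection, so the theta+180 padding used in half tomography never samples empty data. A small self-contained numpy sketch of that back-filling idea follows (the arrays and names are made up for the example; nabu operates on self.radios / self.radios_weights instead).

```python
# Self-contained illustration of the back-filling strategy described above
# (illustrative arrays, not nabu code).
import numpy as np

radios = np.arange(6, dtype="f")[:, None, None] * np.ones((6, 2, 3), "f")
weights = np.ones_like(radios)
weights[4:] = 0.0  # the last two projections were never acquired

# Walk backwards until the first non-empty weight frame is found.
to_be_filled = []
for i in range(len(weights) - 1, 0, -1):
    if weights[i].sum():
        break
    to_be_filled.append(i)

# Overwrite the empty projections with the nearest acquired one.
for i in to_be_filled:
    radios[i] = radios[to_be_filled[-1] - 1]

print(to_be_filled)     # [5, 4]
print(radios[:, 0, 0])  # [0. 1. 2. 3. 3. 3.]
```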
@@ -1200,7 +1221,6 @@ class HelicalChunkedRegriddedPipeline:
         self.gridded_radios[:] = 0
 
         for pnum_start, pnum_end in zip(pnum_start_list, pnum_end_list):
-
             start_in_chunk = pnum_start - my_first_pnum
             end_in_chunk = pnum_end - my_first_pnum
 
@@ -1252,7 +1272,6 @@ class HelicalChunkedRegriddedPipeline:
         self._dump_sinogram()
 
         if "reconstruction" in self.processing_steps:
-
             for i_slice in range(num_slices):
                 self._post_primary_data_reduction(i_slice)  # charge on self.radios_slim
 
@@ -1273,7 +1292,6 @@ class HelicalChunkedRegriddedPipeline:
         n_provided_angles = self.radios_slim.shape[0]
 
         for first_angle_index in range(0, n_provided_angles, self.num_weight_radios_per_app):
-
             end_angle_index = min(n_provided_angles, first_angle_index + self.num_weight_radios_per_app)
             self._d_radios_weights[: end_angle_index - first_angle_index].set(
                 self.radios_weights[first_angle_index:end_angle_index, i_slice]
@@ -1367,7 +1385,6 @@ class HelicalChunkedRegriddedPipeline:
         # ---------------
         reconstructed_volume_size = 0
         if "reconstruction" in processing_steps:
-
             ## radios_slim is used to process one slice at once, It will be on the gpu
             ## and cannot be reduced further, therefore no need to estimate it.
             ## Either it passes or it does not.
@@ -1445,7 +1462,6 @@ def _fill_in_chunk_by_shift_crop_data(
     x_shift=0.0,
     extension_padding=True,
 ):
-
     """given a freshly read cube of data, it dispatches every slice to its proper vertical position and proper radio by shifting, cropping, and extending if necessary"""
     data_read_precisely_shifted = nd.interpolation.shift(data_read, (-fract_shit, x_shift), order=1, mode="nearest")[
         :-1
@@ -1509,7 +1525,6 @@ def rebalance(radios_weights, angles, ax_pos):
     angle = angles[i]
 
     for i_half_turn in range(-n_span - 1, n_span + 2):
-
         if i_half_turn == 0:
             w_res[:] += radios_weights[i]
             continue
nabu/pipeline/helical/helical_reconstruction.py

@@ -108,7 +108,6 @@ class HelicalReconstructorRegridded:
         z_max_mm = rec_cfg["end_z_mm"]
 
         if z_min_mm != 0.0 or z_max_mm != 0.0:
-
             z_min_mm += self.z_offset_mm
             z_max_mm += self.z_offset_mm
 
@@ -171,7 +170,6 @@ class HelicalReconstructorRegridded:
 
     # Gpu required memory size does not depend on the number of slices
    def _compute_max_chunk_size(self):
-
        cpu_mem = self.resources["mem_avail_GB"] * self.cpu_mem_fraction
 
        user_max_chunk_size = self.extra_options["max_chunk_size"]
@@ -230,7 +228,6 @@ class HelicalReconstructorRegridded:
 
    # different because of dry_run
    def _build_tasks(self):
-
        if self.dry_run:
            self.tasks = []
        else:
@@ -261,7 +258,6 @@ class HelicalReconstructorRegridded:
            my_z_min = z_start
            my_z_end = z_end
        else:
-
            if self.z_max <= self.z_min:
                message = f"""" The input file provide start_z end_z {self.z_min,self.z_max}
                but it is necessary that start_z < end_z
@@ -413,11 +409,7 @@ class HelicalReconstructorRegridded:
        self.logger.info(f"Creating SpanStrategy object for helical in {duration} seconds")
        if self.dry_run:
            info_string = self._span_info.get_informative_string()
-            print("
-            print(info_string)
-
-            info_string = self._span_info.get_informative_string()
-            print(" SPAN_INFO informations ")
+            print(" Informations about the doable vertical span")
            print(info_string)
            return
 
nabu/pipeline/helical/nabu_config.py

@@ -1,5 +1,5 @@
 from ..fullfield.nabu_config import *
-
+import copy
 
 ## keep the text below for future inclusion in the documentation
 # start_z, end_z , start_z_mm, end_z_mm
@@ -33,13 +33,16 @@ help_start_end_z = """ If both start_z_mm and end_z_mm are seto to zero, then st
 otherwhise the slices whose height above the sample stage, in millimiters, between start_z_mm and end_z_mm are reconstructed
 """
 
+# we need to deepcopy this in order not to mess the original nabu_config of the full-field pipeline
+nabu_config = copy.deepcopy(nabu_config)
 
 nabu_config["preproc"]["processes_file"] = {
     "default": "",
-    "help": "Path
+    "help": "Path tgo the file where some operations should be stored for later use. By default it is 'xxx_nabu_processes.h5'",
     "validator": optional_file_location_validator,
-    "type": "
+    "type": "required",
 }
+nabu_config["preproc"]["double_flatfield_enabled"]["default"] = 1
 
 
 nabu_config["reconstruction"].update(
@@ -92,32 +95,26 @@ nabu_config["reconstruction"].update(
             "validator": float_validator,
             "type": "optional",
         },
-        "fan_voxels_size_micron": {
-            "default": 1.0,
-            "help": "For HBP, for the description of the fan geometry, the size of the voxel. Defaults to a small value which implies parallel geometry",
-            "validator": float_validator,
-            "type": "optional",
-        },
         "start_z_mm": {
-            "default": "",
+            "default": "0",
             "help": help_start_end_z,
             "validator": float_validator,
             "type": "optional",
         },
         "end_z_mm": {
-            "default": "",
+            "default": "0",
             "help": " To determine the reconstructed vertical range: the height in millimiters above the stage below which slices are reconstructed ",
             "validator": float_validator,
             "type": "optional",
         },
         "start_z": {
-            "default": "",
+            "default": "0",
             "help": "the first slice of the reconstructed range. Numbered going in the direction of the scan and starting with number zero for the first doable slice",
             "validator": slice_num_validator,
             "type": "optional",
         },
         "end_z": {
-            "default": "",
+            "default": "-1",
             "help": "the "
             "end"
             " slice of the reconstructed range. Numbered going in the direction of the scan and starting with number zero for the first doable slice",
@@ -139,7 +136,7 @@ nabu_config["pipeline"].update(
 nabu_config["reconstruction"].update(
     {
         "angular_tolerance_steps": {
-            "default": "
+            "default": "3.0",
             "help": "the angular tolerance, an angular width expressed in units of an angular step, which is tolerated in the criteria for deciding if a slice is reconstructable or not",
             "validator": float_validator,
             "type": "advanced",