nabu-2022.3.0a1-py3-none-any.whl → nabu-2023.1.0a2-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nabu/__init__.py +1 -1
- nabu/app/bootstrap.py +7 -1
- nabu/app/cast_volume.py +8 -2
- nabu/app/cli_configs.py +69 -0
- nabu/app/composite_cor.py +97 -0
- nabu/app/create_distortion_map_from_poly.py +118 -0
- nabu/app/nx_z_splitter.py +1 -1
- nabu/app/prepare_weights_double.py +21 -16
- nabu/app/reconstruct_helical.py +0 -1
- nabu/app/utils.py +10 -5
- nabu/cuda/processing.py +1 -0
- nabu/cuda/tests/test_padding.py +1 -0
- nabu/cuda/utils.py +1 -0
- nabu/distributed/__init__.py +0 -0
- nabu/distributed/utils.py +57 -0
- nabu/distributed/worker.py +543 -0
- nabu/estimation/cor.py +3 -7
- nabu/estimation/cor_sino.py +2 -1
- nabu/estimation/distortion.py +6 -4
- nabu/io/cast_volume.py +10 -1
- nabu/io/detector_distortion.py +305 -0
- nabu/io/reader.py +37 -7
- nabu/io/reader_helical.py +0 -3
- nabu/io/tests/test_cast_volume.py +16 -4
- nabu/io/tests/test_detector_distortion.py +178 -0
- nabu/io/tests/test_writers.py +2 -2
- nabu/io/tiffwriter_zmm.py +2 -3
- nabu/io/writer.py +84 -1
- nabu/io/writer_BACKUP_193259.py +556 -0
- nabu/io/writer_BACKUP_193381.py +556 -0
- nabu/io/writer_BASE_193259.py +548 -0
- nabu/io/writer_BASE_193381.py +548 -0
- nabu/io/writer_LOCAL_193259.py +550 -0
- nabu/io/writer_LOCAL_193381.py +550 -0
- nabu/io/writer_REMOTE_193259.py +557 -0
- nabu/io/writer_REMOTE_193381.py +557 -0
- nabu/misc/fourier_filters.py +2 -0
- nabu/misc/rotation.py +0 -1
- nabu/misc/tests/test_rotation.py +1 -0
- nabu/pipeline/config_validators.py +10 -0
- nabu/pipeline/datadump.py +1 -1
- nabu/pipeline/dataset_validator.py +0 -1
- nabu/pipeline/detector_distortion_provider.py +20 -0
- nabu/pipeline/estimators.py +35 -21
- nabu/pipeline/fallback_utils.py +1 -1
- nabu/pipeline/fullfield/chunked.py +30 -15
- nabu/pipeline/fullfield/chunked_black.py +881 -0
- nabu/pipeline/fullfield/chunked_cuda.py +34 -4
- nabu/pipeline/fullfield/chunked_fb.py +966 -0
- nabu/pipeline/fullfield/chunked_google.py +921 -0
- nabu/pipeline/fullfield/chunked_pep8.py +920 -0
- nabu/pipeline/fullfield/computations.py +7 -6
- nabu/pipeline/fullfield/dataset_validator.py +1 -1
- nabu/pipeline/fullfield/grouped_cuda.py +6 -0
- nabu/pipeline/fullfield/nabu_config.py +15 -3
- nabu/pipeline/fullfield/processconfig.py +5 -0
- nabu/pipeline/fullfield/reconstruction.py +1 -2
- nabu/pipeline/helical/gridded_accumulator.py +1 -8
- nabu/pipeline/helical/helical_chunked_regridded.py +48 -33
- nabu/pipeline/helical/helical_reconstruction.py +1 -9
- nabu/pipeline/helical/nabu_config.py +11 -14
- nabu/pipeline/helical/span_strategy.py +11 -4
- nabu/pipeline/helical/tests/test_accumulator.py +0 -3
- nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -6
- nabu/pipeline/helical/tests/test_strategy.py +0 -1
- nabu/pipeline/helical/weight_balancer.py +0 -1
- nabu/pipeline/params.py +4 -0
- nabu/pipeline/processconfig.py +6 -2
- nabu/pipeline/writer.py +9 -4
- nabu/preproc/distortion.py +4 -3
- nabu/preproc/double_flatfield.py +16 -4
- nabu/preproc/double_flatfield_cuda.py +3 -2
- nabu/preproc/double_flatfield_variable_region.py +13 -4
- nabu/preproc/flatfield.py +29 -7
- nabu/preproc/flatfield_cuda.py +0 -1
- nabu/preproc/flatfield_variable_region.py +5 -2
- nabu/preproc/phase.py +0 -1
- nabu/preproc/phase_cuda.py +0 -1
- nabu/preproc/tests/test_ctf.py +4 -3
- nabu/preproc/tests/test_flatfield.py +6 -7
- nabu/reconstruction/fbp_opencl.py +1 -1
- nabu/reconstruction/filtering.py +0 -1
- nabu/reconstruction/tests/test_fbp.py +1 -0
- nabu/resources/dataset_analyzer.py +0 -1
- nabu/resources/templates/bm05_pag.conf +34 -0
- nabu/resources/templates/id16_ctf.conf +2 -1
- nabu/resources/tests/test_nxflatfield.py +0 -1
- nabu/resources/tests/test_units.py +0 -1
- nabu/stitching/frame_composition.py +7 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/METADATA +2 -7
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/RECORD +96 -75
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/WHEEL +1 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/entry_points.txt +2 -1
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/LICENSE +0 -0
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/top_level.txt +0 -0
- {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/zip-safe +0 -0
nabu/pipeline/estimators.py
CHANGED

@@ -4,7 +4,7 @@ nabu.pipeline.estimators: helper classes/functions to estimate parameters of a d
 """
 import inspect
 import numpy as np
-import scipy.fft
+import scipy.fft  # pylint: disable=E0611
 from silx.io import get_data
 from ..preproc.flatfield import FlatFieldDataUrls
 from ..estimation.cor import (
@@ -26,6 +26,7 @@ from ..io.reader import ChunkReader
 from ..preproc.ccd import Log, CCDFilter
 from ..misc import fourier_filters
 from .params import cor_methods
+from ..io.reader import load_images_from_dataurl_dict


 def estimate_cor(method, dataset_info, do_flatfield=True, cor_options_str=None, logger=None):
@@ -97,15 +98,17 @@ class CORFinderBase:

     def _init_cor_finder(self, method, cor_options):
         self.method = method
-
+        if not isinstance(cor_options, (type(None), dict)):
+            raise TypeError(
+                f"cor_options is expected to be an optional instance of dict. Get {cor_options} ({type(cor_options)}) instead"
+            )
+        self.cor_options = cor_options or {}

         cor_class = self.search_methods[method]["class"]
         self.cor_finder = cor_class(logger=self.logger)

-
-        lookup_side = cor_options.get("side",
-        if self.dataset_info.is_halftomo:
-            lookup_side = "right"
+        default_lookup_side = "right" if self.dataset_info.is_halftomo else "center"
+        lookup_side = self.cor_options.get("side", default_lookup_side)

         self.cor_exec_args = []
         self.cor_exec_args.extend(self.search_methods[method].get("default_args", []))
@@ -366,7 +369,6 @@ class CompositeCORFinder:
         spike_threshold=0.04,
         logger=None,
     ):
-
         self.dataset_info = dataset_info
         self.logger = LoggerOrPrint(logger)

@@ -432,15 +434,22 @@ class CompositeCORFinder:

         self.absolute_indices = sorted(self.dataset_info.projections.keys())

-
-
-
-        self.
-
-
-
-
+        my_flats = load_images_from_dataurl_dict(self.dataset_info.flats)
+
+        if my_flats is not None and len(list(my_flats.keys())):
+            self.use_flat = True
+            self.flatfield = FlatFieldDataUrls(
+                (len(self.absolute_indices), self.sy, self.sx),
+                self.dataset_info.flats,
+                self.dataset_info.darks,
+                radios_indices=self.absolute_indices,
+                dtype=np.float64,
+            )
+        else:
+            self.use_flat = False
+
         self.sx, self.sy = self.dataset_info.radio_dims
+        self.mlog = Log((1,) + (self.sy, self.sx), clip_min=1e-6, clip_max=10.0)
         self.rcor_abs = round(self.sx / 2.0)
         self.cor_acc = round(self.sx / 2.0)

@@ -483,9 +492,15 @@ class CompositeCORFinder:
         if self.dataset_info.is_halftomo:
             default_dict["side"] = "right"

-        if cor_options is None or cor_options
+        if cor_options is None or cor_options == "":
             cor_options = {}
-
+        if isinstance(cor_options, str):
+            try:
+                cor_options = extract_parameters(cor_options, sep=";")
+            except Exception as exc:
+                msg = "Could not extract parameters from cor_options: %s" % (str(exc))
+                self.logger.fatal(msg)
+                raise ValueError(msg)
         default_dict.update(cor_options)
         cor_options = default_dict

@@ -496,7 +511,8 @@ class CompositeCORFinder:
         radio_dataset_idx = image_num
         data_url = self.dataset_info.projections[radio_dataset_idx]
         radio = get_data(data_url).astype(np.float64)
-        self.
+        if self.use_flat:
+            self.flatfield.normalize_single_radio(radio, radio_dataset_idx, dtype=radio.dtype)
         if self.take_log:
             self.mlog.take_logarithm(radio)

@@ -588,7 +604,6 @@ class CompositeCORFinder:
             overlap_max = min(2 * ovsd_sx - 4, 2 * ovsd_sx - self.ovs * self.ovs * self.high_pass * 3)

         elif self.cor_options["side"] == "near":
-
             near_pos = self.cor_options["near_pos"]
             near_width = self.cor_options["near_width"]

@@ -644,12 +659,11 @@ class CompositeCORFinder:
                 best_value = min_value
             self.logger.debug(
                 "testing an overlap of %.2f pixels, actual best overlap is %.2f pixels over %d\r"
-                % (z / self.ovs, best_overlap / self.ovs, ovsd_sx),
+                % (z / self.ovs, best_overlap / self.ovs, ovsd_sx / self.ovs),
             )

         offset = (ovsd_sx - best_overlap) / self.ovs / 2
         cor_abs = (self.sx - 1) / 2 + offset
-        self.logger.info("Found an optimal cor at %e (absolute)\n" % cor_abs)

         return cor_abs

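The hunks above tighten how cor_options is handled: CORFinderBase now only accepts a dict (or None) and derives the default lookup side from the half-tomography flag, while CompositeCORFinder additionally tolerates a ";"-separated option string parsed with extract_parameters. Below is a minimal standalone sketch of that validate-or-parse behaviour; the normalize_cor_options helper is illustrative and not part of nabu, and only the ";"-separated format and the "side"/"near_pos"/"near_width" keys come from the diff:

def normalize_cor_options(cor_options, is_halftomo=False):
    # Accept None, an empty string, a dict, or a "key=value; key=value" string.
    if cor_options is None or cor_options == "":
        cor_options = {}
    if isinstance(cor_options, str):
        # Simplified stand-in for nabu's extract_parameters(..., sep=";"): values stay strings here.
        cor_options = dict(
            (key.strip(), value.strip())
            for key, value in (item.split("=", 1) for item in cor_options.split(";") if item.strip())
        )
    if not isinstance(cor_options, dict):
        raise TypeError("cor_options is expected to be an optional dict, got %s" % type(cor_options))
    # Default search side: "right" for half-tomography scans, "center" otherwise.
    cor_options.setdefault("side", "right" if is_halftomo else "center")
    return cor_options

For example, normalize_cor_options("side=near; near_pos=1024; near_width=30") returns {"side": "near", "near_pos": "1024", "near_width": "30"}.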
nabu/pipeline/fallback_utils.py
CHANGED

@@ -129,7 +129,7 @@ class WriterConfigurator:
             raise ValueError(err)

     def _init_separate_histogram_writer(self, hist_entry):
-        hist_fname = path.join(self.output_dir, "histogram_%
+        hist_fname = path.join(self.output_dir, "histogram_%06d.hdf5" % self.start_index)
         self.histogram_writer = LegacyNXProcessWriter(
             hist_fname,
             entry=hist_entry,
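The single change in this file completes a previously truncated line: the separate histogram file name is now built from the writer's start index with zero padding. For illustration (the index value below is made up):

>>> "histogram_%06d.hdf5" % 1200
'histogram_001200.hdf5'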
nabu/pipeline/fullfield/chunked.py
CHANGED

@@ -22,6 +22,7 @@ from ...misc.histogram import PartialHistogram, hist_as_2Darray
 from ..utils import use_options, pipeline_step, get_subregion
 from ..datadump import DataDumpManager
 from ..writer import WriterManager
+from ..detector_distortion_provider import DetectorDistortionProvider

 # For now we don't have a plain python/numpy backend for reconstruction
 try:
@@ -61,7 +62,9 @@ class ChunkedPipeline:
     # The first stage will skip these steps, and the second stage will do these stages after merging sinograms.
     _reconstruction_steps = ["sino_rings_correction", "reconstruction", "save", "histogram"]

-    def __init__(
+    def __init__(
+        self, process_config, chunk_shape, margin=None, logger=None, use_grouped_mode=False, extra_options=None
+    ):
         """
         Initialize a "Chunked" pipeline.

@@ -96,15 +99,15 @@ class ChunkedPipeline:
         of the volume).
         """
         self.logger = LoggerOrPrint(logger)
-        self._set_params(process_config, chunk_shape, extra_options, margin)
+        self._set_params(process_config, chunk_shape, extra_options, margin, use_grouped_mode)
         self._init_pipeline()

-    def _set_params(self, process_config, chunk_shape, extra_options, margin):
+    def _set_params(self, process_config, chunk_shape, extra_options, margin, use_grouped_mode):
         self.process_config = process_config
         self.dataset_info = self.process_config.dataset_info
         self.processing_steps = self.process_config.processing_steps.copy()
         self.processing_options = self.process_config.processing_options
-        self._set_chunk_shape(chunk_shape)
+        self._set_chunk_shape(chunk_shape, use_grouped_mode)
         self.set_subregion(None)
         self._set_margin(margin)
         self._set_extra_options(extra_options)
@@ -112,7 +115,7 @@ class ChunkedPipeline:
         self._steps_name2component = {}
         self._steps_component2name = {}

-    def _set_chunk_shape(self, chunk_shape):
+    def _set_chunk_shape(self, chunk_shape, use_grouped_mode):
         if len(chunk_shape) != 3:
             raise ValueError("Expected chunk_shape to be a tuple of length 3 in the form (n_z, n_y, n_x)")
         self.chunk_shape = tuple(int(c) for c in chunk_shape)  # cast to int, as numpy.int64 can make pycuda crash
@@ -126,7 +129,7 @@ class ChunkedPipeline:
         self.n_angles = self.radios_shape[0]
         self.n_slices = self.radios_shape[1]
         self._grouped_processing = False
-        if self.chunk_shape[0] < len(self.process_config.rotation_angles(subsampling=False)):
+        if use_grouped_mode or self.chunk_shape[0] < len(self.process_config.rotation_angles(subsampling=False)):
             # TODO allow a certain tolerance in this case ?
             # Reconstruction is still possible (albeit less accurate) if delta is small
             self._grouped_processing = True
@@ -302,12 +305,21 @@ class ChunkedPipeline:
         process_file = options.get("process_file", None)
         if process_file is None:
             # Standard case - start pipeline from raw data
-
+            if self.process_config.nabu_config["preproc"]["detector_distortion_correction"] is None:
+                self.detector_corrector = None
+            else:
+                self.detector_corrector = DetectorDistortionProvider(
+                    detector_full_shape_vh=self.process_config.dataset_info.radio_dims[::-1],
+                    correction_type=self.process_config.nabu_config["preproc"]["detector_distortion_correction"],
+                    options=self.process_config.nabu_config["preproc"]["detector_distortion_correction_options"],
+                )
+            # ChunkReader always take a non-subsampled dictionary "files".
             self.chunk_reader = ChunkReader(
                 self._read_options["files"],
                 sub_region=self.sub_region_xz,
                 data_buffer=self.radios,
                 pre_allocate=False,
+                detector_corrector=self.detector_corrector,
                 convert_float=True,
                 binning=options["binning"],
                 dataset_subsampling=options["dataset_subsampling"],
@@ -356,10 +368,15 @@ class ChunkedPipeline:
         # Use chunk_reader.files instead of process_config.projs_indices(subsampling=True), because
         # chunk_reader might read only a subset of the files (in "grouped mode")
         self._ff_options["projs_indices"] = list(self.chunk_reader.files_subsampled.keys())
+        if self._ff_options.get("normalize_srcurrent", False):
+            a_start_idx, a_end_idx = self.sub_region[0]
+            subs = self.process_config.subsampling_factor
+            self._ff_options["radios_srcurrent"] = self._ff_options["radios_srcurrent"][a_start_idx:a_end_idx:subs]

         distortion_correction = None
         if self._ff_options["do_flat_distortion"]:
             self.logger.info("Flats distortion correction will be applied")
+            self.FlatFieldClass = FlatFieldDataUrls  # no GPU implementation available, force this backend
             estimation_kwargs = {}
             estimation_kwargs.update(self._ff_options["flat_distortion_params"])
             estimation_kwargs["logger"] = self.logger
@@ -376,6 +393,7 @@ class ChunkedPipeline:
             interpolation="linear",
             distortion_correction=distortion_correction,
             sub_region=self.sub_region_xz,
+            detector_corrector=self.detector_corrector,
             binning=self._ff_options["binning"],
             radios_srcurrent=self._ff_options["radios_srcurrent"],
             flats_srcurrent=self._ff_options["flats_srcurrent"],
@@ -401,6 +419,7 @@ class ChunkedPipeline:
             self.radios_shape,
             result_url=result_url,
             sub_region=self.sub_region_xz,
+            detector_corrector=self.detector_corrector,
             input_is_mlog=False,
             output_is_mlog=False,
             average_is_on_log=avg_is_on_log,
@@ -518,7 +537,7 @@ class ChunkedPipeline:
         self.reconstruction = self.FBPClass(
             self.sinos_shape[1:],
             angles=options["angles"],
-            rot_center=options["
+            rot_center=options["fbp_rotation_axis_position"],
             filter_name=options["fbp_filter_type"],
             slice_roi=self.process_config.rec_roi,
             padding_mode=options["padding_type"],
@@ -573,13 +592,13 @@ class ChunkedPipeline:
             # "processing_options": self.processing_options,
             #
             "nabu_config": self.process_config.nabu_config,
-            "entry": getattr(self.dataset_info.dataset_scanner, "entry",
+            "entry": getattr(self.dataset_info.dataset_scanner, "entry", "entry"),
         }
         writer_extra_options = {
             "jpeg2000_compression_ratio": options["jpeg2000_compression_ratio"],
             "float_clip_values": options["float_clip_values"],
             "tiff_single_file": options.get("tiff_single_file", False),
-            "
+            "single_output_file_initialized": getattr(self.process_config, "single_output_file_initialized", False),
         }
         self.writer = WriterManager(
             options["location"],
@@ -592,10 +611,6 @@ class ChunkedPipeline:
             histogram=("histogram" in self.processing_steps),
             extra_options=writer_extra_options,
         )
-        if options.get("tiff_single_file", False) and not getattr(
-            self.process_config, "single_tiff_initialized", False
-        ):
-            self.process_config.single_tiff_initialized = True

         #
         # Pipeline execution
@@ -728,7 +743,7 @@ class ChunkedPipeline:
         self.writer.write_data(data)
         self.logger.info("Wrote %s" % self.writer.fname)
         self._write_histogram()
-        self.process_config.
+        self.process_config.single_output_file_initialized = True

     def _write_histogram(self):
         if "histogram" not in self.processing_steps:
|