nabu 2023.2.1__py3-none-any.whl → 2024.1.0rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doc/conf.py +1 -1
- doc/doc_config.py +32 -0
- nabu/__init__.py +2 -1
- nabu/app/bootstrap_stitching.py +1 -1
- nabu/app/cli_configs.py +122 -2
- nabu/app/composite_cor.py +27 -2
- nabu/app/correct_rot.py +70 -0
- nabu/app/create_distortion_map_from_poly.py +42 -18
- nabu/app/diag_to_pix.py +358 -0
- nabu/app/diag_to_rot.py +449 -0
- nabu/app/generate_header.py +4 -3
- nabu/app/histogram.py +2 -2
- nabu/app/multicor.py +6 -1
- nabu/app/parse_reconstruction_log.py +151 -0
- nabu/app/prepare_weights_double.py +83 -22
- nabu/app/reconstruct.py +5 -1
- nabu/app/reconstruct_helical.py +7 -0
- nabu/app/reduce_dark_flat.py +6 -3
- nabu/app/rotate.py +4 -4
- nabu/app/stitching.py +16 -2
- nabu/app/tests/test_reduce_dark_flat.py +18 -2
- nabu/app/validator.py +4 -4
- nabu/cuda/convolution.py +8 -376
- nabu/cuda/fft.py +4 -0
- nabu/cuda/kernel.py +4 -4
- nabu/cuda/medfilt.py +5 -158
- nabu/cuda/padding.py +5 -71
- nabu/cuda/processing.py +23 -2
- nabu/cuda/src/ElementOp.cu +78 -0
- nabu/cuda/src/backproj.cu +28 -2
- nabu/cuda/src/fourier_wavelets.cu +2 -2
- nabu/cuda/src/normalization.cu +23 -0
- nabu/cuda/src/padding.cu +2 -2
- nabu/cuda/src/transpose.cu +16 -0
- nabu/cuda/utils.py +39 -0
- nabu/estimation/alignment.py +10 -1
- nabu/estimation/cor.py +808 -38
- nabu/estimation/cor_sino.py +7 -9
- nabu/estimation/tests/test_cor.py +85 -3
- nabu/io/reader.py +26 -18
- nabu/io/tests/test_cast_volume.py +3 -3
- nabu/io/tests/test_detector_distortion.py +3 -3
- nabu/io/tiffwriter_zmm.py +2 -2
- nabu/io/utils.py +14 -4
- nabu/io/writer.py +5 -3
- nabu/misc/fftshift.py +6 -0
- nabu/misc/histogram.py +5 -285
- nabu/misc/histogram_cuda.py +8 -104
- nabu/misc/kernel_base.py +3 -121
- nabu/misc/padding_base.py +5 -69
- nabu/misc/processing_base.py +3 -107
- nabu/misc/rotation.py +5 -62
- nabu/misc/rotation_cuda.py +5 -65
- nabu/misc/transpose.py +6 -0
- nabu/misc/unsharp.py +3 -78
- nabu/misc/unsharp_cuda.py +5 -52
- nabu/misc/unsharp_opencl.py +8 -85
- nabu/opencl/fft.py +6 -0
- nabu/opencl/kernel.py +21 -6
- nabu/opencl/padding.py +5 -72
- nabu/opencl/processing.py +27 -5
- nabu/opencl/src/backproj.cl +3 -3
- nabu/opencl/src/fftshift.cl +65 -12
- nabu/opencl/src/padding.cl +2 -2
- nabu/opencl/src/roll.cl +96 -0
- nabu/opencl/src/transpose.cl +16 -0
- nabu/pipeline/config_validators.py +63 -3
- nabu/pipeline/dataset_validator.py +2 -2
- nabu/pipeline/estimators.py +193 -35
- nabu/pipeline/fullfield/chunked.py +34 -17
- nabu/pipeline/fullfield/chunked_cuda.py +7 -5
- nabu/pipeline/fullfield/computations.py +48 -13
- nabu/pipeline/fullfield/nabu_config.py +13 -13
- nabu/pipeline/fullfield/processconfig.py +10 -5
- nabu/pipeline/fullfield/reconstruction.py +1 -2
- nabu/pipeline/helical/fbp.py +5 -0
- nabu/pipeline/helical/filtering.py +12 -9
- nabu/pipeline/helical/gridded_accumulator.py +179 -33
- nabu/pipeline/helical/helical_chunked_regridded.py +262 -151
- nabu/pipeline/helical/helical_chunked_regridded_cuda.py +4 -11
- nabu/pipeline/helical/helical_reconstruction.py +56 -18
- nabu/pipeline/helical/span_strategy.py +1 -1
- nabu/pipeline/helical/tests/test_accumulator.py +4 -0
- nabu/pipeline/params.py +23 -2
- nabu/pipeline/processconfig.py +3 -8
- nabu/pipeline/tests/test_chunk_reader.py +78 -0
- nabu/pipeline/tests/test_estimators.py +120 -2
- nabu/pipeline/utils.py +25 -0
- nabu/pipeline/writer.py +2 -0
- nabu/preproc/ccd_cuda.py +9 -7
- nabu/preproc/ctf.py +21 -26
- nabu/preproc/ctf_cuda.py +25 -25
- nabu/preproc/double_flatfield.py +14 -2
- nabu/preproc/double_flatfield_cuda.py +7 -11
- nabu/preproc/flatfield_cuda.py +23 -27
- nabu/preproc/phase.py +19 -24
- nabu/preproc/phase_cuda.py +21 -21
- nabu/preproc/shift_cuda.py +58 -28
- nabu/preproc/tests/test_ctf.py +5 -5
- nabu/preproc/tests/test_double_flatfield.py +2 -2
- nabu/preproc/tests/test_vshift.py +13 -2
- nabu/processing/__init__.py +0 -0
- nabu/processing/convolution_cuda.py +375 -0
- nabu/processing/fft_base.py +163 -0
- nabu/processing/fft_cuda.py +256 -0
- nabu/processing/fft_opencl.py +54 -0
- nabu/processing/fftshift.py +134 -0
- nabu/processing/histogram.py +286 -0
- nabu/processing/histogram_cuda.py +103 -0
- nabu/processing/kernel_base.py +126 -0
- nabu/processing/medfilt_cuda.py +159 -0
- nabu/processing/muladd.py +29 -0
- nabu/processing/muladd_cuda.py +68 -0
- nabu/processing/padding_base.py +71 -0
- nabu/processing/padding_cuda.py +75 -0
- nabu/processing/padding_opencl.py +77 -0
- nabu/processing/processing_base.py +123 -0
- nabu/processing/roll_opencl.py +64 -0
- nabu/processing/rotation.py +63 -0
- nabu/processing/rotation_cuda.py +66 -0
- nabu/processing/tests/__init__.py +0 -0
- nabu/processing/tests/test_fft.py +268 -0
- nabu/processing/tests/test_fftshift.py +71 -0
- nabu/{misc → processing}/tests/test_histogram.py +2 -4
- nabu/{cuda → processing}/tests/test_medfilt.py +1 -1
- nabu/processing/tests/test_muladd.py +54 -0
- nabu/{cuda → processing}/tests/test_padding.py +119 -75
- nabu/processing/tests/test_roll.py +63 -0
- nabu/{misc → processing}/tests/test_rotation.py +3 -2
- nabu/processing/tests/test_transpose.py +72 -0
- nabu/{misc → processing}/tests/test_unsharp.py +41 -8
- nabu/processing/transpose.py +126 -0
- nabu/processing/unsharp.py +79 -0
- nabu/processing/unsharp_cuda.py +53 -0
- nabu/processing/unsharp_opencl.py +75 -0
- nabu/reconstruction/fbp.py +34 -10
- nabu/reconstruction/fbp_base.py +35 -16
- nabu/reconstruction/fbp_opencl.py +7 -12
- nabu/reconstruction/filtering.py +2 -2
- nabu/reconstruction/filtering_cuda.py +13 -14
- nabu/reconstruction/filtering_opencl.py +3 -4
- nabu/reconstruction/projection.py +2 -0
- nabu/reconstruction/rings.py +158 -1
- nabu/reconstruction/rings_cuda.py +218 -58
- nabu/reconstruction/sinogram_cuda.py +16 -12
- nabu/reconstruction/tests/test_deringer.py +116 -14
- nabu/reconstruction/tests/test_fbp.py +22 -31
- nabu/reconstruction/tests/test_filtering.py +11 -2
- nabu/resources/dataset_analyzer.py +89 -26
- nabu/resources/nxflatfield.py +2 -2
- nabu/resources/tests/test_nxflatfield.py +1 -1
- nabu/resources/utils.py +9 -2
- nabu/stitching/alignment.py +184 -0
- nabu/stitching/config.py +241 -39
- nabu/stitching/definitions.py +6 -0
- nabu/stitching/frame_composition.py +4 -2
- nabu/stitching/overlap.py +99 -3
- nabu/stitching/sample_normalization.py +60 -0
- nabu/stitching/slurm_utils.py +10 -10
- nabu/stitching/tests/test_alignment.py +99 -0
- nabu/stitching/tests/test_config.py +16 -1
- nabu/stitching/tests/test_overlap.py +68 -2
- nabu/stitching/tests/test_sample_normalization.py +49 -0
- nabu/stitching/tests/test_slurm_utils.py +5 -5
- nabu/stitching/tests/test_utils.py +3 -33
- nabu/stitching/tests/test_z_stitching.py +391 -22
- nabu/stitching/utils.py +144 -202
- nabu/stitching/z_stitching.py +309 -126
- nabu/testutils.py +18 -0
- nabu/thirdparty/tomocupy_remove_stripe.py +586 -0
- nabu/utils.py +32 -6
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/LICENSE +1 -1
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/METADATA +5 -5
- nabu-2024.1.0rc3.dist-info/RECORD +296 -0
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/WHEEL +1 -1
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/entry_points.txt +5 -1
- nabu/conftest.py +0 -14
- nabu/opencl/fftshift.py +0 -92
- nabu/opencl/tests/test_fftshift.py +0 -55
- nabu/opencl/tests/test_padding.py +0 -84
- nabu-2023.2.1.dist-info/RECORD +0 -252
- /nabu/cuda/src/{fftshift.cu → dfi_fftshift.cu} +0 -0
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/top_level.txt +0 -0
nabu/app/diag_to_rot.py
ADDED
@@ -0,0 +1,449 @@
+from .. import version
+from os import environ
+
+import argparse
+import shutil
+import os
+import sys
+import re
+import h5py
+import numpy as np
+
+
+from ..resources.logger import LoggerOrPrint
+from .utils import parse_params_values
+from .cli_configs import DiagToRotConfig
+
+from ..utils import get_available_threads
+
+from nxtomo.application.nxtomo import NXtomo
+
+from multiprocessing import Pool
+
+from nabu.utils import DictToObj
+from nabu.pipeline.estimators import oversample
+from scipy.ndimage import gaussian_filter
+from silx.io.dictdump import h5todict
+
+
+"""
+The operations below rely on the diag objects found in the result of a nabu-helical run with diag_zpro_run set to a number > 0.
+They are found in the configuration section of the nabu output, in several sequential datasets whose HDF5 keys
+are 0, 1, 2, ..., corresponding to all the z-windows (chunks) for which contributions have been collected at different angles.
+These are nothing else but pieces of ready-to-use preprocessed radios, at different angles.
+In other words, redundant contributions concurring to the same preprocessed radiography, but coming from different moments
+of the helical scan, are kept separate.
+Pairs (theta, theta + 180) are then formed; after flipping one member, these pairs should coincide on an overlapping region
+where they both have signal (part of them can be dark if the detector is out of view above or below).
+For each key there is a sequence of radios, the corresponding sequence of weight maps, the corresponding z translations, and the angles.
+
+The number passed through diag_zpro_run is > 1 and is interpreted by the diagnostics collection run
+as the number of wished collected angles between 0 and 180. Let's call it num_0_180.
+
+The collection is not done here. Here we exploit the result of a previous collection to deduce the cor by looking at the correlations.
+
+An example of a collection is this:
+
+    |_____ diagnostics
+            |
+            |__ 0
+                |_ radios (4*num_0_180, chunky, chunkx)
+                |
+                |_ weights (4*num_0_180, chunky, chunkx)
+                |
+                |_ angles (4*num_0_180,)
+                |
+                |_ searched_rad (2*num_0_180,)   all the searched angles between 0 and 360, in radians
+                |
+                |_ zmm_trans (4*num_0_180,)      the z translation in mm
+                |
+                |_ zpix_transl (4*num_0_180,)    the z translation in pixels
+                |
+                |_ pixel_size_mm                 scalar
+
+Here we follow the evolution of the rotation axis along the scan.
+The final result can be left in its detailed form, giving the found cor at every analysed scan position,
+or reduced by interpolation, giving the cor at the two extremal positions of the z translation.
+"""
+
+
+def transform_images(diag, args):
+    """
+    Filter and transform the radios and the weights.
+
+    Filter the radios, and oversample them along the horizontal direction.
+    The method is in general similar to the composite cor finding:
+    several overlapping positions are used to match redundant contributions at
+    different rotation stages (theta and theta+180).
+    But beforehand it is beneficial to remove low spatial frequencies,
+    and the oversampling is done on the fly.
+
+    Parameters:
+        diag: object
+            the used members of diag are radios and weights
+        args: object
+            its members are the application parameters. Here we use only:
+            low_pass, high_pass, ovs (oversampling factor for the horizontal dimension)
+    """
+
+    diag.radios[:] = (diag.radios / diag.weights).astype("f")
+
+    new_radios = []
+
+    for ima in diag.radios:
+        ima = gaussian_filter(ima, [0, args.low_pass], mode="nearest")
+        ima = ima - gaussian_filter(ima, [0, args.high_pass], mode="nearest")
+        new_radios.append(ima)
+
+    diag.radios = [oversample(ima, [1, args.ovs]).astype("f") for ima in new_radios]
+
+    diag.weights = [oversample(ima, [1, args.ovs]).astype("f") for ima in diag.weights]
+
+
+def total_merit_list(arg_tuple):
+    """
+    Builds three lists: all_merits, all_energies, all_z_transls.
+
+    For every pair (theta, theta+180), an item is added to each list:
+    for "all_merits", a list of merits, one for every overlap in the overlap_list argument;
+    for "all_energies", same logic, but calculating the energy implied in the calculation of the merit;
+    for "all_z_transls", the averaged z_transl for the considered pair.
+
+    Parameters:
+        diag: object
+            the used members of diag are radios, weights and zpix_transl
+        args: object
+            contains the application parameters.
+            Its used members are ovs, high_pass, low_pass
+    """
+
+    (diag, overlap_list, args) = arg_tuple
+
+    orig_sy, ovsd_sx = diag.radios[0].shape
+
+    all_merits = []
+    all_energies = []
+    all_z_transls = []
+
+    # the following two lines are in accordance with the nabu collection layout for diags:
+    # there are n_angles_pi angles in [0,180[, and then again the same number of possibly valid radios
+    # (check for nan in z translation) in [180,360[, [360,540[, [540,720[
+    n_angles_pi = len(diag.radios) // 4
+    n_angles_2pi = len(diag.radios) // 2
+
+    # check for (theta, theta+180) pairs whose first radio is in [0,180[ or [360,540[
+    for i in list(range(n_angles_pi)) + list(range(n_angles_2pi, n_angles_2pi + n_angles_pi)):
+        merits = []
+        energies = []
+
+        if (not np.isnan(diag.zpix_transl[i])) and (not np.isnan(diag.zpix_transl[i + n_angles_pi])):
+            radio1 = diag.radios[i]
+            radio2 = diag.radios[i + n_angles_pi]
+
+            weight1 = diag.weights[i]
+            weight2 = diag.weights[i + n_angles_pi]
+
+            for overlap in overlap_list:
+                if overlap <= ovsd_sx:
+                    my_overlap = overlap
+                    my_radio1 = radio1
+                    my_radio2 = radio2
+                    my_weight1 = weight1
+                    my_weight2 = weight2
+                else:
+                    my_overlap = ovsd_sx - (overlap - ovsd_sx)
+                    my_radio1 = np.fliplr(radio1)
+                    my_radio2 = np.fliplr(radio2)
+                    my_weight1 = np.fliplr(weight1)
+                    my_weight2 = np.fliplr(weight2)
+
+                radio_common_left = np.fliplr(my_radio1[:, ovsd_sx - my_overlap :])[
+                    :, : -(args.ovs * args.high_pass * 2)
+                ]
+                radio_common_right = my_radio2[:, ovsd_sx - my_overlap : -(args.ovs * args.high_pass * 2)]
+                diff_common = radio_common_right - radio_common_left
+
+                weight_common_left = np.fliplr(my_weight1[:, ovsd_sx - my_overlap :])[
+                    :, : -(args.ovs * args.high_pass * 2)
+                ]
+                weight_common_right = my_weight2[:, ovsd_sx - my_overlap : -(args.ovs * args.high_pass * 2)]
+                weight_common = weight_common_right * weight_common_left
+
+                if args.use_l1_norm:
+                    merits.append(abs(diff_common * weight_common).astype("d").sum())
+                    energies.append(abs(weight_common).astype("d").sum())
+                else:
+                    merits.append((diff_common * diff_common * weight_common).astype("d").sum())
+                    energies.append(
+                        (
+                            (radio_common_left * radio_common_left + radio_common_right * radio_common_right)
+                            * weight_common
+                        )
+                        .astype("d")
+                        .sum()
+                    )
+        else:
+            merits = [0] * (len(overlap_list))
+            energies = [0] * (len(overlap_list))
+
+        z_transl = 0.5 * (diag.zpix_transl[i] + diag.zpix_transl[i + n_angles_pi])
+        all_z_transls.append(z_transl)
+
+        all_merits.append(merits)
+        all_energies.append(energies)
+
+    return all_merits, all_energies, all_z_transls
+
+
+def find_best_interpolating_line(args):
+    (all_z_transl, index_overlap_list_a, index_overlap_list_b, all_energies, all_res) = args
+
+    z_a = np.nanmin(all_z_transl)
+    z_b = np.nanmax(all_z_transl)
+
+    best_error = np.nan
+    best_error_pair = None
+
+    for index_ovlp_a in index_overlap_list_a:
+        for index_ovlp_b in index_overlap_list_b:
+            index_ovlps = np.interp(all_z_transl, [z_a, z_b], [index_ovlp_a, index_ovlp_b])
+            indexes = (np.arange(all_energies.shape[0]))[~np.isnan(index_ovlps)].astype("i")
+
+            index_ovlps = index_ovlps[~np.isnan(index_ovlps)]
+            index_ovlps = np.round_(index_ovlps).astype("i")
+
+            diff_enes = all_res[(indexes, index_ovlps)]
+            orig_enes = all_energies[(indexes, index_ovlps)]
+
+            error = (diff_enes / (orig_enes + 1.0e-30)).astype("d").sum()
+
+            if error == error:  # i.e. error is not NaN
+                if not (error > best_error):
+                    best_error = error
+                    best_error_pair = index_ovlp_a, index_ovlp_b
+
+    return best_error, best_error_pair
+
+
+def main(user_args=None):
+    """Find the cor as a function of the z translation and write an HDF5 file containing interpolable tables.
+    This file can be used subsequently with the correct-rot utility.
+    """
+
+    if user_args is None:
+        user_args = sys.argv[1:]
+
+    args = DictToObj(
+        parse_params_values(
+            DiagToRotConfig,
+            parser_description=main.__doc__,
+            program_version="nabu " + version,
+            user_args=user_args,
+        )
+    )
+
+    if args.near is None:
+        if args.original_scan is None:
+            raise ValueError(
+                "the parameter near was not provided, and the original_scan parameter was not provided either"
+            )
+        if args.entry_name is None:
+            raise ValueError(
+                "the parameter near was not provided, and the entry_name parameter for the original scan was not provided either"
+            )
+
+        scan = NXtomo()
+        scan.load(file_path=args.original_scan, data_path=args.entry_name)
+        args.near = scan.instrument.detector.estimated_cor_from_motor
+
+    args.ovs = 4
+
+    diag_url = os.path.join("/", args.entry_name, "reconstruction/configuration/diagnostics")
+
+    diag_keys = []
+    with h5py.File(args.diag_file, "r") as f:
+        diag_keys = list(f[diag_url].keys())
+        diag_keys = [diag_keys[i] for i in np.argsort(list(map(int, diag_keys)))]
+
+    all_merits = []
+    all_energies = []
+    all_z_transls = []
+
+    arguments_for_multiprocessing = []
+
+    for i_key, my_key in enumerate(diag_keys):
+        diag = DictToObj(h5todict(args.diag_file, os.path.join(diag_url, my_key)))
+
+        args.original_shape = diag.radios[0].shape
+        args.zpix_mm = diag.pixel_size_mm
+
+        transform_images(diag, args)
+
+        if i_key == 0:
+            orig_sy, ovsd_sx = diag.radios[0].shape  # already transformed here, ovsd_sx is expanded
+            args.ovsd_sx = ovsd_sx
+
+            overlap_min = max(4, ovsd_sx - 2 * args.ovs * (args.near + args.near_width))
+            overlap_max = min(2 * ovsd_sx - 4, ovsd_sx - 2 * args.ovs * (args.near - args.near_width))
+
+            overlap_list = list(range(int(overlap_min), int(overlap_max) + 1))
+
+            if overlap_min > overlap_max:
+                message = """ There is no safe search range in find_cor once the margins corresponding to the high_pass filter are discarded.
+                Maybe the near value (which is the offset with respect to the center of the image) is too big, or too negative,
+                in short too close to the borders.
+                """
+                raise ValueError(message)
+
+        arguments_for_multiprocessing.append((diag, overlap_list, args))
+
+    ncpus = get_available_threads()
+    with Pool(processes=ncpus) as pool:
+        result_list = pool.map(total_merit_list, arguments_for_multiprocessing)
+
+    for merits, energies, z_transls in result_list:
+        all_z_transls.extend(z_transls)
+        all_merits.extend(merits)
+        all_energies.extend(energies)
+
+    n_pairings_with_data = 0
+    for merits_i, energies_i in zip(all_merits, all_energies):
+        if np.any(energies_i):
+            n_pairings_with_data += 1
+
+    if args.linear_interpolation:
+        if n_pairings_with_data < 2:
+            message = f""" The diagnostics collection has probably been run over a too thin section of the scan,
+            or your scan does not allow to form pairs of (theta, theta+360). I only found {n_pairings_with_data} pairings,
+            and this is not enough to do correlation + interpolation between sections.
+            """
+            raise RuntimeError(message)
+    else:
+        if n_pairings_with_data < 1:
+            message = f""" The diagnostics collection has probably been run over a too thin section of the scan,
+            or your scan does not allow to form pairs of (theta, theta+360). I only found {n_pairings_with_data} pairings.
+            """
+            raise RuntimeError(message)
+
+    all_merits = np.array(all_merits)
+    all_energies = np.array(all_energies)
+
+    all_merits.shape = -1, len(overlap_list)
+    all_energies.shape = -1, len(overlap_list)
+
+    if args.linear_interpolation:
+        do_linear_interpolation(args, overlap_list, all_merits, all_energies, all_z_transls)
+    else:
+        do_height_by_height(args, overlap_list, all_merits, all_energies, all_z_transls)
+
+    return 0
+
+
+def do_height_by_height(args, overlap_list, all_diff, all_energies, all_z_transl):
+    # now we find the best cor for each chunk, or nan if no overlap is found
+    z_a = np.min(all_z_transl)
+    z_b = np.max(all_z_transl)
+
+    grouped_diff = {}
+    grouped_energy = {}
+
+    for diff, energy, z in zip(all_diff, all_energies, all_z_transl):
+        found = z
+        for key in grouped_diff.keys():
+            if abs(key - z) < 2.0:  # these are in pixel units
+                found = key
+                break
+        grouped_diff[found] = grouped_diff.get(found, np.zeros([len(overlap_list)], "f")) + diff
+        grouped_energy[found] = grouped_energy.get(found, np.zeros([len(overlap_list)], "f")) + energy
+
+    z_list = list(grouped_energy.keys())
+    z_list.sort()
+
+    cor_list = []
+    for z in z_list:
+        diff = grouped_diff[z]
+        energy = grouped_energy[z]
+
+        best_error = np.nan
+        best_off = None
+
+        if not np.isnan(z):
+            for i_ovlp in range(len(overlap_list)):
+                error = diff[i_ovlp] / (energy[i_ovlp] + 1.0e-30)
+
+                if not (error > best_error):
+                    best_error = error
+                    best_off = i_ovlp
+
+            if best_off is not None:
+                offset = (args.ovsd_sx - overlap_list[best_off]) / args.ovs / 2
+                sy, sx = args.original_shape
+                cor_abs = (sx - 1) / 2 + offset
+                cor_list.append(cor_abs)
+            else:
+                cor_list.append(np.nan)
+        else:
+            # no overlap was available for that z
+            cor_list.append(np.nan)
+
+    with h5py.File(args.cor_file, "w") as f:
+        my_mask = ~np.isnan(np.array(cor_list))
+        f["cor"] = np.array(cor_list)[my_mask]
+        f["z_pix"] = np.array(z_list)[my_mask]
+        f["z_m"] = (np.array(z_list)[my_mask]) * args.zpix_mm * 1.0e-3
+
+
+def do_linear_interpolation(args, overlap_list, all_res, all_energies, all_z_transl):
+    # now we consider all the linear regressions of the offset with z_transl
+
+    ncpus = get_available_threads()
+
+    index_overlap_list = np.arange(len(overlap_list)).astype("i")
+    arguments_list = [
+        (all_z_transl, piece, index_overlap_list, all_energies, all_res)
+        for piece in np.array_split(index_overlap_list, ncpus)
+    ]
+
+    with Pool(processes=ncpus) as pool:
+        result_list = pool.map(find_best_interpolating_line, arguments_list)
+
+    error_list = [tok[0] for tok in result_list]
+    best_pos = np.argmin(error_list)
+    best_error, best_error_pair = result_list[best_pos]
+
+    # find the interpolated line
+    i_ovlp_a, i_ovlp_b = best_error_pair
+    offset_a = (args.ovsd_sx - overlap_list[i_ovlp_a]) / args.ovs / 2
+    offset_b = (args.ovsd_sx - overlap_list[i_ovlp_b]) / args.ovs / 2
+
+    sy, sx = args.original_shape
+
+    cor_abs_a = (sx - 1) / 2 + offset_a
+    cor_abs_b = (sx - 1) / 2 + offset_b
+
+    z_a = np.nanmin(all_z_transl)
+    z_b = np.nanmax(all_z_transl)
+
+    with h5py.File(args.cor_file, "w") as f:
+        f["cor"] = np.array([cor_abs_a, cor_abs_b])
+        f["z_pix"] = np.array([z_a, z_b])
+        f["z_m"] = np.array([z_a, z_b]) * args.zpix_mm * 1.0e-3
nabu/app/generate_header.py
CHANGED
@@ -1,12 +1,11 @@
 import os
 import numpy as np
 from tomoscan.io import HDF5File
-from
+from fabio.edfimage import EdfImage
 from ..io.utils import get_first_hdf5_entry
 from .utils import parse_params_values
 from .cli_configs import GenerateInfoConfig
 
-
 edf_header_hdf5_path = {
     # EDF Header
     "count_time": "/technique/scan/exposure_time",  # ms in HDF5
@@ -202,7 +201,9 @@ def generate_merged_info_file_content(
     if first_edf_proj is None:
         edf_header = simulate_edf_header(bliss_fname, bliss_entry, return_dict=True)
     else:
-
+        edf = EdfImage()
+        edf.open(first_edf_proj)
+        edf_header = edf.getHeader()
     # .info File
     if info_file is None:
         info_file_content = simulate_info_file(bliss_fname, bliss_entry, return_dict=True)
nabu/app/histogram.py
CHANGED
@@ -5,8 +5,8 @@ from silx.io.dictdump import h5todict
 from ..utils import check_supported
 from ..io.utils import get_first_hdf5_entry, get_h5_value
 from ..io.writer import NXProcessWriter
-from ..
-from ..
+from ..processing.histogram import PartialHistogram, VolumeHistogram, hist_as_2Darray
+from ..processing.histogram_cuda import CudaVolumeHistogram
 from ..resources.logger import Logger, LoggerOrPrint
 from .utils import parse_params_values
 from .cli_configs import HistogramConfig
nabu/app/multicor.py
CHANGED
@@ -34,7 +34,12 @@ def main():
         program_version="nabu " + version,
     )
 
-    reconstructor = get_reconstructor(
+    reconstructor = get_reconstructor(
+        args,
+        # Put a dummy CoR to avoid a crash in both full-FoV and extended-FoV.
+        # It will be overwritten later by the user-defined CoRs.
+        overwrite_options={"reconstruction/rotation_axis_position": 10.0},
+    )
 
     if reconstructor.delta_z > 1:
         raise ValueError("Only slice reconstruction can be used (have delta_z = %d)" % reconstructor.delta_z)
nabu/app/parse_reconstruction_log.py
ADDED
@@ -0,0 +1,151 @@
+import numpy as np
+from os import path
+from datetime import datetime
+from ..utils import check_supported, convert_str_to_tuple
+from .utils import parse_params_values
+from .cli_configs import ShowReconstructionTimingsConfig
+
+try:
+    import matplotlib.pyplot as plt
+
+    __have_matplotlib__ = True
+except ImportError:
+    __have_matplotlib__ = False
+
+steps_to_measure = [
+    "Reading data",
+    "Applying flat-field",
+    "Applying double flat-field",
+    "Applying CCD corrections",
+    "Rotating projections",
+    "Performing phase retrieval",
+    "Performing unsharp mask",
+    "Taking logarithm",
+    "Applying radios movements",
+    "Normalizing sinograms",
+    "Building sinograms",  # deprecated
+    "Removing rings on sinograms",
+    "Reconstruction",
+    "Computing histogram",
+    "Saving data",
+]
+
+
+def extract_timings_from_volume_reconstruction_lines(lines, separator=" - "):
+    def extract_timestamp(line):
+        timestamp = line.split(separator)[0]
+        return datetime.strptime(timestamp, "%d-%m-%Y %H:%M:%S")
+
+    def extract_current_step(line):
+        return line.split(separator)[-1]
+
+    current_step = extract_current_step(lines[0])
+    t1 = extract_timestamp(lines[0])
+
+    res = {}
+    for line in lines[1:]:
+        line = line.strip()
+        if len(line.split(separator)) == 1:
+            continue
+        timestamp = line.strip().split(separator)[0]
+        t2 = datetime.strptime(timestamp, "%d-%m-%Y %H:%M:%S")
+
+        res.setdefault(current_step, [])
+        res[current_step].append((t2 - t1).seconds)
+
+        t1 = t2
+        current_step = extract_current_step(line)
+
+    return res
+
+
+def parse_logfile(fname, separator=" - "):
+    """
+    Returns
+    -------
+    timings: list of dict
+        List of dictionaries: one dict per reconstruction in the log file.
+        For each dict, the key is the pipeline step name, and the value is the list of timings for the different chunks.
+    """
+    with open(fname, "r") as f:
+        lines = f.readlines()
+
+    start_text = "Going to reconstruct slices"
+    end_text = "Merging reconstructions to"
+
+    rec_log_bounds = []
+    for i, line in enumerate(lines):
+        if start_text in line:
+            start_line = i
+        if end_text in line:
+            rec_log_bounds.append((start_line, i))
+            rec_file_basename = path.basename(line.split(end_text)[-1])
+
+    results = []
+    for bounds in rec_log_bounds:
+        start, end = bounds
+        timings = {}
+        res = extract_timings_from_volume_reconstruction_lines(lines[start:end], separator=separator)
+        for step in steps_to_measure:
+            if step in res:
+                timings[step] = res[step]
+        results.append(timings)
+    return results
+
+
+def display_timings_pie(timings, reduce_function=None, cutoffs=None):
+    reduce_function = reduce_function or np.median
+    cutoffs = cutoffs or (0, np.inf)
+
+    def _format_pie_text(pct, allvals):
+        # https://matplotlib.org/stable/gallery/pie_and_polar_charts/pie_and_donut_labels.html
+        absolute = int(np.round(pct / 100.0 * np.sum(allvals)))
+        return f"{pct:.1f}%\n({absolute:d} s)"
+
+    for run in timings:
+        fig = plt.figure()
+        pie_labels = []
+        pie_sizes = []
+        for step_name, step_timings in run.items():
+            t = reduce_function(step_timings)
+            if t > cutoffs[0] and t < cutoffs[1]:
+                # pie_labels.append(step_name)
+                pie_labels.append(step_name + "\n(%d s)" % t)
+                pie_sizes.append(t)
+        ax = fig.subplots()
+        # ax.pie(pie_sizes, labels=pie_labels, autopct=lambda pct: _format_pie_text(pct, pie_sizes))  # autopct='%1.1f%%')
+        ax.pie(pie_sizes, labels=pie_labels, autopct="%1.1f%%")
+
+        fig.show()
+        input("Press any key to continue")
+
+
+def parse_reclog_cli():
+    args = parse_params_values(
+        ShowReconstructionTimingsConfig, parser_description="Display reconstruction performances from a log file"
+    )
+    if not (__have_matplotlib__):
+        print("Need matplotlib to use this utility")
+        exit(1)
+
+    display_functions = {
+        "pie": display_timings_pie,
+    }
+
+    logfile = args["logfile"]
+    cutoff = args["cutoff"]
+    display_type = args["type"]
+
+    check_supported(display_type, display_functions.keys(), "Graphics display type")
+    if cutoff is not None:
+        cutoff = list(map(float, convert_str_to_tuple(cutoff)))
+
+    timings = parse_logfile(logfile)
+    display_functions[display_type](timings, cutoffs=cutoff)
+
+    return 0
+
+
+if __name__ == "__main__":
+    parse_reclog_cli()
+    exit(0)
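A usage sketch for the new module, assuming a nabu log file at a placeholder path: parse_logfile returns one dict per reconstruction found in the log, mapping each pipeline step to its per-chunk timings, which can then be reduced before plotting:

    import numpy as np
    from nabu.app.parse_reconstruction_log import parse_logfile

    # "nabu.log" is a placeholder path to a nabu reconstruction log file
    for i, run in enumerate(parse_logfile("nabu.log")):
        print("Reconstruction #%d" % i)
        for step, seconds in run.items():
            # median time per chunk spent in this pipeline step
            print("  %s: %.0f s (median over %d chunks)" % (step, np.median(seconds), len(seconds)))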