nabu 2023.2.1__py3-none-any.whl → 2024.1.0rc3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doc/conf.py +1 -1
- doc/doc_config.py +32 -0
- nabu/__init__.py +2 -1
- nabu/app/bootstrap_stitching.py +1 -1
- nabu/app/cli_configs.py +122 -2
- nabu/app/composite_cor.py +27 -2
- nabu/app/correct_rot.py +70 -0
- nabu/app/create_distortion_map_from_poly.py +42 -18
- nabu/app/diag_to_pix.py +358 -0
- nabu/app/diag_to_rot.py +449 -0
- nabu/app/generate_header.py +4 -3
- nabu/app/histogram.py +2 -2
- nabu/app/multicor.py +6 -1
- nabu/app/parse_reconstruction_log.py +151 -0
- nabu/app/prepare_weights_double.py +83 -22
- nabu/app/reconstruct.py +5 -1
- nabu/app/reconstruct_helical.py +7 -0
- nabu/app/reduce_dark_flat.py +6 -3
- nabu/app/rotate.py +4 -4
- nabu/app/stitching.py +16 -2
- nabu/app/tests/test_reduce_dark_flat.py +18 -2
- nabu/app/validator.py +4 -4
- nabu/cuda/convolution.py +8 -376
- nabu/cuda/fft.py +4 -0
- nabu/cuda/kernel.py +4 -4
- nabu/cuda/medfilt.py +5 -158
- nabu/cuda/padding.py +5 -71
- nabu/cuda/processing.py +23 -2
- nabu/cuda/src/ElementOp.cu +78 -0
- nabu/cuda/src/backproj.cu +28 -2
- nabu/cuda/src/fourier_wavelets.cu +2 -2
- nabu/cuda/src/normalization.cu +23 -0
- nabu/cuda/src/padding.cu +2 -2
- nabu/cuda/src/transpose.cu +16 -0
- nabu/cuda/utils.py +39 -0
- nabu/estimation/alignment.py +10 -1
- nabu/estimation/cor.py +808 -38
- nabu/estimation/cor_sino.py +7 -9
- nabu/estimation/tests/test_cor.py +85 -3
- nabu/io/reader.py +26 -18
- nabu/io/tests/test_cast_volume.py +3 -3
- nabu/io/tests/test_detector_distortion.py +3 -3
- nabu/io/tiffwriter_zmm.py +2 -2
- nabu/io/utils.py +14 -4
- nabu/io/writer.py +5 -3
- nabu/misc/fftshift.py +6 -0
- nabu/misc/histogram.py +5 -285
- nabu/misc/histogram_cuda.py +8 -104
- nabu/misc/kernel_base.py +3 -121
- nabu/misc/padding_base.py +5 -69
- nabu/misc/processing_base.py +3 -107
- nabu/misc/rotation.py +5 -62
- nabu/misc/rotation_cuda.py +5 -65
- nabu/misc/transpose.py +6 -0
- nabu/misc/unsharp.py +3 -78
- nabu/misc/unsharp_cuda.py +5 -52
- nabu/misc/unsharp_opencl.py +8 -85
- nabu/opencl/fft.py +6 -0
- nabu/opencl/kernel.py +21 -6
- nabu/opencl/padding.py +5 -72
- nabu/opencl/processing.py +27 -5
- nabu/opencl/src/backproj.cl +3 -3
- nabu/opencl/src/fftshift.cl +65 -12
- nabu/opencl/src/padding.cl +2 -2
- nabu/opencl/src/roll.cl +96 -0
- nabu/opencl/src/transpose.cl +16 -0
- nabu/pipeline/config_validators.py +63 -3
- nabu/pipeline/dataset_validator.py +2 -2
- nabu/pipeline/estimators.py +193 -35
- nabu/pipeline/fullfield/chunked.py +34 -17
- nabu/pipeline/fullfield/chunked_cuda.py +7 -5
- nabu/pipeline/fullfield/computations.py +48 -13
- nabu/pipeline/fullfield/nabu_config.py +13 -13
- nabu/pipeline/fullfield/processconfig.py +10 -5
- nabu/pipeline/fullfield/reconstruction.py +1 -2
- nabu/pipeline/helical/fbp.py +5 -0
- nabu/pipeline/helical/filtering.py +12 -9
- nabu/pipeline/helical/gridded_accumulator.py +179 -33
- nabu/pipeline/helical/helical_chunked_regridded.py +262 -151
- nabu/pipeline/helical/helical_chunked_regridded_cuda.py +4 -11
- nabu/pipeline/helical/helical_reconstruction.py +56 -18
- nabu/pipeline/helical/span_strategy.py +1 -1
- nabu/pipeline/helical/tests/test_accumulator.py +4 -0
- nabu/pipeline/params.py +23 -2
- nabu/pipeline/processconfig.py +3 -8
- nabu/pipeline/tests/test_chunk_reader.py +78 -0
- nabu/pipeline/tests/test_estimators.py +120 -2
- nabu/pipeline/utils.py +25 -0
- nabu/pipeline/writer.py +2 -0
- nabu/preproc/ccd_cuda.py +9 -7
- nabu/preproc/ctf.py +21 -26
- nabu/preproc/ctf_cuda.py +25 -25
- nabu/preproc/double_flatfield.py +14 -2
- nabu/preproc/double_flatfield_cuda.py +7 -11
- nabu/preproc/flatfield_cuda.py +23 -27
- nabu/preproc/phase.py +19 -24
- nabu/preproc/phase_cuda.py +21 -21
- nabu/preproc/shift_cuda.py +58 -28
- nabu/preproc/tests/test_ctf.py +5 -5
- nabu/preproc/tests/test_double_flatfield.py +2 -2
- nabu/preproc/tests/test_vshift.py +13 -2
- nabu/processing/__init__.py +0 -0
- nabu/processing/convolution_cuda.py +375 -0
- nabu/processing/fft_base.py +163 -0
- nabu/processing/fft_cuda.py +256 -0
- nabu/processing/fft_opencl.py +54 -0
- nabu/processing/fftshift.py +134 -0
- nabu/processing/histogram.py +286 -0
- nabu/processing/histogram_cuda.py +103 -0
- nabu/processing/kernel_base.py +126 -0
- nabu/processing/medfilt_cuda.py +159 -0
- nabu/processing/muladd.py +29 -0
- nabu/processing/muladd_cuda.py +68 -0
- nabu/processing/padding_base.py +71 -0
- nabu/processing/padding_cuda.py +75 -0
- nabu/processing/padding_opencl.py +77 -0
- nabu/processing/processing_base.py +123 -0
- nabu/processing/roll_opencl.py +64 -0
- nabu/processing/rotation.py +63 -0
- nabu/processing/rotation_cuda.py +66 -0
- nabu/processing/tests/__init__.py +0 -0
- nabu/processing/tests/test_fft.py +268 -0
- nabu/processing/tests/test_fftshift.py +71 -0
- nabu/{misc → processing}/tests/test_histogram.py +2 -4
- nabu/{cuda → processing}/tests/test_medfilt.py +1 -1
- nabu/processing/tests/test_muladd.py +54 -0
- nabu/{cuda → processing}/tests/test_padding.py +119 -75
- nabu/processing/tests/test_roll.py +63 -0
- nabu/{misc → processing}/tests/test_rotation.py +3 -2
- nabu/processing/tests/test_transpose.py +72 -0
- nabu/{misc → processing}/tests/test_unsharp.py +41 -8
- nabu/processing/transpose.py +126 -0
- nabu/processing/unsharp.py +79 -0
- nabu/processing/unsharp_cuda.py +53 -0
- nabu/processing/unsharp_opencl.py +75 -0
- nabu/reconstruction/fbp.py +34 -10
- nabu/reconstruction/fbp_base.py +35 -16
- nabu/reconstruction/fbp_opencl.py +7 -12
- nabu/reconstruction/filtering.py +2 -2
- nabu/reconstruction/filtering_cuda.py +13 -14
- nabu/reconstruction/filtering_opencl.py +3 -4
- nabu/reconstruction/projection.py +2 -0
- nabu/reconstruction/rings.py +158 -1
- nabu/reconstruction/rings_cuda.py +218 -58
- nabu/reconstruction/sinogram_cuda.py +16 -12
- nabu/reconstruction/tests/test_deringer.py +116 -14
- nabu/reconstruction/tests/test_fbp.py +22 -31
- nabu/reconstruction/tests/test_filtering.py +11 -2
- nabu/resources/dataset_analyzer.py +89 -26
- nabu/resources/nxflatfield.py +2 -2
- nabu/resources/tests/test_nxflatfield.py +1 -1
- nabu/resources/utils.py +9 -2
- nabu/stitching/alignment.py +184 -0
- nabu/stitching/config.py +241 -39
- nabu/stitching/definitions.py +6 -0
- nabu/stitching/frame_composition.py +4 -2
- nabu/stitching/overlap.py +99 -3
- nabu/stitching/sample_normalization.py +60 -0
- nabu/stitching/slurm_utils.py +10 -10
- nabu/stitching/tests/test_alignment.py +99 -0
- nabu/stitching/tests/test_config.py +16 -1
- nabu/stitching/tests/test_overlap.py +68 -2
- nabu/stitching/tests/test_sample_normalization.py +49 -0
- nabu/stitching/tests/test_slurm_utils.py +5 -5
- nabu/stitching/tests/test_utils.py +3 -33
- nabu/stitching/tests/test_z_stitching.py +391 -22
- nabu/stitching/utils.py +144 -202
- nabu/stitching/z_stitching.py +309 -126
- nabu/testutils.py +18 -0
- nabu/thirdparty/tomocupy_remove_stripe.py +586 -0
- nabu/utils.py +32 -6
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/LICENSE +1 -1
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/METADATA +5 -5
- nabu-2024.1.0rc3.dist-info/RECORD +296 -0
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/WHEEL +1 -1
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/entry_points.txt +5 -1
- nabu/conftest.py +0 -14
- nabu/opencl/fftshift.py +0 -92
- nabu/opencl/tests/test_fftshift.py +0 -55
- nabu/opencl/tests/test_padding.py +0 -84
- nabu-2023.2.1.dist-info/RECORD +0 -252
- /nabu/cuda/src/{fftshift.cu → dfi_fftshift.cu} +0 -0
- {nabu-2023.2.1.dist-info → nabu-2024.1.0rc3.dist-info}/top_level.txt +0 -0
nabu/app/diag_to_pix.py
ADDED
@@ -0,0 +1,358 @@
from .. import version
from os import environ

import shutil
import os
import sys
import re
import h5py
import numpy as np
import silx.math.fft
from silx.io.dictdump import h5todict


from ..resources.logger import LoggerOrPrint

from .utils import parse_params_values
from .cli_configs import DiagToPixConfig

import h5py
from nabu.utils import DictToObj
from nabu.pipeline.estimators import oversample
from scipy.ndimage import gaussian_filter
from nxtomo.application.nxtomo import NXtomo
from multiprocessing import Pool
from ..utils import get_available_threads


"""
The operations below rely on the "diag" objects found in the result of a nabu-helical run with diag_zpro_run set to a number > 0.
They are found in the configuration section of the nabu output, in several sequential datasets whose HDF5 keys
are 0, 1, 2, ...: one per z-window (chunk) for which contributions have been collected at different angles.
These contributions are nothing else than pieces of ready-to-use preprocessed radiographs, one per angle.
In other words, redundant contributions concurring to the same preprocessed radiograph but coming from different moments
of the helical scan are kept separate.
Forming pairs from the contributions at the same angle, the two members should coincide wherever both have signal
(part of them can be dark if the detector is out of view above or below).

For each key there is a sequence of radiographs, the corresponding sequence of weight maps, the corresponding z translations, and the angles.

The number passed through diag_zpro_run is > 1 and is interpreted by the diagnostic collection run
as the number of angles to collect between 0 and 180 degrees. Let us call it num_0_180.

The collection is not done here. Here we exploit the result of a previous collection to deduce, by looking at the correlations,
which correction must be applied to the pixel size.

An example of collection is this:

|_____ diagnostics
|         |
|         |__ 0
|         |    |_ radios         (4*num_0_180, chunky, chunkx)
|         |    |
|         |    |_ weights        (4*num_0_180, chunky, chunkx)
|         |    |
|         |    |_ angles         (4*num_0_180,)
|         |    |
|         |    |_ searched_rad   (2*num_0_180,)   all the searched angles between 0 and 360, in radians
|         |    |
|         |    |_ zmm_trans      (4*num_0_180,)   the z translation in mm
|         |    |
|         |    |_ zpix_transl    (4*num_0_180,)   the z translation in pixels
|         |    |
|         |    |_ pixel_size_mm  (scalar)
"""

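
# The following helper is purely illustrative and not part of the nabu pipeline: a minimal sketch,
# assuming a reconstruction file "sample_rec.hdf5" with entry "entry0000" (both names hypothetical),
# of how the diagnostics layout documented above can be browsed with h5py before running this tool.
def _print_diag_summary(diag_file="sample_rec.hdf5", entry_name="entry0000"):
    diag_url = os.path.join("/", entry_name, "reconstruction/configuration/diagnostics")
    with h5py.File(diag_file, "r") as f:
        for key in sorted(f[diag_url].keys(), key=int):
            grp = f[diag_url][key]
            # count how many radios of this chunk carry a usable (non-nan) z translation
            n_valid = int(np.isfinite(grp["zpix_transl"][()]).sum())
            print(key, grp["radios"].shape, f"{n_valid} radios with a valid z translation")
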
def transform_images(diag, ovs):
    """Filter the radios and oversample them along the vertical direction.

    The method is in general similar to the composite CoR finding:
    several overlapping positions are used to match redundant contributions at
    different rotation stages (theta and theta + 360).
    But beforehand it is beneficial to remove the low spatial frequencies,
    and the oversampling is done on the fly.
    """
    assert len(ovs) == 2, "oversampling must be specified for both the vertical and the horizontal dimension"

    diag.radios[:] = diag.radios / diag.weights

    diag.radios = [oversample((ima - gaussian_filter(ima, 20, mode="nearest")), ovs) for ima in diag.radios]

    diag.weights = [oversample(ima, ovs) for ima in diag.weights]


def detailed_merit(diag, shift):
    # res will become the merit summed over all the (theta, theta+360) pairs
    res = 0.0

    # The weight must be accounted for as well, so this will become the weight used for the (theta, theta+360) pairs
    res_w = 0.0

    ## The following two variables are important pieces of information to collect.
    ## One is the z translation over a 360 degree turn,
    ## the other is the pixel size in mm.
    ## At the end of the script, the residual shift giving perfect correlation
    ## will be used to correct zpix_mm, doing a pro-rata with respect to
    ## the observed z translation over one turn.
    observed_oneturn_total_shift_zpix_list = []
    zpix_mm = None

    n_angles_2pi = len(diag.radios) // 2
    # In accordance with the collection layout for the diagnostics (diag_zpro_run parameter passed to nabu-helical),
    # there are n_angles_pi angles in [0, 180[, and then again the same number of possibly valid radios
    # (check for nan in the z translation) in [180, 360[, [360, 540[, [540, 720[.
    # So we have len(diag.radios) // 2 angles in the range [0, 360[,
    # because we have len(diag.radios) in [0, 720[.

    detailed_merit_list = []  # one entry for each (theta, theta+360) pair
    detailed_weight_list = []  # one entry for each (theta, theta+360) pair

    for i in range(n_angles_2pi):
        # if we have something for both members of the pair, proceed
        if (not np.isnan(diag.zpix_transl[i])) and (not np.isnan(diag.zpix_transl[i + n_angles_2pi])):
            # because we have theta and theta + 360

            zpix_mm = diag.pixel_size_mm
            add, add_w = merit(
                diag.radios[i], diag.radios[i + n_angles_2pi], diag.weights[i], diag.weights[i + n_angles_2pi], shift
            )

            detailed_merit_list.append(add)
            detailed_weight_list.append(add_w)

            observed_oneturn_total_shift_zpix_list.append(diag.zpix_transl[i + n_angles_2pi] - diag.zpix_transl[i])

    return detailed_merit_list, detailed_weight_list, observed_oneturn_total_shift_zpix_list, zpix_mm


def merit(ima_a, ima_b, w_a, w_b, s):
    """A definition of the merit which also accounts for the data weight.

    Calculates the merit for a given shift s,
    comparing a and b,
    considering the signal ima and the weight w.
    """
    if s == 0:
        # return -abs((ima_a - ima_b) * w_a * w_b).astype("d").mean(), (w_a * w_b).astype("d").mean()

        return (ima_a * ima_b * w_a * w_b).astype("d").sum(), (w_a * w_b).astype("d").sum()

    elif s > 0:
        # Keep the commented lines in case one wishes to test an L1 merit
        # pi = abs(ima_b[s:] - ima_a[:-s])
        # pw = w_b[s:] * w_a[:-s]
        # return -(pi * pw).astype("d").mean(), (pw).astype("d").mean()

        pi = ima_b[s:] * ima_a[:-s]
        pw = w_b[s:] * w_a[:-s]
        return (pi * pw).astype("d").sum(), pw.astype("d").sum()
    else:
        # Keep the commented lines in case one wishes to test an L1 merit
        # pi = abs(ima_a[-s:] - ima_b[:s])
        # pw = w_a[-s:] * w_b[:s]
        # return -(pi * pw).astype("d").mean(), pw.astype("d").mean()

        pi = ima_a[-s:] * ima_b[:s]
        pw = w_a[-s:] * w_b[:s]
        return (pi * pw).astype("d").sum(), pw.astype("d").sum()


def build_total_merit_list(diag, oversample_factor, args):
    # Calculates the merit at all the tested extra adjustment shifts.

    transform_images(diag, [oversample_factor, 1])
    h_ima = diag.radios[0].shape[0]
    # search_radius_v = min(oversample_factor * args.search_radius_v, h_ima - 1)
    search_radius_v = oversample_factor * args.search_radius_v

    shift_s = []
    for_all_shifts_detailed_merit_lists = []
    for_all_shifts_detailed_weight_lists = []

    observed_oneturn_total_shift_zpix_list, zpix_mm = None, None

    for shift in range(-search_radius_v, search_radius_v + 1):
        (
            detailed_merit_list,
            detailed_weight_list,
            found_observed_oneturn_total_shift_zpix_list,
            found_zpix_mm,
        ) = detailed_merit(diag, shift)

        if found_zpix_mm is not None:
            # The following two assignments do not depend on the shift:
            # the shift is what we apply prior to comparing images,
            # while the two items below are properties of the scan.
            # In particular they depend on the z_translation and the angles coming from bliss.
            zpix_mm = found_zpix_mm
            observed_oneturn_total_shift_zpix_list = found_observed_oneturn_total_shift_zpix_list

            # The merit and the weight are the result of the comparison; they depend on the shift.
            for_all_shifts_detailed_merit_lists.append(detailed_merit_list)
            for_all_shifts_detailed_weight_lists.append(detailed_weight_list)
            shift_s.append(
                shift / oversample_factor
            )  # shift_s is stored in original pixel units; the images were oversampled
        else:
            # nothing to append here: no correspondence was found
            pass

    # Now a transposition: for each (theta, theta+360) pair we want a list containing the merit values for each adjustment shift.
    # For each pair there is a list which runs over the shifts.
    # Same thing for the weights.
    for_all_pairs_detailed_merit_lists = zip(*for_all_shifts_detailed_merit_lists)
    for_all_pairs_detailed_weight_lists = zip(*for_all_shifts_detailed_weight_lists)

    return (
        for_all_pairs_detailed_merit_lists,
        for_all_pairs_detailed_weight_lists,
        observed_oneturn_total_shift_zpix_list,
        zpix_mm,
    )


def main(user_args=None):
    """Analyse the diagnostics and correct the pixel size"""

    if user_args is None:
        user_args = sys.argv[1:]

    args = DictToObj(
        parse_params_values(
            DiagToPixConfig,
            parser_description=main.__doc__,
            program_version="nabu " + version,
            user_args=user_args,
        )
    )

    oversample_factor = 4
    if args.nexus_source is None:
        args.nexus_source = args.nexus_target

    ## Read all the available diagnostics.
    ## Every key corresponds to a chunk of the helical pipeline.
    diag_url = os.path.join("/", args.entry_name, "reconstruction/configuration/diagnostics")
    diag_keys = []
    with h5py.File(args.diag_file, "r") as f:
        diag_keys = list(f[diag_url].keys())
        diag_keys = [diag_keys[i] for i in np.argsort(list(map(int, diag_keys)))]

    # The diag_keys are 0, 1, 2, ...: one per z-window (chunk) for which contributions have been collected at different angles.
    # These contributions are nothing else than pieces of ready-to-use preprocessed radiographs, one per angle.
    # Pairs should coincide wherever both members have signal (part of them can be dark if the detector is out of view above or below).
    # For each key there is a sequence of radiographs, the corresponding sequence of weight maps, the corresponding z translations, and the angles.

    zpix_mm = None
    observed_oneturn_total_shift_zpix = None

    argument_list = [
        (DictToObj(h5todict(args.diag_file, os.path.join(diag_url, my_key))), oversample_factor, args)
        for my_key in diag_keys
    ]

    ncpus = get_available_threads()
    with Pool(processes=ncpus) as pool:
        all_res_plus_infos = pool.starmap(build_total_merit_list, argument_list)

    observed_oneturn_total_shift_zpix, zpix_mm = None, None

    # The result of pool.starmap needs to be flattened.
    for_all_pairs_detailed_merit_lists = []
    for_all_pairs_detailed_weight_lists = []
    observed_oneturn_total_shift_zpix_list = []
    zpix_mm = None

    for (
        tmp_merit_lists,
        tmp_weight_lists,
        tmp_observed_oneturn_total_shift_zpix_list,
        tmp_zpix_mm,
    ) in all_res_plus_infos:
        if tmp_zpix_mm is not None:
            # Each item of the composed lists is for a given (theta, theta+360) pair,
            # and each such item is itself a list with one entry per probed shift.
            for_all_pairs_detailed_merit_lists.extend(tmp_merit_lists)
            for_all_pairs_detailed_weight_lists.extend(tmp_weight_lists)
            observed_oneturn_total_shift_zpix_list.extend(tmp_observed_oneturn_total_shift_zpix_list)

            zpix_mm = tmp_zpix_mm

    if zpix_mm is None:
        message = "No overlapping was found"
        raise RuntimeError(message)

    if len(for_all_pairs_detailed_merit_lists) == 0:
        message = "No diag was found"
        raise RuntimeError(message)

    # Now an important search step:
    # we find the (theta, theta+360) pair for which the observed translation has the biggest absolute value.
    # The search for the optimum over the readjustment shift is then performed in the
    # range (-search_radius_v, search_radius_v + 1),
    # considered as the readjustment for the ideal pair whose translation equals exactly this maximal absolute observed translation.
    # For all the others the readjustment is multiplied by the pro-rata factor
    # given by their smaller z translation over the maximal one.
    max_absolute_shift = abs(np.array(observed_oneturn_total_shift_zpix_list)).max()

    # going to search for the best pixel size
    max_merit = None
    best_shift = None
    search_radius_v = oversample_factor * args.search_radius_v

    probed_shift_list = list(range(-search_radius_v, search_radius_v + 1))

    for shift in range(-search_radius_v, search_radius_v + 1):
        total_sum = 0
        total_weight = 0

        for merit_list, weight_list, one_turn_shift in zip(
            for_all_pairs_detailed_merit_lists,
            for_all_pairs_detailed_weight_lists,
            observed_oneturn_total_shift_zpix_list,
        ):
            # sanity check
            assert len(merit_list) == len(probed_shift_list), "this should not happen"
            assert len(weight_list) == len(probed_shift_list), "this should not happen"

            # pro-rata shift
            my_shift = shift * (one_turn_shift / max_absolute_shift)

            # linear interpolation, done with searchsorted
            i1 = np.searchsorted(probed_shift_list, my_shift)

            if i1 > 0 and i1 < len(probed_shift_list):
                i0 = i1 - 1
                fract = (-my_shift + probed_shift_list[i1]) / (probed_shift_list[i1] - probed_shift_list[i0])

                total_sum += fract * merit_list[i0] + (1 - fract) * merit_list[i1]
                total_weight += fract * weight_list[i0] + (1 - fract) * weight_list[i1]

        if total_weight == 0:
            # this avoids 0/0 = nan
            total_weight = 1

        m = total_sum / total_weight

        if (max_merit is None) or ((not np.isnan(m)) and m > max_merit):
            max_merit = m
            best_shift = shift / oversample_factor

    print(" Best shift at ", best_shift)
    print(
        f" Over one turn the reference shift was {max_absolute_shift} pixels, but a residual shift of {best_shift} remains "
    )
    # The formula below is already purged of the oversampling factor: it was accounted for when best_shift was recorded,
    # and the z shift is registered when looking at the z_translation, where the oversampling plays no role.
    zpix_mm = zpix_mm * (max_absolute_shift) / (max_absolute_shift - best_shift)
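    # Purely illustrative numbers (not taken from any real dataset): if the reference pair moved by
    # max_absolute_shift = 400 pixels over one turn while the correlation still asks for an extra
    # best_shift = 2 pixels, the true displacement was only 398 detector pixels, so the pixel size
    # gets multiplied by 400 / 398, i.e. enlarged by roughly 0.5 %.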

    print(f"Corrected zpix_mm = {zpix_mm}")

    if args.nexus_target is not None:
        nx_tomo = NXtomo().load(args.nexus_source, args.entry_name)
        nx_tomo.instrument.detector.x_pixel_size = zpix_mm * 1.0e-3  # the pixel size must be provided in SI units (meters)
        nx_tomo.instrument.detector.y_pixel_size = zpix_mm * 1.0e-3  # the pixel size must be provided in SI units (meters)
        nx_tomo.save(file_path=args.nexus_target, data_path=args.entry_name, overwrite=True)

    return 0