nabu 2025.1.0rc5__py3-none-any.whl → 2025.1.1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nabu/__init__.py +1 -1
- nabu/estimation/motion.py +9 -2
- nabu/io/cast_volume.py +33 -11
- nabu/io/reader.py +12 -4
- nabu/io/tests/test_readers.py +37 -34
- nabu/misc/utils.py +2 -2
- nabu/pipeline/estimators.py +1 -1
- nabu/pipeline/fullfield/chunked.py +25 -10
- nabu/pipeline/fullfield/nabu_config.py +1 -1
- nabu/pipeline/fullfield/processconfig.py +2 -1
- nabu/pipeline/fullfield/reconstruction.py +1 -0
- nabu/preproc/flatfield.py +3 -3
- nabu/reconstruction/iterative.py +30 -0
- nabu/resources/dataset_analyzer.py +35 -0
- nabu/resources/tests/test_dataset_analyzer.py +37 -0
- nabu/testutils.py +34 -0
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/METADATA +1 -1
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/RECORD +22 -22
- doc/doc_config.py +0 -32
- nabu/reconstruction/astra.py +0 -245
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/WHEEL +0 -0
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/entry_points.txt +0 -0
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/licenses/LICENSE +0 -0
- {nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/top_level.txt +0 -0
nabu/__init__.py
CHANGED
nabu/estimation/motion.py
CHANGED
@@ -1,5 +1,6 @@
 from enum import Enum
 from multiprocessing.pool import ThreadPool
+from warnings import warn
 import numpy as np
 from ..utils import get_num_threads

@@ -218,12 +219,18 @@ class MotionEstimation:
         return _normalize_with_reference(a_all[-n_outward_projs:])

     def _configure_shifts_estimator(self, shifts_estimator, shifts_estimator_kwargs):
-        self.shifts_estimator = shifts_estimator
         if shifts_estimator not in self._shifts_estimators_default_kwargs:
             raise NotImplementedError(
                 f"Unknown estimator shifts '{shifts_estimator}', available are {list(self._shifts_estimators_default_kwargs.keys())}"
             )
-        self.
+        self.shifts_estimator = shifts_estimator
+        if self.shifts_estimator == "phase_cross_correlation" and phase_cross_correlation is None:
+            warn(
+                "shift estimator was set to 'phase_cross_correlation' but it requires scikit-image which is not available. Falling back to 'DetectorTranslationAlongBeam'",
+                Warning,
+            )
+            self.shifts_estimator = "DetectorTranslationAlongBeam"
+        self._shifts_estimator_kwargs = self._shifts_estimators_default_kwargs[self.shifts_estimator].copy()
         self._shifts_estimator_kwargs.update(shifts_estimator_kwargs or {})

     def _find_shifts(self, img1, img2):
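The change above makes MotionEstimation degrade gracefully when scikit-image is not installed. A minimal, self-contained sketch of the same optional-dependency fallback pattern (pick_shifts_estimator is a hypothetical helper used only for illustration, not nabu API):

    from warnings import warn

    try:
        from skimage.registration import phase_cross_correlation
    except ImportError:
        phase_cross_correlation = None

    def pick_shifts_estimator(requested):
        # Fall back to the always-available default when the optional backend is missing
        if requested == "phase_cross_correlation" and phase_cross_correlation is None:
            warn(
                "'phase_cross_correlation' requires scikit-image, which is not available. "
                "Falling back to 'DetectorTranslationAlongBeam'",
                Warning,
            )
            return "DetectorTranslationAlongBeam"
        return requested

    print(pick_shifts_estimator("phase_cross_correlation"))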
nabu/io/cast_volume.py
CHANGED
@@ -142,7 +142,7 @@ def cast_volume(
     remove_input_volume: bool = False,
 ) -> VolumeBase:
     """
-    cast
+    cast given volume to output_volume of 'output_data_type' type

     :param VolumeBase input_volume:
     :param VolumeBase output_volume:

@@ -231,6 +231,7 @@ def cast_volume(
                 data_max=data_max,
                 rescale_min_percentile=rescale_min_percentile,
                 rescale_max_percentile=rescale_max_percentile,
+                default_value_for_nan=new_min,
             ).astype(output_data_type)
         else:
             output_slice = input_slice.astype(output_data_type)

@@ -268,25 +269,46 @@ def clamp_and_rescale_data(
     data_max=None,
     rescale_min_percentile=RESCALE_MIN_PERCENTILE,
     rescale_max_percentile=RESCALE_MAX_PERCENTILE,
+    default_value_for_nan=None,
+    do_float64=True,
 ):
     """
     rescale data to 'new_min', 'new_max'

-
-
-    :
-
-    :
-
-    :
-
+    Parameters
+    ----------
+    data: numpy.ndarray
+        Data to be rescaled (image or volume)
+    new_min: scalar
+        Rescaled data new min (clamp min value)
+    new_max: scalar
+        Rescaled data new min (clamp max value)
+    data_min: scalar, optional
+        Data minimum value. If not provided, will re-compute the min() over data.
+    data_max: scalar, optional
+        Data maximum value. If not provided, will re-compute the min() over data.
+    rescale_min_percentile: scalar, optional
+        if `data_min` is None will set data_min to 'rescale_min_percentile'
+    rescale_max_percentile
+        if `data_max` is None will set data_min to 'rescale_max_percentile'
+    default_value_for_nan: scalar, optional
+        Value that will replace NaNs, if any. Default is None (keep NaNs, will likely raise an error)
+    do_float64
+        Whether to do internal computations in float64. Recommended when casting from float32 to int32 for example.
     """
+    if do_float64 and data.dtype.itemsize < 8:
+        data = numpy.float64(data)
     if data_min is None:
-        data_min = numpy.
+        data_min = numpy.nanpercentile(data, rescale_min_percentile)
     if data_max is None:
-        data_max = numpy.
+        data_max = numpy.nanpercentile(data, rescale_max_percentile)
     # rescale data
     rescaled_data = rescale_data(data, new_min=new_min, new_max=new_max, data_min=data_min, data_max=data_max)
+    # Handle NaNs
+    if default_value_for_nan is not None:
+        isnan_mask = numpy.isnan(rescaled_data)
+        if numpy.any(isnan_mask):
+            rescaled_data[isnan_mask] = default_value_for_nan
     # clamp data
     rescaled_data[rescaled_data < new_min] = new_min
     rescaled_data[rescaled_data > new_max] = new_max
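The new default_value_for_nan path above replaces NaNs after rescaling, so that casting to an integer type no longer yields undefined values. A self-contained numpy sketch of that logic (illustrative only, not nabu's actual clamp_and_rescale_data; the 10/90 percentiles stand in for the RESCALE_*_PERCENTILE defaults):

    import numpy as np

    def clamp_and_rescale_sketch(data, new_min, new_max, default_value_for_nan=None):
        data = np.float64(data)                    # mirrors the do_float64=True path
        data_min = np.nanpercentile(data, 10)      # stand-in for rescale_min_percentile
        data_max = np.nanpercentile(data, 90)      # stand-in for rescale_max_percentile
        rescaled = (new_max - new_min) / (data_max - data_min) * (data - data_min) + new_min
        if default_value_for_nan is not None:
            rescaled[np.isnan(rescaled)] = default_value_for_nan
        return np.clip(rescaled, new_min, new_max)

    vol = np.array([[0.0, np.nan, 1.0, 2.0]], dtype="f")
    # The NaN sample becomes new_min (0) instead of an undefined uint16 value
    print(clamp_and_rescale_sketch(vol, 0, 65535, default_value_for_nan=0).astype(np.uint16))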
nabu/io/reader.py
CHANGED
@@ -556,7 +556,11 @@ class VolReaderBase:
         slice_x = None
         if isinstance(sub_region, (tuple, list)):
             slice_angle, slice_z, slice_x = sub_region
-        self.sub_region = (
+        self.sub_region = (
+            slice_angle if slice_angle is not None else slice(None, None),
+            slice_z if slice_z is not None else slice(None, None),
+            slice_x if slice_x is not None else slice(None, None),
+        )

     def _set_processing_function(self, processing_func, processing_func_args, processing_func_kwargs):
         self.processing_func = processing_func

@@ -682,9 +686,13 @@ class NXTomoReader(VolReaderBase):
             # In this case, we can use h5py read_direct() to avoid extraneous memory consumption
             image_key_slice = self._image_key_slices[0]
             # merge image key selection and user selection (if any)
-
-
-
+            angles_slice = self.sub_region[0]
+            if isinstance(angles_slice, slice) or angles_slice is None:
+                angles_slice = merge_slices(image_key_slice, self.sub_region[0] or slice(None, None))
+            else:  # assuming numpy array
+                # TODO more elegant
+                angles_slice = np.arange(self.data_shape_total[0], dtype=np.uint64)[image_key_slice][angles_slice]
+            self._source_selection = (angles_slice,) + self.sub_region[1:]
         else:
             user_selection_dim0 = self.sub_region[0]
             indices = np.arange(self.data_shape_total[0])
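The hunk above lets sub_region[0] be an array of projection indices (not only a slice); in that case it is combined with the image-key selection by fancy indexing rather than by merge_slices. Self-contained sketch of that merge, with illustrative sizes:

    import numpy as np

    n_frames_total = 144                 # darks + flats + projections + flats + alignment
    image_key_slice = slice(21, 121)     # where the projection frames sit in the full stack
    # user selection given as an integer array, e.g. projections minus an excluded range
    user_selection = np.concatenate([np.arange(0, 10), np.arange(20, 100)])

    frame_indices = np.arange(n_frames_total, dtype=np.uint64)[image_key_slice][user_selection]
    print(frame_indices[:5], frame_indices.size)   # absolute frame indices to read with h5py, 90 of them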
nabu/io/tests/test_readers.py
CHANGED
@@ -1,41 +1,23 @@
 from math import ceil
 from tempfile import TemporaryDirectory
-from dataclasses import dataclass
 from tomoscan.io import HDF5File
 import pytest
 import numpy as np
 from nxtomo.application.nxtomo import ImageKey
 from tomoscan.esrf import EDFVolume
 from nabu.pipeline.reader import NXTomoReaderBinning
-from nabu.testutils import utilstest, __do_long_tests__, get_file
+from nabu.testutils import utilstest, __do_long_tests__, get_file, get_dummy_nxtomo_info
 from nabu.utils import indices_to_slices, merge_slices
 from nabu.io.reader import EDFStackReader, NXTomoReader, NXDarksFlats
-
-
-@dataclass
-class SimpleNXTomoDescription:
-    n_darks: int = 0
-    n_flats1: int = 0
-    n_projs: int = 0
-    n_flats2: int = 0
-    n_align: int = 0
-    frame_shape: tuple = None
-    dtype: np.dtype = np.uint16
+from nabu.resources.dataset_analyzer import analyze_dataset


 @pytest.fixture(scope="class")
 def bootstrap_nx_reader(request):
     cls = request.cls
-
-
-    cls.nx_data_path = "entry/instrument/detector/data"
-    cls.data_desc = SimpleNXTomoDescription(
-        n_darks=10, n_flats1=11, n_projs=100, n_flats2=11, n_align=12, frame_shape=(11, 10), dtype=np.uint16
+    cls.nx_fname, cls.data_desc, cls.image_key, cls.projs_vals, cls.darks_vals, cls.flats1_vals, cls.flats2_vals = (
+        get_dummy_nxtomo_info()
     )
-    cls.projs_vals = np.arange(cls.data_desc.n_projs) + cls.data_desc.n_flats1 + cls.data_desc.n_darks
-    cls.darks_vals = np.arange(cls.data_desc.n_darks)
-    cls.flats1_vals = np.arange(cls.data_desc.n_darks, cls.data_desc.n_darks + cls.data_desc.n_flats1)
-    cls.flats2_vals = np.arange(cls.data_desc.n_darks, cls.data_desc.n_darks + cls.data_desc.n_flats2)

     yield
     # teardown

@@ -45,7 +27,7 @@ def bootstrap_nx_reader(request):
 class TestNXReader:
     def test_incorrect_path(self):
         with pytest.raises(FileNotFoundError):
-            reader = NXTomoReader("/invalid/path"
+            reader = NXTomoReader("/invalid/path")
         with pytest.raises(KeyError):
             reader = NXTomoReader(self.nx_fname, "/bad/data/path")  # noqa: F841

@@ -53,7 +35,7 @@ class TestNXReader:
         """
         Test NXTomoReader with simplest settings
         """
-        reader1 = NXTomoReader(self.nx_fname
+        reader1 = NXTomoReader(self.nx_fname)
         data1 = reader1.load_data()
         assert data1.shape == (self.data_desc.n_projs,) + self.data_desc.frame_shape
         assert np.allclose(data1[:, 0, 0], self.projs_vals)

@@ -62,15 +44,15 @@ class TestNXReader:
         """
         Test the data selection using "image_key".
         """
-        reader_projs = NXTomoReader(self.nx_fname,
+        reader_projs = NXTomoReader(self.nx_fname, image_key=ImageKey.PROJECTION.value)
         data = reader_projs.load_data()
         assert np.allclose(data[:, 0, 0], self.projs_vals)

-        reader_darks = NXTomoReader(self.nx_fname,
+        reader_darks = NXTomoReader(self.nx_fname, image_key=ImageKey.DARK_FIELD.value)
         data_darks = reader_darks.load_data()
         assert np.allclose(data_darks[:, 0, 0], self.darks_vals)

-        reader_flats = NXTomoReader(self.nx_fname,
+        reader_flats = NXTomoReader(self.nx_fname, image_key=ImageKey.FLAT_FIELD.value)
         data_flats = reader_flats.load_data()
         assert np.allclose(data_flats[:, 0, 0], np.concatenate([self.flats1_vals, self.flats2_vals]))

@@ -83,10 +65,10 @@ class TestNXReader:
         def _check_correct_shape_succeeds(shape, sub_region, test_description=""):
             err_msg = "Something wrong with the following test:" + test_description
             data_buffer = np.zeros(shape, dtype="f")
-            reader1 = NXTomoReader(self.nx_fname,
+            reader1 = NXTomoReader(self.nx_fname, sub_region=sub_region)
             data1 = reader1.load_data(output=data_buffer)
             assert id(data1) == id(data_buffer), err_msg
-            reader2 = NXTomoReader(self.nx_fname,
+            reader2 = NXTomoReader(self.nx_fname, sub_region=sub_region)
             data2 = reader2.load_data()
             assert np.allclose(data1, data2), err_msg

@@ -124,7 +106,6 @@ class TestNXReader:
             data_buffer_wrong_shape = np.zeros(wrong_shape, dtype="f")
             reader = NXTomoReader(
                 self.nx_fname,
-                self.nx_data_path,
                 sub_region=test_case["sub_region"],
             )
             reader.load_data(output=data_buffer_wrong_shape)

@@ -148,7 +129,7 @@ class TestNXReader:
         ]

         for test_case in test_cases:
-            reader = NXTomoReader(self.nx_fname,
+            reader = NXTomoReader(self.nx_fname, sub_region=test_case["sub_region"])
             data = reader.load_data()
             assert data.shape == test_case["expected_shape"]
             assert np.allclose(data[:, 0, 0], test_case["expected_values"])

@@ -156,7 +137,7 @@ class TestNXReader:
     def test_reading_with_binning_(self):
         from nabu.pipeline.reader import NXTomoReaderBinning

-        reader_with_binning = NXTomoReaderBinning((2, 2), self.nx_fname
+        reader_with_binning = NXTomoReaderBinning((2, 2), self.nx_fname)
         data = reader_with_binning.load_data()
         assert data.shape == (self.data_desc.n_projs,) + tuple(n // 2 for n in self.data_desc.frame_shape)

@@ -177,7 +158,6 @@ class TestNXReader:
         reader_distortion_corr = NXTomoReaderDistortionCorrection(
             distortion_corrector,
             self.nx_fname,
-            self.nx_data_path,
             sub_region=sub_region,
         )

@@ -220,7 +200,7 @@ class TestNXReader:
         for test_case in test_cases:
             binning = test_case.get("binning", None)
             reader_cls = NXTomoReader
-            init_args = [self.nx_fname
+            init_args = [self.nx_fname]
             init_kwargs = {"sub_region": test_case["sub_region"]}
             if binning is not None:
                 reader_cls = NXTomoReaderBinning

@@ -231,6 +211,29 @@ class TestNXReader:
         assert data.shape == test_case["expected_shape"], err_msg
         assert np.allclose(data[:, 0, 0], test_case["expected_values"]), err_msg

+    def test_load_exclude_projections(self):
+        n_z, n_x = self.data_desc.frame_shape
+        # projs_idx = np.where(self.image_key == 0)[0]
+        projs_idx = np.arange(self.data_desc.n_projs, dtype=np.int64)
+        excluded_projs_idx_1 = projs_idx[10:20]
+        excluded_projs_idx_2 = np.concatenate([projs_idx[10:14], projs_idx[50:57]])
+
+        set_to_nparray = lambda x: np.array(sorted(list(x)))
+
+        projs_idx1 = set_to_nparray(set(projs_idx) - set(excluded_projs_idx_1))
+        projs_idx2 = set_to_nparray(set(projs_idx) - set(excluded_projs_idx_2))
+
+        sub_regions_to_test = (
+            (projs_idx1, None, None),
+            (projs_idx1, slice(0, n_z // 2), None),
+            (projs_idx2, None, None),
+            (projs_idx2, slice(3, n_z // 2), None),
+        )
+        for sub_region in sub_regions_to_test:
+            reader = NXTomoReader(self.nx_fname, sub_region=sub_region)
+            data = reader.load_data()
+            assert np.allclose(data[:, 0, 0], self.projs_vals[sub_region[0]])
+

 @pytest.fixture(scope="class")
 def bootstrap_edf_reader(request):
nabu/misc/utils.py
CHANGED
@@ -3,9 +3,9 @@ import numpy as np

 def rescale_data(data, new_min, new_max, data_min=None, data_max=None):
     if data_min is None:
-        data_min = np.
+        data_min = np.nanmin(data)
     if data_max is None:
-        data_max = np.
+        data_max = np.nanmax(data)
     return (new_max - new_min) / (data_max - data_min) * (data - data_min) + new_min
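Switching to nanmin/nanmax keeps a single NaN pixel from poisoning the rescaling bounds. Quick numeric illustration:

    import numpy as np

    data = np.array([1.0, np.nan, 3.0])
    print(np.min(data), np.nanmin(data))     # nan 1.0
    data_min, data_max = np.nanmin(data), np.nanmax(data)
    print((10 - 0) / (data_max - data_min) * (data - data_min) + 0)   # [ 0. nan 10.]: only the NaN sample stays NaN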
nabu/pipeline/estimators.py
CHANGED
@@ -690,7 +690,7 @@ class CompositeCORFinder(CORFinderBase):
         tmp_sy, ovsd_sx = radio1.shape
         assert orig_sy == tmp_sy and orig_ovsd_sx == ovsd_sx, "this should not happen"

-        cor_side = self.cor_options["side"]
+        cor_side = self._lookup_side if self._lookup_side is not None else self.cor_options["side"]
         if cor_side == "center":
             overlap_min = max(round(ovsd_sx - ovsd_sx / 3), 4)
             overlap_max = min(round(ovsd_sx + ovsd_sx / 3), 2 * ovsd_sx - 4)
nabu/pipeline/fullfield/chunked.py
CHANGED
@@ -1,6 +1,5 @@
 from os import path
 from time import time
-from math import ceil
 import numpy as np
 from silx.io.url import DataUrl

@@ -127,10 +126,10 @@ class ChunkedPipeline:
         if len(chunk_shape) != 3:
             raise ValueError("Expected chunk_shape to be a tuple of length 3 in the form (n_z, n_y, n_x)")
         self.chunk_shape = tuple(int(c) for c in chunk_shape)  # cast to int, as numpy.int64 can make pycuda crash
-
+        ss_start = getattr(self.process_config, "subsampling_start", 0)
         # (n_a, n_z, n_x)
         self.radios_shape = (
-
+            np.arange(self.chunk_shape[0])[ss_start :: self.process_config.subsampling_factor].size,
             self.chunk_shape[1] // self.process_config.binning[1],
             self.chunk_shape[2] // self.process_config.binning[0],
         )

@@ -340,13 +339,28 @@ class ChunkedPipeline:
         subs_z = None
         subs_x = None
         angular_sub_region = slice(*(self.sub_region[0]))
+
+        # exclude(subsample(.)) != subsample(exclude(.))
+        # Here we want the latter: first exclude the user-defined angular range, and then subsample the remaining indices
+        if len(self.dataset_info.get_excluded_projections_indices()) > 0:
+            angular_sub_region = np.array(
+                [
+                    self.dataset_info.index_to_proj_number(i)
+                    for i in sorted(list(self.dataset_info.projections.keys()))
+                ]
+            )
         if self.process_config.subsampling_factor:
             subs_angles = self.process_config.subsampling_factor
-
-
-
-
-
+            start = getattr(self.process_config, "subsampling_start", 0) + self.sub_region[0][0]
+            if isinstance(angular_sub_region, slice):
+                angular_sub_region = slice(
+                    start,
+                    self.sub_region[0][1],
+                    subs_angles,
+                )
+            else:
+                angular_sub_region = angular_sub_region[start::subs_angles]
+
         reader_sub_region = (
             angular_sub_region,
             slice(*(self.sub_region[1]) + ((subs_z,) if subs_z else ())),

@@ -363,7 +377,7 @@ class ChunkedPipeline:
         if self.dataset_info.kind == "nx":
             self.chunk_reader = NXTomoReader(
                 self.dataset_info.dataset_hdf5_url.file_path(),
-                self.dataset_info.dataset_hdf5_url.data_path(),
+                data_path=self.dataset_info.dataset_hdf5_url.data_path(),
                 sub_region=reader_sub_region,
                 image_key=0,
                 **other_reader_kwargs,

@@ -865,9 +879,10 @@ class ChunkedPipeline:
             data_vwu[i] = self.radios[:, i, :]
         # ---

-
+        rec = self.reconstruction.reconstruct(  # pylint: disable=E1101
             data_vwu,
         )
+        return rec.astype("f")  # corrct uses float64 data

     @pipeline_step("histogram", "Computing histogram")
     def _compute_histogram(self, data=None):
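The comment in the hunk above notes that angular exclusion and subsampling do not commute; the pipeline now excludes first and then subsamples the surviving projection numbers. A self-contained illustration of the difference:

    import numpy as np

    proj_numbers = np.arange(20)
    excluded = set(range(5, 8))          # projections 5, 6, 7 are excluded
    subsampling = 2

    kept = np.array([p for p in proj_numbers if p not in excluded])
    # exclude first, then subsample (what the pipeline wants): [ 0  2  4  9 11 13 15 17 19]
    print(kept[::subsampling])
    # subsample first, then exclude: [0, 2, 4, 8, 10, 12, 14, 16, 18] -- a different set
    print([int(p) for p in proj_numbers[::subsampling] if p not in excluded])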
nabu/pipeline/fullfield/nabu_config.py
CHANGED
@@ -103,7 +103,7 @@ nabu_config = {
     },
     "detector_distortion_correction": {
         "default": "",
-        "help": "Apply coordinate transformation on the raw data, at the reading stage. Default (empty) is None
+        "help": "Apply coordinate transformation on the raw data, at the reading stage. Default (empty) is None.\n Available are: None, identity(for testing the pipeline), map_xz.\n This latter method requires two URLs being passed by detector_distortion_correction_options: map_x and map_z pointing to two 2D arrays containing the position where each pixel can be interpolated at in the raw data",
         "validator": detector_distortion_correction_validator,
         "type": "advanced",
     },
nabu/pipeline/fullfield/processconfig.py
CHANGED
@@ -12,6 +12,7 @@ from ...resources.nxflatfield import update_dataset_info_flats_darks
 from ...resources.utils import get_quantities_and_units
 from ..estimators import estimate_cor
 from ..processconfig import ProcessConfigBase
+from ..config_validators import convert_to_bool
 from .nabu_config import nabu_config, renamed_keys
 from .dataset_validator import FullFieldDatasetValidator
 from nxtomo.nxobject.nxdetector import ImageKey

@@ -475,7 +476,7 @@ class ProcessConfig(ProcessConfigBase):
         # Double flat field
         #
         # ---- COMPAT ----
-        if nabu_config["preproc"].get("double_flatfield_enabled", False):
+        if convert_to_bool(nabu_config["preproc"].get("double_flatfield_enabled", False))[0]:
             deprecation_warning(
                 "'double_flatfield_enabled' has been renamed to 'double_flatfield'. Please update your configuration file"
             )
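The convert_to_bool() wrapper matters because values parsed from a .conf file are strings, and the string "0" is truthy, so the old check fired the deprecation branch even when the option was explicitly disabled. Illustrative sketch (this convert_to_bool is a stand-in returning a (value, error) pair, suggested by the [0] indexing above, not nabu's actual validator):

    def convert_to_bool(val):
        # Hypothetical stand-in for nabu.pipeline.config_validators.convert_to_bool
        truthy, falsy = ("1", "yes", "true", "on"), ("0", "no", "false", "off")
        if isinstance(val, bool):
            return val, None
        val_s = str(val).strip().lower()
        if val_s in truthy:
            return True, None
        if val_s in falsy:
            return False, None
        return None, "invalid boolean: %s" % val

    print(bool("0"))                  # True  -> the old check misfires on "0"
    print(convert_to_bool("0")[0])    # False -> the new check behaves as expected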
nabu/pipeline/fullfield/reconstruction.py
CHANGED
@@ -367,6 +367,7 @@ class FullFieldReconstructor:
         # overlap = ceil(delta_z * d2 / (d1 + d2)) # sqrt(2) missing ?

         max_overlap = ceil(n_z * d2 / (d1 + d2))  # sqrt(2) missing ?
+        max_overlap = max(max_overlap, 10)  # use at least 10 pixels

         return (max_overlap, 0)
nabu/preproc/flatfield.py
CHANGED
@@ -456,10 +456,10 @@ class FlatFieldDataUrls(FlatField):


 class PCAFlatsNormalizer:
-    """This class implement a flatfield normalization based on a PCA of a series of
+    """This class implement a flatfield normalization based on a PCA of a series of acquired flatfields.
     The PCA decomposition is handled by a PCAFlatsDecomposer object.

-    This implementation was proposed by Jailin C. et al in https://
+    This implementation was proposed by Jailin C. et al in https://journals.iucr.org/s/issues/2017/01/00/fv5055/

     Code initially written by ID11 @ ESRF staff.
     Jonathan Wright - Implementation based on research paper

@@ -619,7 +619,7 @@ class PCAFlatsDecomposer:
     """This class implements a PCA decomposition of a serie of acquired flatfields.
     The PCA decomposition is used to normalize the projections through a PCAFLatNormalizer object.

-    This implementation was proposed by Jailin C. et al in https://
+    This implementation was proposed by Jailin C. et al in https://journals.iucr.org/s/issues/2017/01/00/fv5055/

     Code initially written by ID11 @ ESRF staff.
     Jonathan Wright - Implementation based on research paper
nabu/reconstruction/iterative.py
ADDED
@@ -0,0 +1,30 @@
+import numpy as np
+
+
+class IterativeBase:
+
+    backend = None  # placeholder
+    implementation = None  # placeholder
+
+    default_extra_options = {
+        "axis_correction": None,
+        "centered_axis": False,
+        "clip_outer_circle": False,
+        "scale_factor": None,
+        "outer_circle_value": 0.0,
+    }
+
+    backend_processing_class = ProcessingBase
+
+    def __init__(
+        self,
+        sino_shape,
+        angles=None,
+        rot_center=None,
+        halftomo=False,
+        filter_name=None,
+        slice_roi=None,
+        scale_factor=None,
+        extra_options=None,
+        backend_options=None,
+    ): ...
nabu/resources/dataset_analyzer.py
CHANGED
@@ -396,6 +396,11 @@ class EDFDatasetAnalyzer(DatasetAnalyzer):
     def scan_dirname(self):
         return self.dataset_scanner.path

+    def get_excluded_projections_indices(self, including_other_frames_types=True):
+        if not (including_other_frames_types):
+            raise NotImplementedError
+        return self.dataset_scanner.get_ignored_projection_indices()
+

 class HDF5DatasetAnalyzer(DatasetAnalyzer):
     """

@@ -606,6 +611,36 @@ class HDF5DatasetAnalyzer(DatasetAnalyzer):
     def get_frame(self, idx):
         return get_data(self.dataset_scanner.frames[idx].url)

+    def get_frames_indices(self, frame_type):
+        return self._select_according_to_frame_type(np.arange(self.dataset_scanner.image_key_control.size), frame_type)
+
+    def index_to_proj_number(self, proj_index):
+        """
+        Return the projection *number*, from its frame *index*.
+
+        For example if there are 11 flats before projections,
+        then projections will have indices [11, 12, .....] (possibly not contiguous)
+        while their number is [0, 1, ..., ] (contiguous, starts from 0)
+        """
+        all_projs_indices = self.get_frames_indices("projection")
+        return search_sorted(all_projs_indices, proj_index)
+
+    def get_excluded_projections_indices(self, including_other_frames_types=True):
+        # Get indices of ALL projections (even excluded ones)
+        # the index accounts for flats/darks !
+        # Get indices of excluded projs (again, accounting for flats/darks)
+        ignored_projs_indices = self.dataset_scanner.get_ignored_projection_indices()
+        ignored_projs_indices = [
+            idx for idx in ignored_projs_indices if self.dataset_scanner.frames[idx].is_control is False
+        ]
+        if including_other_frames_types:
+            return ignored_projs_indices
+        # Get indices of excluded projs, now relative to the pure projections stack
+        ignored_projs_indices_rel = [
+            self.index_to_proj_number(ignored_proj_idx_abs) for ignored_proj_idx_abs in ignored_projs_indices
+        ]
+        return ignored_projs_indices_rel
+

 def get_angle_at_index(all_angles, index):
     """
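index_to_proj_number() maps an absolute frame index (which counts darks and flats) to a projection number starting at 0, as described in the docstring above. A self-contained sketch of that mapping (nabu uses its own search_sorted helper; np.searchsorted plays that role here for illustration):

    import numpy as np

    # 3 darks (key 2), 4 flats (key 1), 10 projections (key 0)
    image_key = np.array([2] * 3 + [1] * 4 + [0] * 10)
    all_projs_indices = np.where(image_key == 0)[0]   # absolute frame indices 7..16

    def index_to_proj_number(proj_index):
        return int(np.searchsorted(all_projs_indices, proj_index))

    print(index_to_proj_number(7))    # 0  (first projection)
    print(index_to_proj_number(12))   # 5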
nabu/resources/tests/test_dataset_analyzer.py
ADDED
@@ -0,0 +1,37 @@
+import pytest
+import numpy as np
+from nabu.testutils import get_dummy_nxtomo_info
+from nabu.resources.dataset_analyzer import analyze_dataset
+
+
+@pytest.fixture(scope="class")
+def bootstrap_nx(request):
+    cls = request.cls
+    cls.nx_fname, cls.data_desc, cls.image_key, cls.projs_vals, cls.darks_vals, cls.flats1_vals, cls.flats2_vals = (
+        get_dummy_nxtomo_info()
+    )
+
+
+@pytest.mark.usefixtures("bootstrap_nx")
+class TestNXDataset:
+
+    def test_exclude_projs_angular_range(self):
+        dataset_info_with_all_projs = analyze_dataset(self.nx_fname)
+
+        # Test exclude angular range - angles min and max in degrees
+        angular_ranges_to_test = [(0, 15), (5, 6), (50, 58.5)]
+        for angular_range in angular_ranges_to_test:
+            angle_min, angle_max = angular_range
+            dataset_info = analyze_dataset(
+                self.nx_fname,
+                extra_options={"exclude_projections": {"type": "angular_range", "range": [angle_min, angle_max]}},
+            )
+            excluded_projs_indices = dataset_info.get_excluded_projections_indices()
+            # Check that get_excluded_projections_indices() angles are correct
+            for excluded_proj_index in excluded_projs_indices:
+                frame_angle_deg = dataset_info.dataset_scanner.frames[excluded_proj_index].rotation_angle
+                assert angle_min <= frame_angle_deg and frame_angle_deg <= angle_max
+
+            assert set(dataset_info_with_all_projs.projections.keys()) - set(dataset_info.projections.keys()) == set(
+                excluded_projs_indices
+            )
nabu/testutils.py
CHANGED
@@ -1,3 +1,4 @@
+from dataclasses import dataclass
 from itertools import product
 import tarfile
 import os

@@ -5,6 +6,7 @@ import numpy as np
 from scipy.signal.windows import gaussian
 from silx.resources import ExternalResources
 from silx.io.dictdump import nxtodict, dicttonx
+from nxtomo.application.nxtomo import ImageKey

 utilstest = ExternalResources(
     project="nabu", url_base="http://www.silx.org/pub/nabu/data/", env_key="NABU_DATA", timeout=60

@@ -56,6 +58,38 @@ def get_data(*dataset_path):
     return np.load(dataset_downloaded_path)


+@dataclass
+class SimpleNXTomoDescription:
+    n_darks: int = 0
+    n_flats1: int = 0
+    n_projs: int = 0
+    n_flats2: int = 0
+    n_align: int = 0
+    frame_shape: tuple = None
+    dtype: np.dtype = np.uint16
+
+
+def get_dummy_nxtomo_info():
+    nx_fname = utilstest.getfile("dummy_nxtomo.nx")
+    data_desc = SimpleNXTomoDescription(
+        n_darks=10, n_flats1=11, n_projs=100, n_flats2=11, n_align=12, frame_shape=(11, 10), dtype=np.uint16
+    )
+    image_key = np.concatenate(
+        [
+            np.zeros(data_desc.n_darks, dtype=np.int32) + ImageKey.DARK_FIELD.value,
+            np.zeros(data_desc.n_flats1, dtype=np.int32) + ImageKey.FLAT_FIELD.value,
+            np.zeros(data_desc.n_projs, dtype=np.int32) + ImageKey.PROJECTION.value,
+            np.zeros(data_desc.n_flats2, dtype=np.int32) + ImageKey.FLAT_FIELD.value,
+            np.zeros(data_desc.n_align, dtype=np.int32) + ImageKey.ALIGNMENT.value,
+        ]
+    )
+    projs_vals = np.arange(data_desc.n_projs) + data_desc.n_flats1 + data_desc.n_darks
+    darks_vals = np.arange(data_desc.n_darks)
+    flats1_vals = np.arange(data_desc.n_darks, data_desc.n_darks + data_desc.n_flats1)
+    flats2_vals = np.arange(data_desc.n_darks, data_desc.n_darks + data_desc.n_flats2)
+    return nx_fname, data_desc, image_key, projs_vals, darks_vals, flats1_vals, flats2_vals
+
+
 def get_array_of_given_shape(img, shape, dtype):
     """
     From a given image, returns an array of the wanted shape and dtype.
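Sketch of how the new get_dummy_nxtomo_info() helper is consumed by the fixtures in the test files above. It fetches dummy_nxtomo.nx through utilstest, so this only runs where that test resource is reachable:

    from nabu.testutils import get_dummy_nxtomo_info

    nx_fname, data_desc, image_key, projs_vals, darks_vals, flats1_vals, flats2_vals = get_dummy_nxtomo_info()
    print(data_desc.n_projs, data_desc.frame_shape)   # 100 (11, 10)
    print(image_key.size)                             # 10 + 11 + 100 + 11 + 12 = 144 frames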
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/METADATA
CHANGED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nabu
-Version: 2025.1.
+Version: 2025.1.1
 Summary: Nabu - Tomography software
 Author-email: Pierre Paleo <pierre.paleo@esrf.fr>, Henri Payno <henri.payno@esrf.fr>, Alessandro Mirone <mirone@esrf.fr>, Jérôme Lesaint <jerome.lesaint@esrf.fr>
 Maintainer-email: Pierre Paleo <pierre.paleo@esrf.fr>
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/RECORD
CHANGED
@@ -1,10 +1,9 @@
 doc/conf.py,sha256=3xtCarCHrXPr50GbeRDuH-o3Jzojw7mpr7vpGfZPLAE,3787
 doc/create_conf_doc.py,sha256=IVOdP70KvbW9WS_UQu3Iyd0YfS60E2fJ5IDtQ_s4cDw,1143
-doc/doc_config.py,sha256=anqeOVjqE2e7eVzg7yuh9dvIneTkrA5doGl1cVBqT7Q,730
 doc/get_mathjax.py,sha256=VIvKRCdDuF2VoY8JD3mSey9XX13AZMmwTJBHdt1tUs4,1012
-nabu/__init__.py,sha256=
+nabu/__init__.py,sha256=cquIT6AKfmVtn7EkXyjYjG8hLJ0tMLDKmD2vmBh_vnY,270
 nabu/tests.py,sha256=hOJD1GGxn_KE1bWMoxfjnjzI7d9JBUpoc9B2_tVFiEk,1370
-nabu/testutils.py,sha256=
+nabu/testutils.py,sha256=4I62IP3VLOJx8JvGBgY1t4i4CiJMWfT_aUopxg39JIM,10047
 nabu/utils.py,sha256=tJI64BNXMhD6W293fwwcgf9bvTalYG_5AwVGYkgi6tU,27179
 nabu/app/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nabu/app/bootstrap.py,sha256=3yLZJmrmQBmPJMBtE2ih2cspfqOy5T_UN2U8B3i_hkI,3266

@@ -70,7 +69,7 @@ nabu/estimation/cor.py,sha256=8RsyZ_MYT04EWvP2tLThftn_bD3cz9-uWo8D5FpXLKQ,50353
 nabu/estimation/cor_sino.py,sha256=qN6y16UVqoDX696JYyn3iWXDxQo0FMcFTuGbT92BW_s,18959
 nabu/estimation/distortion.py,sha256=DEXizQpgHBXmrhbQ0kuEchicxmiDgmU2qrh8bCgSezg,4701
 nabu/estimation/focus.py,sha256=CdtMFk6Xt4qq1JwwKDmDcVCeVocz-mpV0saikO0B1mc,17995
-nabu/estimation/motion.py,sha256=
+nabu/estimation/motion.py,sha256=5p6Tsea-77Kc9QaYKkD33aQ_ZA5mSO1arHQ4tjclEJ0,26927
 nabu/estimation/tilt.py,sha256=6R8l8gnf00m-xdgLhvQmkvZ3lIO_8hDglNsFjzlzZ4E,8834
 nabu/estimation/translation.py,sha256=qoxT8VT38TZqrqLRwOef7-wBE5OeEObp_Qy9T2De2Do,10184
 nabu/estimation/utils.py,sha256=31d17Ng__NxcLOtGXPmbPPW1veh1m0poCvRgDCJssUA,347

@@ -82,15 +81,15 @@ nabu/estimation/tests/test_motion_estimation.py,sha256=WKUpkVand8VszoBIvsaEnhKqv
 nabu/estimation/tests/test_tilt.py,sha256=KIgTJqQvNfWndm8f3aRSdznWFl3AdQhYXiZPKLseYOs,1672
 nabu/estimation/tests/test_translation.py,sha256=RkOnCYgk9DZGKlIka1snqTv4wbIz_nG7-EHAxnBHsJU,2999
 nabu/io/__init__.py,sha256=AbQgj4-fCCHOKynO_PyAR9ejnFSuWKgroxxhxWVpjyQ,120
-nabu/io/cast_volume.py,sha256=
+nabu/io/cast_volume.py,sha256=y7MRsrC5WBbY0BtlnqQJWxByEP43Gq1qrq969bADGBI,22220
 nabu/io/detector_distortion.py,sha256=qO1Z6gejkBrixThvU_sLwH3UfLAe8aAO63YQ8z7PH78,11750
-nabu/io/reader.py,sha256=
+nabu/io/reader.py,sha256=CRQfzLqG10QNzKpPY1j3z2toEAti2bNgImMwWnHQfVE,41775
 nabu/io/reader_helical.py,sha256=q3LOmu6F_4Uxi3rZZWJ-rsix2Lgu_saXXdiJF8TLi24,4533
 nabu/io/utils.py,sha256=iSeBhOIcLlKgiHXdDLfAdyvSrgdv6I5iQUcjT2gv_WQ,9303
 nabu/io/writer.py,sha256=0bZ2X0hvT-r_2Uu2u3fIfXZH7clxKIDhBwHmBS0ZcxM,15811
 nabu/io/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nabu/io/tests/test_cast_volume.py,sha256=UGrIYVp1fxm372YFJinLWAL-xCtcR731axYchhonZHY,10774
-nabu/io/tests/test_readers.py,sha256=
+nabu/io/tests/test_readers.py,sha256=70vD7qPuhVjZnebDAR9zrXjk1cD65bCH_F_5xb0JfbA,18252
 nabu/io/tests/test_remove_volume.py,sha256=q632Rq0qLneDcJLHGWCR8HYlvdnTvaEEyTQAsNv1ggM,5967
 nabu/io/tests/test_writers.py,sha256=EJp3DKeaRko7FVTgGdjrl2tt6jES228_XT5Jb767I0w,3137
 nabu/misc/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0

@@ -110,7 +109,7 @@ nabu/misc/transpose.py,sha256=ogJ1PPYO0sOPUfCgjk-Ho5cTjlBbP-KXGqhCgTj75DY,223
 nabu/misc/unsharp.py,sha256=3xYsdiLTqTDlE8G-tIY7EeTf3nKxPrvMHOnXgkcKWvU,209
 nabu/misc/unsharp_cuda.py,sha256=-csDxfQt_naYn5O8fOxksYNqyFKxbF8lV7yXlPd2XoM,235
 nabu/misc/unsharp_opencl.py,sha256=HCPoobPwi2LEAfqGQ2qekjQi6G2VDKmqJQZTDXySv1Y,252
-nabu/misc/utils.py,sha256=
+nabu/misc/utils.py,sha256=b0JlnRmhEyx-9SBycRGQxPqNa25uTZqOFtGOF3Fyxdg,3904
 nabu/misc/tests/__init__.py,sha256=AbpHGcgLb-kRsJGnwFEktk7uzpZOCcBY74-YBdrKVGs,1
 nabu/misc/tests/test_binning.py,sha256=vcZZINi_v_KOw4DMqLIgPlF-CwLTG102Yf7dMQ5_SEo,1890
 nabu/misc/tests/test_interpolation.py,sha256=H8ZJ7dppCcpxVa9wOBkJ9U0mymH5lGyCJ0LZCYIK8PE,2389

@@ -135,21 +134,21 @@ nabu/pipeline/config_validators.py,sha256=Wx9nnUpfxTJ8H2ODkUyjImTtJS9V68TafLH1Te
 nabu/pipeline/datadump.py,sha256=tKW-Dl28FU2vf1OuIie_YI1-GT1DMreis57sOR-DuGU,7026
 nabu/pipeline/dataset_validator.py,sha256=etQw9NC_YGsdWCgjsn8aJ3WfvcRuJlLVZlWoqhvvo-8,9263
 nabu/pipeline/detector_distortion_provider.py,sha256=ru1AxbcuO-FA8FYooPBWgp1lzdSDUtzFUC1A_sS8jME,920
-nabu/pipeline/estimators.py,sha256=
+nabu/pipeline/estimators.py,sha256=GhBs5bW4J2ZgDK30Pgg-hcb-B6SYQUPIP5sZmQhBpYA,51842
 nabu/pipeline/params.py,sha256=UKMQWFQnrlNMW5aIGty-JoGmBdkS6tpoAXCjW8n6FX8,4229
 nabu/pipeline/processconfig.py,sha256=3xx2Lc8uEzPAqSMwUncr4RCiCtKn2c7wnXXbPSn8GNo,7719
 nabu/pipeline/reader.py,sha256=wkxPHYOi_C8dHNc7kddB8AMtFuW7GjsP_tm6SJeHlEY,4792
 nabu/pipeline/utils.py,sha256=5GGhT9Wu7tHDlF3w7YNjTTYkNBl5xHa9EcRZSGFUWtM,3538
 nabu/pipeline/writer.py,sha256=NVeAtkWDtXg5UJ4C3wsbkfM23ZnK64atCWl8tjmjsuY,8166
 nabu/pipeline/fullfield/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
-nabu/pipeline/fullfield/chunked.py,sha256=
+nabu/pipeline/fullfield/chunked.py,sha256=5dGoP_vCajI7kl2b9AysufG6DV1gdX9uvF0THrhNDaA,43330
 nabu/pipeline/fullfield/chunked_cuda.py,sha256=US5prrhNjsx3QVHkY5duQp8uFcGdgYEPzVS7nfWkJRw,6047
 nabu/pipeline/fullfield/computations.py,sha256=uqf7LvuDPm7n51BpP8eb8vTewDgRFyzSDP249g3FWBE,10098
 nabu/pipeline/fullfield/dataset_validator.py,sha256=HK_bmlII9pc59PXCgKJOyLv7Xu3DYv_jbH3RmQSgzvI,2933
 nabu/pipeline/fullfield/get_double_flatfield.py,sha256=uYFDAii6Nw4RCUQO_6Id6tXLdmtVbj_pxAHQWennSeE,5411
-nabu/pipeline/fullfield/nabu_config.py,sha256=
-nabu/pipeline/fullfield/processconfig.py,sha256=
-nabu/pipeline/fullfield/reconstruction.py,sha256=
+nabu/pipeline/fullfield/nabu_config.py,sha256=XcvPpJjyTmp8QSWK1lcNWRa1ybL0AQ0poj3O_CgDYtM,33150
+nabu/pipeline/fullfield/processconfig.py,sha256=72hjxgClKcxmzypVpvcWzkzoXP7Ypu5VpRxMjYQVnJQ,38461
+nabu/pipeline/fullfield/reconstruction.py,sha256=Fi3GCsVjO3CpYIWZM-4i6S2UOzG7iqVeukQ375j09Pg,38075
 nabu/pipeline/helical/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nabu/pipeline/helical/dataset_validator.py,sha256=HdKjUSj3PIpJb1dKSzJg8s4zXbAnMPWaPn8kvp_xQEs,657
 nabu/pipeline/helical/fbp.py,sha256=MKn587bO5Lj7yFu-Sll2RkOIY5r3rC-fmC10SzFU5i0,5841

@@ -177,7 +176,7 @@ nabu/preproc/distortion.py,sha256=XksQNrrSBfZS7mlvIdVEMZjw839ppQWP6AitTLcgfb0,33
 nabu/preproc/double_flatfield.py,sha256=WcYsNuotgQgm_KaioNa3OVI8rGfk3Wrn_YCW5v4mo4w,7895
 nabu/preproc/double_flatfield_cuda.py,sha256=lqgvZyeujdWJ5nF_GNRMQx7punjqA3SZ8K3IIyL3HDY,6197
 nabu/preproc/double_flatfield_variable_region.py,sha256=yiyvfGLFv3b93aKzHw84EQszPwQHfBv0PqtlQ8khvm4,2258
-nabu/preproc/flatfield.py,sha256=
+nabu/preproc/flatfield.py,sha256=t0NaV0NYaUAKtfWAop2srhL7wrfw6sGPMUAjs--LE3k,30574
 nabu/preproc/flatfield_cuda.py,sha256=Iiqv7bHa870DZOH68L19xiN1kG9I9JXuckFfA3khGtY,5482
 nabu/preproc/flatfield_variable_region.py,sha256=RVmSW515vgkHagjqotPNPUe97oQooHgdqkBn6hPH_2Q,3142
 nabu/preproc/phase.py,sha256=nRFhnHN_Bmmu5AHDcoO-Kt59sXYFSQaTljHZ5dlZiA0,13857

@@ -228,7 +227,6 @@ nabu/processing/tests/test_rotation.py,sha256=5O1yHthJfdoP-2loXob96j_V2IwI2eb8ro
 nabu/processing/tests/test_transpose.py,sha256=hTG17wTaB5Wv6twbW3ZFhBv6BYfqJY7DTQPoO0-KdkM,2760
 nabu/processing/tests/test_unsharp.py,sha256=R3ovbwDDp3ccy2A8t6CcUVELXRWkED5EnQdN2FQOfQM,4391
 nabu/reconstruction/__init__.py,sha256=EmKVvx_-FJvzJngG4ielIC7FhMCpI1Waaflg_lF44tk,163
-nabu/reconstruction/astra.py,sha256=qnFYabU-Bzgys8hXjIBcwO2NazrvhNXUYFIkMHc6BmM,10444
 nabu/reconstruction/cone.py,sha256=tSjaMDHeFV-h_IFbxUqSbhqlWmvlBcJQ8u89Y9Q9gg8,20559
 nabu/reconstruction/fbp.py,sha256=ptHcQsZTxgMFa9PhFJeTzDekibWR-P1BUj2SvRrk770,5684
 nabu/reconstruction/fbp_base.py,sha256=usd49ctQMI5w6uU5xn8qBsN7gI95iU9a3jRZgSPmOJk,18653

@@ -237,6 +235,7 @@ nabu/reconstruction/filtering.py,sha256=monJnA_kk9k_Gy7bMAos9I-XgU8czWhf9vBth6ik
 nabu/reconstruction/filtering_cuda.py,sha256=_S-BZMhtnNt8ugePSmf-LF7JvMPCOyGPUMSseymgwZw,4019
 nabu/reconstruction/filtering_opencl.py,sha256=v-sUzbnRp6M1B221F-iSh-crBCGknjHYYsjFs05VhDY,3788
 nabu/reconstruction/hbp.py,sha256=Qll7i20LWxUo1-SHRxemkYAolBTP8HScwt1OvWmD2r0,18642
+nabu/reconstruction/iterative.py,sha256=urZg_Aj3P2wGfsbjlyvmOvf2yd-kXNt_f4Sjlj6oxhE,637
 nabu/reconstruction/mlem.py,sha256=wgC2pKl6RKB-f2318worB9VE-qCGoQcz24aKbtkrJos,5794
 nabu/reconstruction/projection.py,sha256=SNocaOY9HuNiHs-VxkW9IS707JPJnd3sDjAbf7aIw2E,9081
 nabu/reconstruction/reconstructor.py,sha256=16xxHcK4iie-uh-trf6x_IuvgxJKBvQRTE5B8tnc4F8,7358

@@ -258,7 +257,7 @@ nabu/reconstruction/tests/test_reconstructor.py,sha256=xzfEM0j48ScQoGqWVcAK74HG9
 nabu/reconstruction/tests/test_sino_normalization.py,sha256=qNnpVUp3UcFGyLlSP0rCzE5hxdV6YENL9AF6mo72WcQ,3669
 nabu/resources/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 nabu/resources/cor.py,sha256=-mcrTbj3G7o4PP5E_gIRo2j6_-ADmMkkOc_0CyQv84c,170
-nabu/resources/dataset_analyzer.py,sha256=
+nabu/resources/dataset_analyzer.py,sha256=X52X2YAOGBSMRfWko4Z2-oL_VePpc27A1GZbbTAk5yI,25739
 nabu/resources/gpu.py,sha256=oQA8PpPdyuIzpxq1PwVd9gJdyCiLIry2godUV1AbPW0,5769
 nabu/resources/logger.py,sha256=xV9UoLZBw3wXAWYfOgqrnOtzJc9aC1VNO4LM2cHxWJg,3738
 nabu/resources/nxflatfield.py,sha256=kgHPf_jGQiuJ_EFe8fladRkx_PM6PfdRtcf5Tf2cYgU,12385

@@ -271,6 +270,7 @@ nabu/resources/templates/id16_holo.conf,sha256=sDd_rEJGZjOGVAsGub5sT2arfXDnc_sxy
 nabu/resources/templates/id16a_fluo.conf,sha256=Nz1etzO2fSwksi7CThWJ5T1kZEdyBe8rMO7puNJ93Hc,542
 nabu/resources/templates/id19_pag.conf,sha256=u4fFPEBprzOW9_5_ChkIgowQcYpLhjmA8Gwm5XgC4Jc,384
 nabu/resources/tests/__init__.py,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
+nabu/resources/tests/test_dataset_analyzer.py,sha256=qp0cCwZfZMITWNDvxa-q84xpfUyFnQygDwCLN7QMSOs,1590
 nabu/resources/tests/test_extract.py,sha256=6ufLTc4Wgf9-FLsscpG2TbUDEn767iLVdyOrwRQyI9A,443
 nabu/resources/tests/test_nxflatfield.py,sha256=XCiFULzNyApdhr89a3a3BseJMPUN4JnzxQ8VdBaA8ac,4225
 nabu/resources/tests/test_units.py,sha256=F2jFTck-1UwYET1MwTtX6ntzYUosfwOJkugSencGgz8,2155

@@ -319,9 +319,9 @@ nabu/thirdparty/pore3d_deringer_munch.py,sha256=o4bisnFc-wMjuohWBT8wgWmfNehPQGtC
 nabu/thirdparty/tomocupy_remove_stripe.py,sha256=Khe4zFf0kRzu65Yxnvq58gt1ljOztqJGdMDhVAiM7lM,24363
 nabu/thirdparty/tomopy_phase.py,sha256=hK4oPpkogLOhv23XzzEXQY2u3r8fJvASY_bINVs6ERE,8634
 nabu/thirdparty/tomwer_load_flats_darks.py,sha256=ZNoVAinUb_wGYbfvs_4BVnWsjsQmNxSvCh1bWhR2WWg,5611
-nabu-2025.1.
-nabu-2025.1.
-nabu-2025.1.
-nabu-2025.1.
-nabu-2025.1.
-nabu-2025.1.
+nabu-2025.1.1.dist-info/licenses/LICENSE,sha256=1eAIPSnEsnSFNUODnLtNtQTs76exG3ZxJ1DJR6zoUBA,1066
+nabu-2025.1.1.dist-info/METADATA,sha256=3wyX7ZF2Sqn5Z_w80g6_dvSEeT6Uct06l8DvCoYRxpA,4271
+nabu-2025.1.1.dist-info/WHEEL,sha256=_zCd3N1l69ArxyTb8rzEoP9TpbYXkqRFSNOD5OuxnTs,91
+nabu-2025.1.1.dist-info/entry_points.txt,sha256=YxzCY5CNQ1XHrIGbRKg-BgC1Jy7QaCITdITpyhhxpZU,1338
+nabu-2025.1.1.dist-info/top_level.txt,sha256=fsm_N3eXLRZk2QXF9OSKPNDPFXOz8FAQjHh5avT3dok,9
+nabu-2025.1.1.dist-info/RECORD,,
doc/doc_config.py
DELETED
@@ -1,32 +0,0 @@
-#!/usr/bin/env python
-
-from nabu.resources.nabu_config import nabu_config
-
-
-def generate(file_):
-    def write(content):
-        print(content, file=file_)
-    for section, values in nabu_config.items():
-        if section == "about":
-            continue
-        write("## %s\n" % section)
-        for key, val in values.items():
-            if val["type"] == "unsupported":
-                continue
-            write(val["help"] + "\n")
-            write(
-                "```ini\n%s = %s\n```"
-                % (key, val["default"])
-            )
-
-
-
-if __name__ == "__main__":
-
-    import sys, os
-    print(os.path.abspath(__file__))
-    exit(0)
-
-    fname = "/tmp/test.md"
-    with open(fname, "w") as f:
-        generate(f)
nabu/reconstruction/astra.py
DELETED
@@ -1,245 +0,0 @@
-# ruff: noqa
-try:
-    import astra
-
-    __have_astra__ = True
-except ImportError:
-    __have_astra__ = False
-    astra = None
-
-
-class AstraReconstructor:
-    """
-    Base class for reconstructors based on the Astra toolbox
-    """
-
-    default_extra_options = {
-        "axis_correction": None,
-        "clip_outer_circle": False,
-        "scale_factor": None,
-        "filter_cutoff": 1.0,
-        "outer_circle_value": 0.0,
-    }
-
-    def __init__(
-        self,
-        sinos_shape,
-        angles=None,
-        volume_shape=None,
-        rot_center=None,
-        pixel_size=None,
-        padding_mode="zeros",
-        filter_name=None,
-        slice_roi=None,
-        cuda_options=None,
-        extra_options=None,
-    ):
-        self._configure_extra_options(extra_options)
-        self._init_cuda(cuda_options)
-        self._set_sino_shape(sinos_shape)
-        self._orig_prog_geom = None
-        self._init_geometry(
-            source_origin_dist,
-            origin_detector_dist,
-            pixel_size,
-            angles,
-            volume_shape,
-            rot_center,
-            relative_z_position,
-            slice_roi,
-        )
-        self._init_fdk(padding_mode, filter_name)
-        self._alg_id = None
-        self._vol_id = None
-        self._proj_id = None
-
-    def _configure_extra_options(self, extra_options):
-        self.extra_options = self.default_extra_options.copy()
-        self.extra_options.update(extra_options or {})
-
-    def _init_cuda(self, cuda_options):
-        cuda_options = cuda_options or {}
-        self.cuda = CudaProcessing(**cuda_options)
-
-    def _set_sino_shape(self, sinos_shape):
-        if len(sinos_shape) != 3:
-            raise ValueError("Expected a 3D shape")
-        self.sinos_shape = sinos_shape
-        self.n_sinos, self.n_angles, self.prj_width = sinos_shape
-
-    def _set_pixel_size(self, pixel_size):
-        if pixel_size is None:
-            det_spacing_y = det_spacing_x = 1
-        elif np.iterable(pixel_size):
-            det_spacing_y, det_spacing_x = pixel_size
-        else:
-            # assuming scalar
-            det_spacing_y = det_spacing_x = pixel_size
-        self._det_spacing_y = det_spacing_y
-        self._det_spacing_x = det_spacing_x
-
-    def _set_slice_roi(self, slice_roi):
-        self.slice_roi = slice_roi
-        self._vol_geom_n_x = self.n_x
-        self._vol_geom_n_y = self.n_y
-        self._crop_data = True
-        if slice_roi is None:
-            return
-        start_x, end_x, start_y, end_y = slice_roi
-        if roi_is_centered(self.volume_shape[1:], (slice(start_y, end_y), slice(start_x, end_x))):
-            # Astra can only reconstruct subregion centered around the origin
-            self._vol_geom_n_x = self.n_x - start_x * 2
-            self._vol_geom_n_y = self.n_y - start_y * 2
-        else:
-            raise NotImplementedError(
-                "Astra supports only slice_roi centered around origin (got slice_roi=%s with n_x=%d, n_y=%d)"
-                % (str(slice_roi), self.n_x, self.n_y)
-            )
-
-    def _init_geometry(
-        self,
-        source_origin_dist,
-        origin_detector_dist,
-        pixel_size,
-        angles,
-        volume_shape,
-        rot_center,
-        relative_z_position,
-        slice_roi,
-    ):
-        if angles is None:
-            self.angles = np.linspace(0, 2 * np.pi, self.n_angles, endpoint=True)
-        else:
-            self.angles = angles
-        if volume_shape is None:
-            volume_shape = (self.sinos_shape[0], self.sinos_shape[2], self.sinos_shape[2])
-        self.volume_shape = volume_shape
-        self.n_z, self.n_y, self.n_x = self.volume_shape
-        self.source_origin_dist = source_origin_dist
-        self.origin_detector_dist = origin_detector_dist
-        self.magnification = 1 + origin_detector_dist / source_origin_dist
-        self._set_slice_roi(slice_roi)
-        self.vol_geom = astra.create_vol_geom(self._vol_geom_n_y, self._vol_geom_n_x, self.n_z)
-        self.vol_shape = astra.geom_size(self.vol_geom)
-        self._cor_shift = 0.0
-        self.rot_center = rot_center
-        if rot_center is not None:
-            self._cor_shift = (self.sinos_shape[-1] - 1) / 2.0 - rot_center
-        self._set_pixel_size(pixel_size)
-        self._axis_corrections = self.extra_options.get("axis_correction", None)
-        self._create_astra_proj_geometry(relative_z_position)
-
-    def _create_astra_proj_geometry(self, relative_z_position):
-        # This object has to be re-created each time, because once the modifications below are done,
-        # it is no more a "cone" geometry but a "cone_vec" geometry, and cannot be updated subsequently
-        # (see astra/functions.py:271)
-        self.proj_geom = astra.create_proj_geom(
-            "cone",
-            self._det_spacing_x,
-            self._det_spacing_y,
-            self.n_sinos,
-            self.prj_width,
-            self.angles,
-            self.source_origin_dist,
-            self.origin_detector_dist,
-        )
-        self.relative_z_position = relative_z_position or 0.0
-        # This will turn the geometry of type "cone" into a geometry of type "cone_vec"
-        if self._orig_prog_geom is None:
-            self._orig_prog_geom = self.proj_geom
-        self.proj_geom = astra.geom_postalignment(self.proj_geom, (self._cor_shift, 0))
-        # (src, detector_center, u, v) = (srcX, srcY, srcZ, dX, dY, dZ, uX, uY, uZ, vX, vY, vZ)
-        vecs = self.proj_geom["Vectors"]
-
-        # To adapt the center of rotation:
-        # dX = cor_shift * cos(theta) - origin_detector_dist * sin(theta)
-        # dY = origin_detector_dist * cos(theta) + cor_shift * sin(theta)
-        if self._axis_corrections is not None:
-            # should we check that dX and dY match the above formulas ?
-            cor_shifts = self._cor_shift + self._axis_corrections
-            vecs[:, 3] = cor_shifts * np.cos(self.angles) - self.origin_detector_dist * np.sin(self.angles)
-            vecs[:, 4] = self.origin_detector_dist * np.cos(self.angles) + cor_shifts * np.sin(self.angles)
-
-        # To adapt the z position:
-        # Component 2 of vecs is the z coordinate of the source, component 5 is the z component of the detector position
-        # We need to re-create the same inclination of the cone beam, thus we need to keep the inclination of the two z positions.
-        # The detector is centered on the rotation axis, thus moving it up or down, just moves it out of the reconstruction volume.
-        # We can bring back the detector in the correct volume position, by applying a rigid translation of both the detector and the source.
-        # The translation is exactly the amount that brought the detector up or down, but in the opposite direction.
-        vecs[:, 2] = -self.relative_z_position
-
-    def _set_output(self, volume):
-        if volume is not None:
-            expected_shape = self.vol_shape  # if not (self._crop_data) else self._output_cropped_shape
-            self.cuda.check_array(volume, expected_shape)
-            self.cuda.set_array("output", volume)
-        if volume is None:
-            self.cuda.allocate_array("output", self.vol_shape)
-        d_volume = self.cuda.get_array("output")
-        z, y, x = d_volume.shape
-        self._vol_link = astra.data3d.GPULink(d_volume.ptr, x, y, z, d_volume.strides[-2])
-        self._vol_id = astra.data3d.link("-vol", self.vol_geom, self._vol_link)
-
-    def _set_input(self, sinos):
-        self.cuda.check_array(sinos, self.sinos_shape)
-        self.cuda.set_array("sinos", sinos)  # self.cuda.sinos is now a GPU array
-        # TODO don't create new link/proj_id if ptr is the same ?
-        # But it seems Astra modifies the input sinogram while doing FDK, so this might be not relevant
-        d_sinos = self.cuda.get_array("sinos")
-
-        # self._proj_data_link = astra.data3d.GPULink(d_sinos.ptr, self.prj_width, self.n_angles, self.n_z, sinos.strides[-2])
-        self._proj_data_link = astra.data3d.GPULink(
-            d_sinos.ptr, self.prj_width, self.n_angles, self.n_sinos, d_sinos.strides[-2]
-        )
-        self._proj_id = astra.data3d.link("-sino", self.proj_geom, self._proj_data_link)
-
-    def _preprocess_data(self):
-        d_sinos = self.cuda.sinos
-        for i in range(d_sinos.shape[0]):
-            self.sino_filter.filter_sino(d_sinos[i], output=d_sinos[i])
-
-    def _update_reconstruction(self):
-        cfg = astra.astra_dict("BP3D_CUDA")
-        cfg["ReconstructionDataId"] = self._vol_id
-        cfg["ProjectionDataId"] = self._proj_id
-        if self._alg_id is not None:
-            astra.algorithm.delete(self._alg_id)
-        self._alg_id = astra.algorithm.create(cfg)
-
-    def reconstruct(self, sinos, output=None, relative_z_position=None):
-        """
-        sinos: numpy.ndarray or pycuda.gpuarray
-            Sinograms, with shape (n_sinograms, n_angles, width)
-        output: pycuda.gpuarray, optional
-            Output array. If not provided, a new numpy array is returned
-        relative_z_position: int, optional
-            Position of the central slice of the slab, with respect to the full stack of slices.
-            By default it is set to zero, meaning that the current slab is assumed in the middle of the stack
-        """
-        self._create_astra_proj_geometry(relative_z_position)
-        self._set_input(sinos)
-        self._set_output(output)
-        self._preprocess_data()
-        self._update_reconstruction()
-        astra.algorithm.run(self._alg_id)
-        #
-        # NB: Could also be done with
-        # from astra.experimental import direct_BP3D
-        # projector_id = astra.create_projector("cuda3d", self.proj_geom, self.vol_geom, options=None)
-        # direct_BP3D(projector_id, self._vol_link, self._proj_data_link)
-        #
-        result = self.cuda.get_array("output")
-        if output is None:
-            result = result.get()
-        if self.extra_options.get("scale_factor", None) is not None:
-            result *= np.float32(self.extra_options["scale_factor"])  # in-place for pycuda
-        self.cuda.recover_arrays_references(["sinos", "output"])
-        return result
-
-    def __del__(self):
-        if getattr(self, "_alg_id", None) is not None:
-            astra.algorithm.delete(self._alg_id)
-        if getattr(self, "_vol_id", None) is not None:
-            astra.data3d.delete(self._vol_id)
-        if getattr(self, "_proj_id", None) is not None:
-            astra.data3d.delete(self._proj_id)
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/WHEEL
File without changes
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/entry_points.txt
File without changes
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/licenses/LICENSE
File without changes
{nabu-2025.1.0rc5.dist-info → nabu-2025.1.1.dist-info}/top_level.txt
File without changes