nabu 2024.2.13__py3-none-any.whl → 2025.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- doc/doc_config.py +32 -0
- nabu/__init__.py +1 -1
- nabu/app/bootstrap_stitching.py +4 -2
- nabu/app/cast_volume.py +16 -14
- nabu/app/cli_configs.py +102 -9
- nabu/app/compare_volumes.py +1 -1
- nabu/app/composite_cor.py +2 -4
- nabu/app/diag_to_pix.py +5 -6
- nabu/app/diag_to_rot.py +10 -11
- nabu/app/double_flatfield.py +18 -5
- nabu/app/estimate_motion.py +75 -0
- nabu/app/multicor.py +28 -15
- nabu/app/parse_reconstruction_log.py +1 -0
- nabu/app/pcaflats.py +122 -0
- nabu/app/prepare_weights_double.py +1 -2
- nabu/app/reconstruct.py +1 -7
- nabu/app/reconstruct_helical.py +5 -9
- nabu/app/reduce_dark_flat.py +5 -4
- nabu/app/rotate.py +3 -1
- nabu/app/stitching.py +7 -2
- nabu/app/tests/test_reduce_dark_flat.py +2 -2
- nabu/app/validator.py +1 -4
- nabu/cuda/convolution.py +1 -1
- nabu/cuda/fft.py +1 -1
- nabu/cuda/medfilt.py +1 -1
- nabu/cuda/padding.py +1 -1
- nabu/cuda/src/backproj.cu +6 -6
- nabu/cuda/src/cone.cu +4 -0
- nabu/cuda/src/hierarchical_backproj.cu +14 -0
- nabu/cuda/utils.py +2 -2
- nabu/estimation/alignment.py +17 -31
- nabu/estimation/cor.py +27 -33
- nabu/estimation/cor_sino.py +2 -8
- nabu/estimation/focus.py +4 -8
- nabu/estimation/motion.py +557 -0
- nabu/estimation/tests/test_alignment.py +2 -0
- nabu/estimation/tests/test_motion_estimation.py +471 -0
- nabu/estimation/tests/test_tilt.py +1 -1
- nabu/estimation/tilt.py +6 -5
- nabu/estimation/translation.py +47 -1
- nabu/io/cast_volume.py +108 -18
- nabu/io/detector_distortion.py +5 -6
- nabu/io/reader.py +45 -6
- nabu/io/reader_helical.py +5 -4
- nabu/io/tests/test_cast_volume.py +2 -2
- nabu/io/tests/test_readers.py +41 -38
- nabu/io/tests/test_remove_volume.py +152 -0
- nabu/io/tests/test_writers.py +2 -2
- nabu/io/utils.py +8 -4
- nabu/io/writer.py +1 -2
- nabu/misc/fftshift.py +1 -1
- nabu/misc/fourier_filters.py +1 -1
- nabu/misc/histogram.py +1 -1
- nabu/misc/histogram_cuda.py +1 -1
- nabu/misc/padding_base.py +1 -1
- nabu/misc/rotation.py +1 -1
- nabu/misc/rotation_cuda.py +1 -1
- nabu/misc/tests/test_binning.py +1 -1
- nabu/misc/transpose.py +1 -1
- nabu/misc/unsharp.py +1 -1
- nabu/misc/unsharp_cuda.py +1 -1
- nabu/misc/unsharp_opencl.py +1 -1
- nabu/misc/utils.py +1 -1
- nabu/opencl/fft.py +1 -1
- nabu/opencl/padding.py +1 -1
- nabu/opencl/src/backproj.cl +6 -6
- nabu/opencl/utils.py +8 -8
- nabu/pipeline/config.py +2 -2
- nabu/pipeline/config_validators.py +46 -46
- nabu/pipeline/datadump.py +3 -3
- nabu/pipeline/estimators.py +271 -11
- nabu/pipeline/fullfield/chunked.py +103 -67
- nabu/pipeline/fullfield/chunked_cuda.py +5 -2
- nabu/pipeline/fullfield/computations.py +4 -1
- nabu/pipeline/fullfield/dataset_validator.py +0 -1
- nabu/pipeline/fullfield/get_double_flatfield.py +147 -0
- nabu/pipeline/fullfield/nabu_config.py +36 -17
- nabu/pipeline/fullfield/processconfig.py +41 -7
- nabu/pipeline/fullfield/reconstruction.py +14 -10
- nabu/pipeline/helical/dataset_validator.py +3 -4
- nabu/pipeline/helical/fbp.py +4 -4
- nabu/pipeline/helical/filtering.py +5 -4
- nabu/pipeline/helical/gridded_accumulator.py +10 -11
- nabu/pipeline/helical/helical_chunked_regridded.py +1 -0
- nabu/pipeline/helical/helical_reconstruction.py +12 -9
- nabu/pipeline/helical/helical_utils.py +1 -2
- nabu/pipeline/helical/nabu_config.py +2 -1
- nabu/pipeline/helical/span_strategy.py +1 -0
- nabu/pipeline/helical/weight_balancer.py +2 -3
- nabu/pipeline/params.py +20 -3
- nabu/pipeline/tests/__init__.py +0 -0
- nabu/pipeline/tests/test_estimators.py +240 -3
- nabu/pipeline/utils.py +1 -1
- nabu/pipeline/writer.py +1 -1
- nabu/preproc/alignment.py +0 -10
- nabu/preproc/ccd.py +53 -3
- nabu/preproc/ctf.py +8 -8
- nabu/preproc/ctf_cuda.py +1 -1
- nabu/preproc/double_flatfield_cuda.py +2 -2
- nabu/preproc/double_flatfield_variable_region.py +0 -1
- nabu/preproc/flatfield.py +307 -2
- nabu/preproc/flatfield_cuda.py +1 -2
- nabu/preproc/flatfield_variable_region.py +3 -3
- nabu/preproc/phase.py +2 -4
- nabu/preproc/phase_cuda.py +2 -2
- nabu/preproc/shift.py +4 -2
- nabu/preproc/shift_cuda.py +0 -1
- nabu/preproc/tests/test_ctf.py +4 -4
- nabu/preproc/tests/test_double_flatfield.py +1 -1
- nabu/preproc/tests/test_flatfield.py +1 -1
- nabu/preproc/tests/test_paganin.py +1 -3
- nabu/preproc/tests/test_pcaflats.py +154 -0
- nabu/preproc/tests/test_vshift.py +4 -1
- nabu/processing/azim.py +9 -5
- nabu/processing/convolution_cuda.py +6 -4
- nabu/processing/fft_base.py +7 -3
- nabu/processing/fft_cuda.py +25 -164
- nabu/processing/fft_opencl.py +28 -6
- nabu/processing/fftshift.py +1 -1
- nabu/processing/histogram.py +1 -1
- nabu/processing/muladd.py +0 -1
- nabu/processing/padding_base.py +1 -1
- nabu/processing/padding_cuda.py +0 -2
- nabu/processing/processing_base.py +12 -6
- nabu/processing/rotation_cuda.py +3 -1
- nabu/processing/tests/test_fft.py +2 -64
- nabu/processing/tests/test_fftshift.py +1 -1
- nabu/processing/tests/test_medfilt.py +1 -3
- nabu/processing/tests/test_padding.py +1 -1
- nabu/processing/tests/test_roll.py +1 -1
- nabu/processing/tests/test_rotation.py +4 -2
- nabu/processing/unsharp_opencl.py +1 -1
- nabu/reconstruction/astra.py +245 -0
- nabu/reconstruction/cone.py +39 -9
- nabu/reconstruction/fbp.py +14 -0
- nabu/reconstruction/fbp_base.py +40 -8
- nabu/reconstruction/fbp_opencl.py +8 -0
- nabu/reconstruction/filtering.py +59 -25
- nabu/reconstruction/filtering_cuda.py +22 -21
- nabu/reconstruction/filtering_opencl.py +10 -14
- nabu/reconstruction/hbp.py +26 -13
- nabu/reconstruction/mlem.py +55 -16
- nabu/reconstruction/projection.py +3 -5
- nabu/reconstruction/sinogram.py +1 -1
- nabu/reconstruction/sinogram_cuda.py +0 -1
- nabu/reconstruction/tests/test_cone.py +37 -2
- nabu/reconstruction/tests/test_deringer.py +4 -4
- nabu/reconstruction/tests/test_fbp.py +36 -15
- nabu/reconstruction/tests/test_filtering.py +27 -7
- nabu/reconstruction/tests/test_halftomo.py +28 -2
- nabu/reconstruction/tests/test_mlem.py +94 -64
- nabu/reconstruction/tests/test_projector.py +7 -2
- nabu/reconstruction/tests/test_reconstructor.py +1 -1
- nabu/reconstruction/tests/test_sino_normalization.py +0 -1
- nabu/resources/dataset_analyzer.py +210 -24
- nabu/resources/gpu.py +4 -4
- nabu/resources/logger.py +4 -4
- nabu/resources/nxflatfield.py +103 -37
- nabu/resources/tests/test_dataset_analyzer.py +37 -0
- nabu/resources/tests/test_extract.py +11 -0
- nabu/resources/tests/test_nxflatfield.py +5 -5
- nabu/resources/utils.py +16 -10
- nabu/stitching/alignment.py +8 -11
- nabu/stitching/config.py +44 -35
- nabu/stitching/definitions.py +2 -2
- nabu/stitching/frame_composition.py +8 -10
- nabu/stitching/overlap.py +4 -4
- nabu/stitching/sample_normalization.py +5 -5
- nabu/stitching/slurm_utils.py +2 -2
- nabu/stitching/stitcher/base.py +2 -0
- nabu/stitching/stitcher/dumper/base.py +0 -1
- nabu/stitching/stitcher/dumper/postprocessing.py +1 -1
- nabu/stitching/stitcher/post_processing.py +11 -9
- nabu/stitching/stitcher/pre_processing.py +37 -31
- nabu/stitching/stitcher/single_axis.py +2 -3
- nabu/stitching/stitcher_2D.py +2 -1
- nabu/stitching/tests/test_config.py +10 -11
- nabu/stitching/tests/test_sample_normalization.py +1 -1
- nabu/stitching/tests/test_slurm_utils.py +1 -2
- nabu/stitching/tests/test_y_preprocessing_stitching.py +11 -8
- nabu/stitching/tests/test_z_postprocessing_stitching.py +3 -3
- nabu/stitching/tests/test_z_preprocessing_stitching.py +27 -24
- nabu/stitching/utils/tests/__init__.py +0 -0
- nabu/stitching/utils/tests/test_post-processing.py +1 -0
- nabu/stitching/utils/utils.py +16 -18
- nabu/tests.py +0 -3
- nabu/testutils.py +62 -9
- nabu/utils.py +50 -20
- {nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/METADATA +7 -7
- nabu-2025.1.0.dist-info/RECORD +328 -0
- {nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/WHEEL +1 -1
- {nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/entry_points.txt +2 -1
- nabu/app/correct_rot.py +0 -70
- nabu/io/tests/test_detector_distortion.py +0 -178
- nabu-2024.2.13.dist-info/RECORD +0 -317
- /nabu/{stitching → app}/tests/__init__.py +0 -0
- {nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/licenses/LICENSE +0 -0
- {nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/top_level.txt +0 -0
nabu/testutils.py
CHANGED

@@ -1,3 +1,4 @@
+from dataclasses import dataclass
 from itertools import product
 import tarfile
 import os
@@ -5,6 +6,7 @@ import numpy as np
 from scipy.signal.windows import gaussian
 from silx.resources import ExternalResources
 from silx.io.dictdump import nxtodict, dicttonx
+from nxtomo.application.nxtomo import ImageKey
 
 utilstest = ExternalResources(
     project="nabu", url_base="http://www.silx.org/pub/nabu/data/", env_key="NABU_DATA", timeout=60
@@ -14,14 +16,14 @@ __big_testdata_dir__ = os.environ.get("NABU_BIGDATA_DIR")
 if __big_testdata_dir__ is None or not (os.path.isdir(__big_testdata_dir__)):
     __big_testdata_dir__ = None
 
-__do_long_tests__ = os.environ.get("NABU_LONG_TESTS", False)
+__do_long_tests__ = os.environ.get("NABU_LONG_TESTS", False)  # noqa: PLW1508
 if __do_long_tests__:
     try:
         __do_long_tests__ = bool(int(__do_long_tests__))
     except:
         __do_long_tests__ = False
 
-__do_large_mem_tests__ = os.environ.get("NABU_LARGE_MEM_TESTS", False)
+__do_large_mem_tests__ = os.environ.get("NABU_LARGE_MEM_TESTS", False)  # noqa: PLW1508
 if __do_large_mem_tests__:
     try:
         __do_large_mem_tests__ = bool(int(__do_large_mem_tests__))
@@ -56,6 +58,38 @@ def get_data(*dataset_path):
     return np.load(dataset_downloaded_path)
 
 
+@dataclass
+class SimpleNXTomoDescription:
+    n_darks: int = 0
+    n_flats1: int = 0
+    n_projs: int = 0
+    n_flats2: int = 0
+    n_align: int = 0
+    frame_shape: tuple = None
+    dtype: np.dtype = np.uint16
+
+
+def get_dummy_nxtomo_info():
+    nx_fname = utilstest.getfile("dummy_nxtomo.nx")
+    data_desc = SimpleNXTomoDescription(
+        n_darks=10, n_flats1=11, n_projs=100, n_flats2=11, n_align=12, frame_shape=(11, 10), dtype=np.uint16
+    )
+    image_key = np.concatenate(
+        [
+            np.zeros(data_desc.n_darks, dtype=np.int32) + ImageKey.DARK_FIELD.value,
+            np.zeros(data_desc.n_flats1, dtype=np.int32) + ImageKey.FLAT_FIELD.value,
+            np.zeros(data_desc.n_projs, dtype=np.int32) + ImageKey.PROJECTION.value,
+            np.zeros(data_desc.n_flats2, dtype=np.int32) + ImageKey.FLAT_FIELD.value,
+            np.zeros(data_desc.n_align, dtype=np.int32) + ImageKey.ALIGNMENT.value,
+        ]
+    )
+    projs_vals = np.arange(data_desc.n_projs) + data_desc.n_flats1 + data_desc.n_darks
+    darks_vals = np.arange(data_desc.n_darks)
+    flats1_vals = np.arange(data_desc.n_darks, data_desc.n_darks + data_desc.n_flats1)
+    flats2_vals = np.arange(data_desc.n_darks, data_desc.n_darks + data_desc.n_flats2)
+    return nx_fname, data_desc, image_key, projs_vals, darks_vals, flats1_vals, flats2_vals
+
+
 def get_array_of_given_shape(img, shape, dtype):
     """
     From a given image, returns an array of the wanted shape and dtype.
@@ -81,12 +115,34 @@ def get_big_data(filename):
 
 
 def uncompress_file(compressed_file_path, target_directory):
-
-    f
+    if not tarfile.is_tarfile(compressed_file_path):
+        raise ValueError(f"Invalid tar file: {compressed_file_path}")
+
+    def is_safe_member(member, target_directory):
+        """Ensure the member does not extract outside the target directory."""
+        if not isinstance(member, tarfile.TarInfo):
+            return False  # Reject any unexpected type
+
+        abs_target = os.path.abspath(target_directory)
+        member_path = os.path.abspath(os.path.join(target_directory, member.name))
+        return member_path.startswith(abs_target)
+
+    def get_valid_members(tar):
+        members = [m for m in tar.getmembers() if is_safe_member(m, target_directory)]
+        if not members:
+            raise ValueError("No valid files to extract or archive contains unsafe paths.")
+        for member in members:
+            if not is_safe_member(member, target_directory):
+                raise ValueError(f"Unsafe path detected: {member.name}")
+
+    with tarfile.open(compressed_file_path, "r") as tar:
+        tar.extractall(  # noqa: S202 - what can be done in addition of the above checks ?
+            path=target_directory, members=get_valid_members(tar)
+        )
 
 
 def get_file(fname):
-    downloaded_file =
+    downloaded_file = utilstest.getfile(fname)
     if ".tar" in fname:
         uncompress_file(downloaded_file, os.path.dirname(downloaded_file))
         downloaded_file = downloaded_file.split(".tar")[0]
@@ -204,10 +260,7 @@ def generate_nx_dataset(out_fname, image_key, data_volume=None, rotation_angle=N
     nx_entry = nx_dict["entry"]
 
     def _get_field(dict_, path):
-
-        path = path[1:]
-        if path.endswith("/"):
-            path = path[:-1]
+        path = path.strip("/")
         split_path = path.split("/")
         if len(split_path) == 1:
             return dict_[split_path[0]]
nabu/utils.py
CHANGED

@@ -1,7 +1,8 @@
+from bisect import bisect_left
 from fnmatch import fnmatch
 from functools import partial
 import os
-from functools import
+from functools import lru_cache
 from itertools import product
 import warnings
 from time import time
@@ -94,7 +95,7 @@ def indices_to_slices(indices):
     jumps = np.hstack([-1, jumps, len(indices) - 1])
     slices = []
     for i in range(len(jumps) - 1):
-        slices.append(slice(indices[jumps[i] + 1], indices[jumps[i + 1]] + 1))
+        slices.append(slice(indices[jumps[i] + 1], indices[jumps[i + 1]] + 1))  # noqa: PERF401
     return slices
 
 
@@ -180,10 +181,12 @@ def list_match_queries(available, queries):
     Given a list of strings, return all items matching any of one elements of "queries"
     """
     matches = []
+    if isinstance(queries, str):
+        queries = [queries]
     for a in available:
         for q in queries:
             if fnmatch(a, q):
-                matches.append(a)
+                matches.append(a)  # noqa: PERF401
     return matches
 
 
@@ -211,7 +214,7 @@ def _sizeof(Type):
     return np.dtype(Type).itemsize
 
 
-class
+class _DefaultFormat(dict):
     """
     https://docs.python.org/3/library/stdtypes.html
     """
@@ -224,7 +227,7 @@ def safe_format(str_, **kwargs):
     """
     Alternative to str.format(), but does not throw a KeyError when fields are missing.
     """
-    return str_.format_map(
+    return str_.format_map(_DefaultFormat(**kwargs))
 
 
 def get_ftype(url):
@@ -296,7 +299,7 @@ def view_as_images_stack(img):
 
 
 def rescale_integers(items, new_tot):
-    """
+    """
     From a given sequence of integers, create a new sequence
     where the sum of all items must be equal to "new_tot".
     The relative contribution of each item to the new total is approximately kept.
@@ -329,6 +332,8 @@ def merged_shape(shapes, axis=0):
         return (shapes[0][0], n_img, shapes[0][2])
     elif axis == 2:
         return shapes[0][:2] + (n_img,)
+    else:
+        raise ValueError
 
 
 def is_device_backend(backend):
@@ -371,7 +376,7 @@ def generate_powers():
     powers = product(*valuations)
     res = []
     for pw in powers:
-        res.append(np.prod(list(map(lambda x: x[0] ** x[1], zip(primes, pw)))))
+        res.append(np.prod(list(map(lambda x: x[0] ** x[1], zip(primes, pw)))))  # noqa: PERF401
     return np.unique(res)
 
 
@@ -422,10 +427,14 @@ def partition_dict(dict_, n_partitions):
 
 
 def first_dict_item(dict_):
-    keys = sorted(
+    keys = sorted(dict_.keys())
     return dict_[keys[0]]
 
 
+def first_generator_item(gen):
+    return next(iter(gen))  # instead of list(gen)[0]
+
+
 def subsample_dict(dic, subsampling_factor):
     """
     Subsample a dict where keys are integers.
@@ -437,7 +446,7 @@ def subsample_dict(dic, subsampling_factor):
     return res
 
 
-def compare_dicts(dic1, dic2):
+def compare_dicts(dic1, dic2):  # noqa: PLR0911
     """
     Compare two dictionaries. Return None if and only iff the dictionaries are the same.
 
@@ -530,6 +539,18 @@ def restore_items_in_list(list_, removed_items):
         list_.insert(idx, val)
 
 
+def search_sorted(arr, val):
+    """
+    Binary search that returns the "nearest" index given a query,
+    i.e find "i" that minimizes abs(arr[i] - val)
+    It does not return the "insersion point" contrarily to numpy.searchsorted() or bisect_left
+    """
+    pos = bisect_left(arr, val)
+    if pos == len(arr):
+        return len(arr) - 1
+    return pos - 1 if abs(val - arr[pos - 1]) < abs(arr[pos] - val) else pos
+
+
 def check_supported(param_value, available, param_desc):
     if param_value not in available:
         raise ValueError("Unsupported %s '%s'. Available are: %s" % (param_desc, param_value, str(available)))
@@ -629,7 +650,7 @@ def get_num_threads(n=None):
     return min(n_avail, n)
 
 
-class DictToObj
+class DictToObj:
     """utility class to transform a dictionary into an object with dictionary items as members.
     Example:
 
@@ -646,14 +667,17 @@ def remove_parenthesis_or_brackets(input_str):
     """
     clear string from left and or roght parenthesis / braquets
     """
-    if input_str.startswith("(") and input_str.endswith(")") or
+    if (input_str.startswith("(") and input_str.endswith(")")) or (
+        input_str.startswith("[") and input_str.endswith("]")
+    ):
         input_str = input_str[1:-1]
     return input_str
 
 
 def filter_str_def(elmt):
     """clean elemt if is a string defined from a text file.
-    Remove some character that could have be put on left or right and some empty spaces
+    Remove some character that could have be put on left or right and some empty spaces
+    """
     if elmt is None:
         return None
     assert isinstance(elmt, str)
@@ -676,7 +700,7 @@ def convert_str_to_tuple(input_str: str, none_if_empty: bool = False):
     if isinstance(input_str, tuple):
         return input_str
     if not isinstance(input_str, str):
-        raise TypeError("input_str should be a string not {
+        raise TypeError(f"input_str should be a string not {type(input_str)}, {input_str}")
     input_str = input_str.lstrip(" ").lstrip("(").lstrip("[").lstrip(" ").rstrip(" ")
     input_str = remove_parenthesis_or_brackets(input_str)
     input_str = input_str.replace("\n", ",")
@@ -702,9 +726,9 @@ def concatenate_dict(dict_1, dict_2) -> dict:
     return res
 
 
-class BaseClassError:
+class BaseClassError(BaseException):
     def __init__(self, *args, **kwargs):
-        raise
+        raise NotImplementedError("Base class")
 
 
 def MissingComponentError(msg):
@@ -767,7 +791,7 @@ def median2(img):
     Roughly same speed as scipy median filter, but more memory demanding.
     """
     img2 = extend_image_onepixel(img)
-
+    img3 = np.array(
         [
             img2[0:-2, 0:-2],
             img2[0:-2, 1:-1],
@@ -780,13 +804,18 @@ def median2(img):
             img2[2:, 2:],
         ]
     )
-    return np.median(
+    return np.median(img3, axis=0)
 
 
 # ------------------------------------------------------------------------------
 # ---------------------------- Decorators --------------------------------------
 # ------------------------------------------------------------------------------
 
+
+def no_decorator(func):
+    return func
+
+
 _warnings = {}
 
 
@@ -820,6 +849,7 @@ def warning(msg):
             print(msg)
             res = func(*args, **kwargs)
             return res
+            return None
 
         return wrapper
 
@@ -840,7 +870,7 @@ def deprecated(msg, do_print=False):
 
 def deprecated_class(msg, do_print=False):
     def decorator(cls):
-        class
+        class Wrapper:
             def __init__(self, *args, **kwargs):
                 deprecation_warning(msg, do_print=do_print, func_name=cls.__name__)
                 self.wrapped = cls(*args, **kwargs)
@@ -849,7 +879,7 @@ def deprecated_class(msg, do_print=False):
             def __getattr__(self, name):
                 return getattr(self.wrapped, name)
 
-        return
+        return Wrapper
 
     return decorator
 
@@ -905,7 +935,7 @@ from warnings import catch_warnings
 # catch_warnings() does not have "action=XX" kwarg for python < 3.11
 from sys import version_info
 
-if version_info.major == 3 and version_info.minor < 11:
+if version_info.major == 3 and version_info.minor < 11:  # noqa: YTT204 - not sure about this
 
     def dummy(*args, **kwargs):
         pass
{nabu-2024.2.13.dist-info → nabu-2025.1.0.dist-info}/METADATA
CHANGED

@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: nabu
-Version:
+Version: 2025.1.0
 Summary: Nabu - Tomography software
 Author-email: Pierre Paleo <pierre.paleo@esrf.fr>, Henri Payno <henri.payno@esrf.fr>, Alessandro Mirone <mirone@esrf.fr>, Jérôme Lesaint <jerome.lesaint@esrf.fr>
 Maintainer-email: Pierre Paleo <pierre.paleo@esrf.fr>
@@ -14,24 +14,25 @@ Classifier: Development Status :: 5 - Production/Stable
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Science/Research
 Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
 Classifier: Programming Language :: Python :: 3.9
 Classifier: Programming Language :: Python :: 3.10
+Classifier: Programming Language :: Python :: 3.11
+Classifier: Programming Language :: Python :: 3.12
+Classifier: Programming Language :: Python :: 3.13
 Classifier: Environment :: Console
 Classifier: Operating System :: Unix
 Classifier: Operating System :: MacOS :: MacOS X
 Classifier: Operating System :: POSIX
 Classifier: Topic :: Scientific/Engineering :: Physics
 Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
-Requires-Python: >=3.
+Requires-Python: >=3.9
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy
+Requires-Dist: numpy>1.9.0
 Requires-Dist: scipy
 Requires-Dist: h5py>=3.0
 Requires-Dist: silx>=0.15.0
-Requires-Dist: tomoscan>=2.
+Requires-Dist: tomoscan>=2.2.2
 Requires-Dist: psutil
 Requires-Dist: pytest
 Requires-Dist: tifffile
@@ -41,7 +42,6 @@ Requires-Dist: scikit-image; extra == "full"
 Requires-Dist: PyWavelets; extra == "full"
 Requires-Dist: glymur; extra == "full"
 Requires-Dist: pycuda!=2024.1.1; extra == "full"
-Requires-Dist: scikit-cuda; extra == "full"
 Requires-Dist: pycudwt; extra == "full"
 Requires-Dist: sluurp>=0.3; extra == "full"
 Requires-Dist: pyvkfft; extra == "full"