nabu 2024.1.10-py3-none-any.whl → 2024.2.0-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nabu/__init__.py +1 -1
- nabu/app/bootstrap.py +2 -3
- nabu/app/cast_volume.py +4 -2
- nabu/app/cli_configs.py +5 -0
- nabu/app/composite_cor.py +1 -1
- nabu/app/create_distortion_map_from_poly.py +5 -6
- nabu/app/diag_to_pix.py +7 -19
- nabu/app/diag_to_rot.py +14 -29
- nabu/app/double_flatfield.py +32 -44
- nabu/app/parse_reconstruction_log.py +3 -0
- nabu/app/reconstruct.py +53 -15
- nabu/app/reconstruct_helical.py +2 -2
- nabu/app/stitching.py +27 -13
- nabu/app/tests/__init__.py +0 -0
- nabu/app/tests/test_reduce_dark_flat.py +4 -1
- nabu/cuda/kernel.py +11 -2
- nabu/cuda/processing.py +2 -2
- nabu/cuda/src/cone.cu +77 -0
- nabu/cuda/src/hierarchical_backproj.cu +271 -0
- nabu/cuda/utils.py +0 -6
- nabu/estimation/alignment.py +5 -19
- nabu/estimation/cor.py +173 -599
- nabu/estimation/cor_sino.py +356 -26
- nabu/estimation/focus.py +63 -11
- nabu/estimation/tests/test_cor.py +124 -58
- nabu/estimation/tests/test_focus.py +6 -6
- nabu/estimation/tilt.py +2 -1
- nabu/estimation/utils.py +5 -33
- nabu/io/__init__.py +1 -1
- nabu/io/cast_volume.py +1 -1
- nabu/io/reader.py +416 -21
- nabu/io/tests/test_readers.py +422 -0
- nabu/io/tests/test_writers.py +1 -102
- nabu/io/writer.py +4 -433
- nabu/opencl/kernel.py +14 -3
- nabu/opencl/processing.py +8 -0
- nabu/pipeline/config_validators.py +5 -2
- nabu/pipeline/datadump.py +12 -5
- nabu/pipeline/estimators.py +162 -188
- nabu/pipeline/fullfield/chunked.py +168 -92
- nabu/pipeline/fullfield/chunked_cuda.py +7 -3
- nabu/pipeline/fullfield/computations.py +2 -7
- nabu/pipeline/fullfield/dataset_validator.py +0 -4
- nabu/pipeline/fullfield/nabu_config.py +37 -13
- nabu/pipeline/fullfield/processconfig.py +22 -13
- nabu/pipeline/fullfield/reconstruction.py +13 -9
- nabu/pipeline/helical/helical_chunked_regridded.py +1 -1
- nabu/pipeline/helical/helical_chunked_regridded_cuda.py +1 -0
- nabu/pipeline/helical/helical_reconstruction.py +1 -1
- nabu/pipeline/params.py +21 -1
- nabu/pipeline/processconfig.py +1 -12
- nabu/pipeline/reader.py +146 -0
- nabu/pipeline/tests/test_estimators.py +44 -72
- nabu/pipeline/utils.py +4 -2
- nabu/pipeline/writer.py +10 -2
- nabu/preproc/ccd_cuda.py +1 -1
- nabu/preproc/ctf.py +14 -7
- nabu/preproc/ctf_cuda.py +2 -3
- nabu/preproc/double_flatfield.py +5 -12
- nabu/preproc/double_flatfield_cuda.py +2 -2
- nabu/preproc/flatfield.py +5 -1
- nabu/preproc/flatfield_cuda.py +5 -1
- nabu/preproc/phase.py +24 -73
- nabu/preproc/phase_cuda.py +5 -8
- nabu/preproc/tests/test_ctf.py +11 -7
- nabu/preproc/tests/test_flatfield.py +67 -122
- nabu/preproc/tests/test_paganin.py +54 -30
- nabu/processing/azim.py +206 -0
- nabu/processing/convolution_cuda.py +1 -1
- nabu/processing/fft_cuda.py +15 -17
- nabu/processing/histogram.py +2 -0
- nabu/processing/histogram_cuda.py +2 -1
- nabu/processing/kernel_base.py +3 -0
- nabu/processing/muladd_cuda.py +1 -0
- nabu/processing/padding_opencl.py +1 -1
- nabu/processing/roll_opencl.py +1 -0
- nabu/processing/rotation_cuda.py +2 -2
- nabu/processing/tests/test_fft.py +17 -10
- nabu/processing/unsharp_cuda.py +1 -1
- nabu/reconstruction/cone.py +104 -40
- nabu/reconstruction/fbp.py +3 -0
- nabu/reconstruction/fbp_base.py +7 -2
- nabu/reconstruction/filtering.py +20 -7
- nabu/reconstruction/filtering_cuda.py +7 -1
- nabu/reconstruction/hbp.py +424 -0
- nabu/reconstruction/mlem.py +99 -0
- nabu/reconstruction/reconstructor.py +2 -0
- nabu/reconstruction/rings_cuda.py +19 -19
- nabu/reconstruction/sinogram_cuda.py +1 -0
- nabu/reconstruction/sinogram_opencl.py +3 -1
- nabu/reconstruction/tests/test_cone.py +10 -5
- nabu/reconstruction/tests/test_deringer.py +7 -6
- nabu/reconstruction/tests/test_fbp.py +124 -10
- nabu/reconstruction/tests/test_filtering.py +13 -11
- nabu/reconstruction/tests/test_halftomo.py +30 -4
- nabu/reconstruction/tests/test_mlem.py +91 -0
- nabu/reconstruction/tests/test_reconstructor.py +8 -3
- nabu/resources/dataset_analyzer.py +142 -92
- nabu/resources/gpu.py +1 -0
- nabu/resources/nxflatfield.py +134 -125
- nabu/resources/templates/id16a_fluo.conf +42 -0
- nabu/resources/tests/test_extract.py +10 -0
- nabu/resources/tests/test_nxflatfield.py +2 -2
- nabu/stitching/alignment.py +80 -24
- nabu/stitching/config.py +105 -68
- nabu/stitching/definitions.py +1 -0
- nabu/stitching/frame_composition.py +68 -60
- nabu/stitching/overlap.py +91 -51
- nabu/stitching/single_axis_stitching.py +32 -0
- nabu/stitching/slurm_utils.py +6 -6
- nabu/stitching/stitcher/__init__.py +0 -0
- nabu/stitching/stitcher/base.py +124 -0
- nabu/stitching/stitcher/dumper/__init__.py +3 -0
- nabu/stitching/stitcher/dumper/base.py +94 -0
- nabu/stitching/stitcher/dumper/postprocessing.py +356 -0
- nabu/stitching/stitcher/dumper/preprocessing.py +60 -0
- nabu/stitching/stitcher/post_processing.py +555 -0
- nabu/stitching/stitcher/pre_processing.py +1068 -0
- nabu/stitching/stitcher/single_axis.py +484 -0
- nabu/stitching/stitcher/stitcher.py +0 -0
- nabu/stitching/stitcher/y_stitcher.py +13 -0
- nabu/stitching/stitcher/z_stitcher.py +45 -0
- nabu/stitching/stitcher_2D.py +278 -0
- nabu/stitching/tests/test_config.py +12 -37
- nabu/stitching/tests/test_frame_composition.py +33 -59
- nabu/stitching/tests/test_overlap.py +149 -7
- nabu/stitching/tests/test_utils.py +1 -1
- nabu/stitching/tests/test_y_preprocessing_stitching.py +132 -0
- nabu/stitching/tests/{test_z_stitching.py → test_z_postprocessing_stitching.py} +167 -561
- nabu/stitching/tests/test_z_preprocessing_stitching.py +431 -0
- nabu/stitching/utils/__init__.py +1 -0
- nabu/stitching/utils/post_processing.py +281 -0
- nabu/stitching/utils/tests/test_post-processing.py +21 -0
- nabu/stitching/{utils.py → utils/utils.py} +79 -52
- nabu/stitching/y_stitching.py +27 -0
- nabu/stitching/z_stitching.py +32 -2281
- nabu/testutils.py +1 -152
- nabu/thirdparty/tomocupy_remove_stripe.py +43 -9
- nabu/utils.py +158 -61
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/METADATA +24 -17
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/RECORD +145 -121
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/WHEEL +1 -1
- nabu/io/tiffwriter_zmm.py +0 -99
- nabu/pipeline/fallback_utils.py +0 -149
- nabu/pipeline/helical/tests/test_accumulator.py +0 -158
- nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -355
- nabu/pipeline/helical/tests/test_strategy.py +0 -61
- nabu/pipeline/helical/utils.py +0 -51
- nabu/pipeline/tests/test_chunk_reader.py +0 -74
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/LICENSE +0 -0
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/entry_points.txt +0 -0
- {nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/top_level.txt +0 -0
nabu/testutils.py
CHANGED
```diff
@@ -4,10 +4,7 @@ import os
 import numpy as np
 from scipy.signal.windows import gaussian
 from silx.resources import ExternalResources
-from silx.io.dictdump import
-from silx.io.url import DataUrl
-from tomoscan.io import HDF5File
-from .io.utils import get_compacted_dataslices
+from silx.io.dictdump import nxtodict, dicttonx
 
 utilstest = ExternalResources(
     project="nabu", url_base="http://www.silx.org/pub/nabu/data/", env_key="NABU_DATA", timeout=60
@@ -200,154 +197,6 @@ def compare_shifted_images(img1, img2, fwhm_ratio=0.7, return_upper_bound=False)
     return res
 
 
-class SimpleHDF5TomoScanMock:
-    def __init__(self, image_key):
-        self._image_key = image_key
-
-    @property
-    def image_key(self):
-        return self._image_key
-
-    @image_key.setter
-    def image_key(self, image_key):
-        self._image_key = image_key
-
-    def save_reduced_flats(self, *args, **kwargs):
-        pass
-
-    def save_reduced_darks(self, *args, **kwargs):
-        pass
-
-
-class NXDatasetMock:
-    """
-    An alternative to tomoscan.esrf.mock.MockHDF5, with a different interface.
-    Attributes are not supported !
-    """
-
-    def __init__(self, data_volume, image_keys, rotation_angles=None, incident_energy=19.0, other_params=None):
-        self.data_volume = data_volume
-        self.n_proj = data_volume.shape[0]
-        self.image_key = image_keys
-        if rotation_angles is None:
-            rotation_angles = np.linspace(0, 180, self.n_proj, False)
-        self.rotation_angle = rotation_angles
-        self.incident_energy = incident_energy
-        assert image_keys.size == self.n_proj
-        self._finalize_init(other_params)
-        self.dataset_dict = None
-        self.fname = None
-        # Mocks more attributes
-        self.dataset_scanner = SimpleHDF5TomoScanMock(image_key=self.image_key)
-        self.kind = "hdf5"
-
-    def _finalize_init(self, other_params):
-        if other_params is None:
-            other_params = {}
-        default_params = {
-            "detector": {
-                "count_time": 0.05 * np.ones(self.n_proj, dtype="f"),
-                "distance": 0.5,
-                "field_of_view": "Full",
-                "image_key_control": np.copy(self.image_key),
-                "x_pixel_size": 6.5e-6,
-                "y_pixel_size": 6.5e-6,
-                "x_magnified_pixel_size": 6.5e-5,
-                "y_magnified_pixel_size": 6.5e-5,
-            },
-            "sample": {
-                "name": "dummy sample",
-                "x_translation": 5e-4 * np.ones(self.n_proj, dtype="f"),
-                "y_translation": 5e-4 * np.ones(self.n_proj, dtype="f"),
-                "z_translation": 5e-4 * np.ones(self.n_proj, dtype="f"),
-            },
-        }
-        default_params.update(other_params)
-        self.other_params = default_params
-
-    def generate_dict(self):
-        beam_group = {
-            "incident_energy": self.incident_energy,
-        }
-        detector_other_params = self.other_params["detector"]
-        detector_group = {
-            "count_time": detector_other_params["count_time"],
-            "data": self.data_volume,
-            "distance": detector_other_params["distance"],
-            "field_of_view": detector_other_params["field_of_view"],
-            "image_key": self.image_key,
-            "image_key_control": detector_other_params["image_key_control"],
-            "x_pixel_size": detector_other_params["x_pixel_size"],
-            "y_pixel_size": detector_other_params["y_pixel_size"],
-            "x_magnified_pixel_size": detector_other_params["x_magnified_pixel_size"],
-            "y_magnified_pixel_size": detector_other_params["y_magnified_pixel_size"],
-        }
-        sample_other_params = self.other_params["sample"]
-        sample_group = {
-            "name": sample_other_params["name"],
-            "rotation_angle": self.rotation_angle,
-            "x_translation": sample_other_params["x_translation"],
-            "y_translation": sample_other_params["y_translation"],
-            "z_translation": sample_other_params["z_translation"],
-        }
-        self.dataset_dict = {
-            "beam": beam_group,
-            "instrument": {
-                "detector": detector_group,
-            },
-            "sample": sample_group,
-        }
-
-    def generate_hdf5_file(self, fname, h5path=None):
-        self.fname = fname
-        h5path = h5path or "/entry"
-        if self.dataset_dict is None:
-            self.generate_dict()
-        dicttoh5(self.dataset_dict, fname, h5path=h5path, mode="a")
-        # Patch the "data" field which is exported as string by dicttoh5 (?!)
-        self.dataset_path = os.path.join(h5path, "instrument/detector/data")
-        with HDF5File(fname, "a") as fid:
-            del fid[self.dataset_path]
-            fid[self.dataset_path] = self.dataset_dict["instrument"]["detector"]["data"]
-
-    # Mock some of the HDF5DatasetAnalyzer attributes
-    @property
-    def dataset_hdf5_url(self):
-        if self.fname is None:
-            raise ValueError("generate_hdf5_file() was not called")
-        return DataUrl(file_path=self.fname, data_path=self.dataset_path, scheme="silx")
-
-    def _get_images_with_key(self, key):
-        indices = np.arange(self.image_key.size)[self.image_key == key]
-        urls = [
-            DataUrl(
-                file_path=self.fname,
-                data_path=self.dataset_path,
-                data_slice=slice(img_idx, img_idx + 1),
-                scheme="silx",
-            )
-            for img_idx in indices
-        ]
-        return dict(zip(indices, urls))
-
-    @property
-    def flats(self):
-        return self._get_images_with_key(1)
-
-    @property
-    def darks(self):
-        return self._get_images_with_key(2)
-
-    def get_data_slices(self, what):
-        images = getattr(self, what)
-        # we can't directly use set() on slice() object (unhashable). Use tuples
-        tuples_list = list(
-            set((du.data_slice().start, du.data_slice().stop) for du in get_compacted_dataslices(images).values())
-        )
-        slices_list = [slice(item[0], item[1]) for item in tuples_list]
-        return slices_list
-
-
 # To be improved
 def generate_nx_dataset(out_fname, image_key, data_volume=None, rotation_angle=None):
     nx_template_file = get_file("dummy.nx.tar.gz")
```
nabu/thirdparty/tomocupy_remove_stripe.py
CHANGED
```diff
@@ -4,9 +4,11 @@
 This file is a "GPU" (through cupy) implementation of "remove_all_stripe".
 The original method is implemented by Nghia Vo in the algotom project: https://github.com/algotom/algotom/blob/master/algotom/prep/removal.py
 The implementation using cupy is done by Viktor Nikitin in the tomocupy project: https://github.com/tomography/tomocupy/blame/main/src/tomocupy/remove_stripe.py
-
+then moved to https://github.com/tomography/tomocupy/blob/main/src/tomocupy/processing/remove_stripe.py
 
 For now we can't rely on off-the-shelf tomocupy as it's not packaged in pypi, and compilation is quite tedious.
+
+License follows.
 """
 
 # *************************************************************************** #
@@ -47,6 +49,7 @@ For now we can't rely on off-the-shelf tomocupy as it's not packaged in pypi, an
 # *************************************************************************** #
 
 try:
+    import pycuda.gpuarray as garray
     import cupy as cp
     import pywt
     from cupyx.scipy.ndimage import median_filter
@@ -559,28 +562,59 @@ def remove_all_stripe(tomo,
     return tomo
 
 
+def remove_all_stripe_sinos(sinos, snr=3, la_size=61, sm_size=21, dim=1):
+    """
+    Same as remove_all_stripe(), but acting on sinograms
+    """
+    n_sinos, n_a, n_x = sinos.shape
+    matindex = _create_matindex(n_x, n_a)
+    for m in range(n_sinos):
+        sino = sinos[m]
+        sino = _rs_dead(sino, snr, la_size, matindex)
+        sino = _rs_sort(sino, sm_size, matindex, dim)
+        sinos[m] = sino
+    return sinos
+
+
 from ..cuda.utils import pycuda_to_cupy
-def remove_all_stripe_pycuda(radios, device_id=0, **kwargs):
+def remove_all_stripe_pycuda(array, layout="radios", device_id=0, **kwargs):
     """
-    Nabu interface to "remove_all_stripe".
+    Nabu interface to tomocupy "remove_all_stripe".
+    Processing is done in-place to save memory, meaning that the content of "array" will be overwritten.
 
     Parameters
     ----------
-
-    Stack of radios in the shape (n_angles, n_y, n_x)
-
+    array: pycuda.GPUArray
+        Stack of radios in the shape (n_angles, n_y, n_x), if layout == "radios"
+        Stack of sinos in the shape (n_y, n_angles, n_x), if layout == "sinos".
+
 
     Other Parameters
     ----------------
     See parameters of 'remove_all_stripe
     """
+    # Init cupy. Nabu currently does not use cupy, with exception of this module,
+    # so the initialization has to be done here.
     if getattr(remove_all_stripe, "_cupy_init", False) is False:
         from cupy import cuda
         cuda.Device(device_id).use()
         setattr(remove_all_stripe, "_cupy_init", True)
 
-
-
-
+    # remove_all_stripe() in tomocupy expects a 3D array to build the "matindex" data structure.
+    # The usage of this "matindex" array is not clear since this method is supposed to act on individual sinograms.
+    # To avoid memory duplication, we use fake 3D array, i.e, we pass a series of (1, n_a, n_x) sinograms
 
+    if layout == "radios":
+        sinos = array.transpose(axes=(1, 0, 2))  # no copy
+    else:
+        sinos = array
+    # is_contiguous = sinos.flags.c_contigious
+    n_sinos, n_a, n_x = sinos.shape
+    sinos_tmp = garray.zeros((1, n_a, n_x), dtype="f")
+    for i in range(n_sinos):
+        sinos_tmp[0] = sinos[i]
+        cupy_sinos = pycuda_to_cupy(sinos_tmp)  # no memory copy, the internal pointer is passed to pycuda
+        remove_all_stripe_sinos(cupy_sinos, **kwargs)
+        sinos[i] = sinos_tmp[0]
+    return array
```
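For orientation, a minimal usage sketch of the updated `remove_all_stripe_pycuda()` entry point, not taken from nabu's documentation. It assumes a CUDA-capable machine with pycuda, cupy and PyWavelets installed; the array shapes follow the docstring in the hunk above, and the data is random, purely illustrative.

```python
# Illustrative sketch only: exercising the new `layout` parameter of
# remove_all_stripe_pycuda(). Assumes a CUDA GPU with pycuda/cupy/PyWavelets available.
import numpy as np
import pycuda.autoinit  # noqa: F401 -- creates a CUDA context for pycuda
import pycuda.gpuarray as garray

from nabu.thirdparty.tomocupy_remove_stripe import remove_all_stripe_pycuda

# Stack of radios, shape (n_angles, n_y, n_x): transposed internally to sinograms, processed in-place
radios = garray.to_gpu(np.random.rand(500, 20, 640).astype("f"))
remove_all_stripe_pycuda(radios, layout="radios", snr=3, la_size=61, sm_size=21)

# Stack of sinograms, shape (n_y, n_angles, n_x): no transposition needed
sinos = garray.to_gpu(np.random.rand(20, 500, 640).astype("f"))
remove_all_stripe_pycuda(sinos, layout="sinos")
```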
nabu/utils.py
CHANGED
```diff
@@ -1,6 +1,6 @@
+from fnmatch import fnmatch
 from functools import partial
 import os
-import sys
 from functools import partial, lru_cache
 from itertools import product
 import warnings
@@ -50,7 +50,7 @@ def convert_index(idx, idx_max, default_val):
 
 
 def get_folder_path(foldername=""):
-    _file_dir = os.path.dirname(os.path.
+    _file_dir = os.path.dirname(os.path.abspath(__file__))
     package_dir = _file_dir
     return os.path.join(package_dir, foldername)
 
@@ -74,6 +74,99 @@ def get_resource_file(filename, subfolder=None):
     return os.path.join(abspath, filename)
 
 
+def indices_to_slices(indices):
+    """
+    From a series of integer indices, return corresponding slice() objects.
+
+    Parameters
+    ----------
+    indices: collection of sorted unique integers
+        Arrays indices
+
+    Examples
+    --------
+    slices_from_indices([0, 1, 2, 3]) returns [slice(0, 4)]
+    slices_from_indices([8, 9, 10, 14, 15, 16]) returns [slice(8, 11), slice(15, 17)]
+    """
+    jumps = np.where(np.diff(indices) > 1)[0]
+    if len(jumps) == 0:
+        return [slice(indices[0], indices[-1] + 1)]
+    jumps = np.hstack([-1, jumps, len(indices) - 1])
+    slices = []
+    for i in range(len(jumps) - 1):
+        slices.append(slice(indices[jumps[i] + 1], indices[jumps[i + 1]] + 1))
+    return slices
+
+
+def merge_slices(slice1, slice2):
+    """
+    Merge two slicing operations in one.
+
+    Examples
+    --------
+    array = numpy.arange(200)
+    array[slice(133, 412, 2)][slice(31, 35, 2)]  # gives [195 199]
+    array[merge_slices(slice(133, 412, 2), slice(31, 35, 2))]  # gives [195 199]
+    """
+    step1 = slice1.step or 1
+    step2 = slice2.step or 1
+    step = step1 * step2
+    if step == 1:
+        step = None
+
+    start = slice1.start + step1 * (slice2.start or 0)
+    if slice2.stop is None:
+        stop = slice1.stop
+    else:
+        stop = min(slice1.stop, slice1.start + step1 * slice2.stop)
+    return slice(start, stop, step)
+
+
+def compacted_views(slices_):
+    """
+    From a list of slice objects, returns the slice objects corresponding to a compact view.
+
+    If "array" is obtained with
+        array = np.hstack([big_array[slice1], big_array[slice2]])
+    Then, compacted_views([slice1, slice2]) returns [slice3, slice4] where
+      - slice3 are the indices, in 'array', corresponding to indices of slice1 in 'big_array'
+      - slice4 are the indices, in 'array', corresponding to indices of slice2 in 'big_array'
+
+    Example
+    -------
+    compacted_views([slice(1, 26), slice(526, 551)]) gives [slice(0, 25), slice(25, 50)]
+
+    """
+    prev_start = 0
+    r = []
+    for s in slices_:
+        start = prev_start
+        stop = start + (s.stop - s.start)
+        r.append(slice(start, stop))
+        prev_start = stop
+    return r
+
+
+def get_size_from_sliced_dimension(length, slice_):
+    """
+    From a given array size, returns the size of the array once it is accessed using a slice.
+
+    Examples
+    --------
+    If data.shape = (3500, 2160, 2560)
+    get_size_from_sliced_dimension(data.shape[0], None) returns 3500
+    get_size_from_sliced_dimension(data.shape[0], slice(100, 200)) returns 100
+    """
+    return np.arange(length)[slice_].size
+
+
+def get_shape_from_sliced_dims(shape, slices_):
+    """
+    Same as get_size_from_sliced_dimension() but in 3D
+    """
+    return tuple(get_size_from_sliced_dimension(length, slice_) for length, slice_ in zip(shape, slices_))
+
+
 def get_available_threads():
     try:
         n_threads = len(os.sched_getaffinity(0))
@@ -82,6 +175,18 @@ def get_available_threads():
     return n_threads
 
 
+def list_match_queries(available, queries):
+    """
+    Given a list of strings, return all items matching any of one elements of "queries"
+    """
+    matches = []
+    for a in available:
+        for q in queries:
+            if fnmatch(a, q):
+                matches.append(a)
+    return matches
+
+
 def is_writeable(location):
     """
     Return True if a file/location is writeable.
@@ -93,6 +198,12 @@ def is_int(num, eps=1e-7):
     return abs(num - int(num)) < eps
 
 
+def is_scalar(stuff):
+    if isinstance(stuff, str):
+        return False
+    return np.isscalar(stuff)
+
+
 def _sizeof(Type):
     """
    return the size (in bytes) of a scalar type, like the C behavior
@@ -100,6 +211,22 @@ def _sizeof(Type):
     return np.dtype(Type).itemsize
 
 
+class _Default_format(dict):
+    """
+    https://docs.python.org/3/library/stdtypes.html
+    """
+
+    def __missing__(self, key):
+        return key
+
+
+def safe_format(str_, **kwargs):
+    """
+    Alternative to str.format(), but does not throw a KeyError when fields are missing.
+    """
+    return str_.format_map(_Default_format(**kwargs))
+
+
 def get_ftype(url):
     """
     return supposed filetype of an url
@@ -294,6 +421,11 @@ def partition_dict(dict_, n_partitions):
     return res
 
 
+def first_dict_item(dict_):
+    keys = sorted(list(dict_.keys()))
+    return dict_[keys[0]]
+
+
 def subsample_dict(dic, subsampling_factor):
     """
     Subsample a dict where keys are integers.
@@ -559,64 +691,6 @@ def convert_str_to_tuple(input_str: str, none_if_empty: bool = False):
     return tuple(elmts)
 
 
-class Progress:
-    """Simple interface for defining advancement on a 100 percentage base"""
-
-    def __init__(self, name: str):
-        self._name = name
-        self.set_name(name)
-
-    def set_name(self, name):
-        self._name = name
-        self.reset()
-
-    def reset(self, max_=None):
-        """
-        reset the advancement to n and max advancement to max_
-        :param int max_:
-        """
-        self._n_processed = 0
-        self._max_processed = max_
-
-    def start_process(self) -> None:
-        self.set_advancement(0)
-
-    def set_advancement(self, value: int) -> None:
-        """
-
-        :param int value: set advancement to value
-        """
-        length = 20  # modify this to change the length
-        block = int(round(length * value / 100))
-        blocks_str = "#" * block + "-" * (length - block)
-        msg = "\r{0}: [{1}] {2}%".format(self._name, blocks_str, round(value, 2))
-        if value >= 100:
-            msg += " DONE\r\n"
-        sys.stdout.write(msg)
-        sys.stdout.flush()
-
-    def end_process(self) -> None:
-        """Set advancement to 100 %"""
-        self.set_advancement(100)
-
-    def set_max_advancement(self, n: int) -> None:
-        """
-
-        :param int n: number of steps contained by the advancement. When
-            advancement reach this value, advancement will be 100 %
-        """
-        self._max_processed = n
-
-    def increase_advancement(self, i: int = 1) -> None:
-        """
-
-        :param int i: increase the advancement of n step
-        """
-        self._n_processed += i
-        advancement = int(float(self._n_processed / self._max_processed) * 100)
-        self.set_advancement(advancement)
-
-
 def concatenate_dict(dict_1, dict_2) -> dict:
     """update dict which has dict as values. And we want concatenate those values to"""
     res = dict_1.copy()
@@ -658,12 +732,14 @@ def generate_coords(img_shp, center=None):
     return R, C
 
 
-def clip_circle(img, center=None, radius=None):
+def clip_circle(img, center=None, radius=None, out_value=0):
     R, C = generate_coords(img.shape, center)
     if radius is None:
         radius = R.shape[-1] // 2
     M = R**2 + C**2
     res = np.zeros_like(img)
+    if out_value != 0:
+        res.fill(out_value)
     res[M < radius**2] = img[M < radius**2]
     return res
 
@@ -821,3 +897,24 @@ def docstring(origin):
     If the origin class has not method n case the
     """
     return partial(_docstring, origin=origin)
+
+
+from warnings import catch_warnings
+
+# FIX for python < 3.11
+# catch_warnings() does not have "action=XX" kwarg for python < 3.11
+from sys import version_info
+
+if version_info.major == 3 and version_info.minor < 11:
+
+    def dummy(*args, **kwargs):
+        pass
+
+    catch_warnings_old = catch_warnings
+
+    def catch_warnings(*args, **kwargs):  # pylint: disable=E0102
+        action = kwargs.pop("action", None)
+        return catch_warnings_old(record=(dummy if action == "ignore" else False))
+
+
+# ---
```
{nabu-2024.1.10.dist-info → nabu-2024.2.0.dist-info}/METADATA
CHANGED
```diff
@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: nabu
-Version: 2024.1.10
+Version: 2024.2.0
 Summary: Nabu - Tomography software
 Author-email: Pierre Paleo <pierre.paleo@esrf.fr>, Henri Payno <henri.payno@esrf.fr>, Alessandro Mirone <mirone@esrf.fr>, Jérôme Lesaint <jerome.lesaint@esrf.fr>
 Maintainer-email: Pierre Paleo <pierre.paleo@esrf.fr>
@@ -49,28 +49,35 @@ Classifier: Topic :: Scientific/Engineering :: Medical Science Apps.
 Requires-Python: >=3.7
 Description-Content-Type: text/markdown
 License-File: LICENSE
-Requires-Dist: numpy<2,>1.9.0
+Requires-Dist: numpy <2,>1.9.0
 Requires-Dist: scipy
-Requires-Dist: h5py>=3.0
-Requires-Dist: silx>=0.15.0
-Requires-Dist: tomoscan>=2.0
+Requires-Dist: h5py >=3.0
+Requires-Dist: silx >=0.15.0
+Requires-Dist: tomoscan >=2.1.0
 Requires-Dist: psutil
 Requires-Dist: pytest
 Requires-Dist: tifffile
+Requires-Dist: tqdm
 Provides-Extra: doc
-Requires-Dist: sphinx; extra ==
-Requires-Dist: cloud-sptheme; extra ==
-Requires-Dist: myst-parser; extra ==
-Requires-Dist: nbsphinx; extra ==
+Requires-Dist: sphinx ; extra == 'doc'
+Requires-Dist: cloud-sptheme ; extra == 'doc'
+Requires-Dist: myst-parser ; extra == 'doc'
+Requires-Dist: nbsphinx ; extra == 'doc'
 Provides-Extra: full
-Requires-Dist: scikit-image; extra ==
-Requires-Dist: PyWavelets; extra ==
-Requires-Dist: glymur; extra ==
-Requires-Dist: pycuda
-Requires-Dist: scikit-cuda; extra ==
-Requires-Dist: pycudwt; extra ==
-Requires-Dist: sluurp>=0.3; extra ==
-Requires-Dist: pyvkfft; extra ==
+Requires-Dist: scikit-image ; extra == 'full'
+Requires-Dist: PyWavelets ; extra == 'full'
+Requires-Dist: glymur ; extra == 'full'
+Requires-Dist: pycuda !=2024.1.1 ; extra == 'full'
+Requires-Dist: scikit-cuda ; extra == 'full'
+Requires-Dist: pycudwt ; extra == 'full'
+Requires-Dist: sluurp >=0.3 ; extra == 'full'
+Requires-Dist: pyvkfft ; extra == 'full'
+Provides-Extra: full_nocuda
+Requires-Dist: scikit-image ; extra == 'full_nocuda'
+Requires-Dist: PyWavelets ; extra == 'full_nocuda'
+Requires-Dist: glymur ; extra == 'full_nocuda'
+Requires-Dist: sluurp >=0.3 ; extra == 'full_nocuda'
+Requires-Dist: pyvkfft ; extra == 'full_nocuda'
 
 # Nabu
 
```