nabu 2022.3.0a1__py3-none-any.whl → 2023.1.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap.py +7 -1
  3. nabu/app/cast_volume.py +8 -2
  4. nabu/app/cli_configs.py +69 -0
  5. nabu/app/composite_cor.py +97 -0
  6. nabu/app/create_distortion_map_from_poly.py +118 -0
  7. nabu/app/nx_z_splitter.py +1 -1
  8. nabu/app/prepare_weights_double.py +21 -16
  9. nabu/app/reconstruct_helical.py +0 -1
  10. nabu/app/utils.py +10 -5
  11. nabu/cuda/processing.py +1 -0
  12. nabu/cuda/tests/test_padding.py +1 -0
  13. nabu/cuda/utils.py +1 -0
  14. nabu/distributed/__init__.py +0 -0
  15. nabu/distributed/utils.py +57 -0
  16. nabu/distributed/worker.py +543 -0
  17. nabu/estimation/cor.py +3 -7
  18. nabu/estimation/cor_sino.py +2 -1
  19. nabu/estimation/distortion.py +6 -4
  20. nabu/io/cast_volume.py +10 -1
  21. nabu/io/detector_distortion.py +305 -0
  22. nabu/io/reader.py +37 -7
  23. nabu/io/reader_helical.py +0 -3
  24. nabu/io/tests/test_cast_volume.py +16 -4
  25. nabu/io/tests/test_detector_distortion.py +178 -0
  26. nabu/io/tests/test_writers.py +2 -2
  27. nabu/io/tiffwriter_zmm.py +2 -3
  28. nabu/io/writer.py +84 -1
  29. nabu/io/writer_BACKUP_193259.py +556 -0
  30. nabu/io/writer_BACKUP_193381.py +556 -0
  31. nabu/io/writer_BASE_193259.py +548 -0
  32. nabu/io/writer_BASE_193381.py +548 -0
  33. nabu/io/writer_LOCAL_193259.py +550 -0
  34. nabu/io/writer_LOCAL_193381.py +550 -0
  35. nabu/io/writer_REMOTE_193259.py +557 -0
  36. nabu/io/writer_REMOTE_193381.py +557 -0
  37. nabu/misc/fourier_filters.py +2 -0
  38. nabu/misc/rotation.py +0 -1
  39. nabu/misc/tests/test_rotation.py +1 -0
  40. nabu/pipeline/config_validators.py +10 -0
  41. nabu/pipeline/datadump.py +1 -1
  42. nabu/pipeline/dataset_validator.py +0 -1
  43. nabu/pipeline/detector_distortion_provider.py +20 -0
  44. nabu/pipeline/estimators.py +35 -21
  45. nabu/pipeline/fallback_utils.py +1 -1
  46. nabu/pipeline/fullfield/chunked.py +30 -15
  47. nabu/pipeline/fullfield/chunked_black.py +881 -0
  48. nabu/pipeline/fullfield/chunked_cuda.py +34 -4
  49. nabu/pipeline/fullfield/chunked_fb.py +966 -0
  50. nabu/pipeline/fullfield/chunked_google.py +921 -0
  51. nabu/pipeline/fullfield/chunked_pep8.py +920 -0
  52. nabu/pipeline/fullfield/computations.py +7 -6
  53. nabu/pipeline/fullfield/dataset_validator.py +1 -1
  54. nabu/pipeline/fullfield/grouped_cuda.py +6 -0
  55. nabu/pipeline/fullfield/nabu_config.py +15 -3
  56. nabu/pipeline/fullfield/processconfig.py +5 -0
  57. nabu/pipeline/fullfield/reconstruction.py +1 -2
  58. nabu/pipeline/helical/gridded_accumulator.py +1 -8
  59. nabu/pipeline/helical/helical_chunked_regridded.py +48 -33
  60. nabu/pipeline/helical/helical_reconstruction.py +1 -9
  61. nabu/pipeline/helical/nabu_config.py +11 -14
  62. nabu/pipeline/helical/span_strategy.py +11 -4
  63. nabu/pipeline/helical/tests/test_accumulator.py +0 -3
  64. nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -6
  65. nabu/pipeline/helical/tests/test_strategy.py +0 -1
  66. nabu/pipeline/helical/weight_balancer.py +0 -1
  67. nabu/pipeline/params.py +4 -0
  68. nabu/pipeline/processconfig.py +6 -2
  69. nabu/pipeline/writer.py +9 -4
  70. nabu/preproc/distortion.py +4 -3
  71. nabu/preproc/double_flatfield.py +16 -4
  72. nabu/preproc/double_flatfield_cuda.py +3 -2
  73. nabu/preproc/double_flatfield_variable_region.py +13 -4
  74. nabu/preproc/flatfield.py +29 -7
  75. nabu/preproc/flatfield_cuda.py +0 -1
  76. nabu/preproc/flatfield_variable_region.py +5 -2
  77. nabu/preproc/phase.py +0 -1
  78. nabu/preproc/phase_cuda.py +0 -1
  79. nabu/preproc/tests/test_ctf.py +4 -3
  80. nabu/preproc/tests/test_flatfield.py +6 -7
  81. nabu/reconstruction/fbp_opencl.py +1 -1
  82. nabu/reconstruction/filtering.py +0 -1
  83. nabu/reconstruction/tests/test_fbp.py +1 -0
  84. nabu/resources/dataset_analyzer.py +0 -1
  85. nabu/resources/templates/bm05_pag.conf +34 -0
  86. nabu/resources/templates/id16_ctf.conf +2 -1
  87. nabu/resources/tests/test_nxflatfield.py +0 -1
  88. nabu/resources/tests/test_units.py +0 -1
  89. nabu/stitching/frame_composition.py +7 -1
  90. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/METADATA +2 -7
  91. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/RECORD +96 -75
  92. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/WHEEL +1 -1
  93. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/entry_points.txt +2 -1
  94. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/LICENSE +0 -0
  95. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/top_level.txt +0 -0
  96. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/zip-safe +0 -0
@@ -0,0 +1,550 @@
+ from glob import glob
+ from os import path, getcwd, chdir
+ from posixpath import join as posix_join
+ from datetime import datetime
+ import numpy as np
+ from h5py import VirtualSource, VirtualLayout
+ from silx.io.dictdump import dicttonx
+ from tomoscan.io import HDF5File
+ from tomoscan.esrf import (
+     EDFVolume, MultiTIFFVolume,
+     HDF5Volume as _HDF5VolumeBase,
+     TIFFVolume as _TIFFVolumeBase,
+     JP2KVolume as _JP2KVolumeBase
+ )
+ from .. import version as nabu_version
+ from ..utils import merged_shape, deprecation_warning
+ from ..misc.utils import rescale_data
+ from .utils import check_h5py_version, convert_dict_values
+ try:
+     from glymur import Jp2k, set_option as glymur_set_option
+     from glymur.version import openjpeg_version, version as glymur_version
+     __have_jp2k__ = True
+ except ImportError:
+     __have_jp2k__ = False
+
+
+ def get_datetime():
+     """
+     Function used by some writers to indicate the current date.
+     """
+     return datetime.now().replace(microsecond=0).isoformat()
+
+
+ class Writer:
+     """
+     Base class for all writers.
+     """
+     def __init__(self, fname):
+         self.fname = fname
+
+     def get_filename(self):
+         return self.fname
+
+
+ class NXProcessWriter(Writer):
+     """
+     A class to write a Nexus file with a processing result.
+     """
+     def __init__(self, fname, entry=None, filemode=None, overwrite=False):
+         """
+         Initialize a NXProcessWriter.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the HDF5 file.
+         entry: str, optional
+             Entry in the HDF5 file. Default is "entry".
+         """
+         super().__init__(fname)
+         self._set_entry(entry)
+         self.overwrite = overwrite
+         check_h5py_version()
+         if filemode is not None:
+             deprecation_warning(
+                 "'filemode' is deprecated and has no effect", func_name="nxprocess_init"
+             )
+
+     def _set_entry(self, entry):
+         self.entry = entry or "entry"
+         data_path = posix_join("/", self.entry)
+         self.data_path = data_path
+
+     def write(self, result, process_name, processing_index=0, config=None, data_name="data", is_frames_stack=True) -> str:
+         """
+         Write the result in the current NXProcess group.
+
+         Parameters
+         ----------
+         result: numpy.ndarray
+             Array containing the processing result
+         process_name: str
+             Name of the processing
+         processing_index: int
+             Index of the processing (in a pipeline)
+         config: dict, optional
+             Dictionary containing the configuration.
+         """
+         entry_path = self.data_path
+         nx_process_path = "/".join([entry_path, process_name])
+
+         if config is not None:
+             config.update(
+                 {
+                     "@NX_class": "NXcollection",
+                 }
+             )
+
+         class HDF5Volume(_HDF5VolumeBase):
+             # TODO: not a big fan of redefining a class to set the dataset name,
+             # but the default value "data" is the same as in tomoscan,
+             # so it should be enough for automation
+             DATA_DATASET_NAME = f"results/{data_name}"
+
+         volume = HDF5Volume(
+             file_path=self.fname,
+             data_path=nx_process_path,
+             metadata=config,
+             overwrite=self.overwrite,
+         )
+         assert volume.data_url is not None
+
+         # Dispatch on the type of 'result'
+         if isinstance(result, dict):
+             pass
+         elif isinstance(result, np.ndarray):
+             if result.ndim == 2:
+                 result = result.reshape(1, result.shape[0], result.shape[1])
+             volume.data = result
+         elif isinstance(result, VirtualLayout):
+             # TODO: add a test in tomoscan to ensure this use case is handled
+             volume.data = result
+         else:
+             raise TypeError(f"result is expected to be a dict or a numpy array, not {type(result)}")
+
+         if volume.metadata is not None:
+             volume.metadata = convert_dict_values(
+                 volume.metadata,
+                 {None: "None"},
+             )
+         # if result is a dictionary, then we only have some metadata to save
+         if isinstance(result, dict):
+             volume.save_metadata()
+             results_path = posix_join(nx_process_path, "results")
+         else:
+             volume.save()
+             results_path = posix_join(nx_process_path, "results", data_name)
+
+         # add nabu-specific information
+         nabu_process_info = {
+             "@NX_class": "NXentry",
+             f"{process_name}@NX_class": "NXprocess",
+             f"{process_name}/program": "nabu",
+             f"{process_name}/version": nabu_version,
+             f"{process_name}/date": get_datetime(),
+             f"{process_name}/sequence_index": np.int32(processing_index),
+         }
+         if isinstance(result, np.ndarray):
+             nabu_process_info.update(
+                 {
+                     f"{process_name}/results@NX_class": "NXdata",
+                     f"{process_name}/results@signal": data_name,
+                 }
+             )
+             if is_frames_stack:
+                 nabu_process_info.update(
+                     {
+                         f"{process_name}/results@interpretation": "image",
+                     }
+                 )
+             # prepare the direct-access plots
+             nabu_process_info.update(
+                 {
+                     f"{process_name}@default": "results",
+                     "@default": f"{process_name}/results",
+                 }
+             )
+         elif isinstance(result, dict):
+             nabu_process_info.update(
+                 {
+                     f"{process_name}/results": convert_dict_values(
+                         result,
+                         {None: "None"},
+                     ),
+                 }
+             )
+
+         dicttonx(
+             nabu_process_info,
+             h5file=self.fname,
+             h5path=entry_path,
+             update_mode="replace",
+             mode="a",
+         )
+         return results_path
+
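For orientation, a minimal usage sketch of NXProcessWriter (the file name, entry and config below are illustrative, not taken from the diff):

    writer = NXProcessWriter("/tmp/rec.h5", entry="entry0000", overwrite=True)
    results_path = writer.write(
        np.zeros((10, 128, 128), dtype="f"),  # stack of 10 frames
        "reconstruction",
        config={"method": "FBP"},
    )
    # results_path == "/entry0000/reconstruction/results/data"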
+ def create_virtual_layout(files_or_pattern, h5_path, base_dir=None, axis=0):
+     """
+     Create a HDF5 virtual layout.
+
+     Parameters
+     ----------
+     files_or_pattern: str or list
+         A list of file names, or a wildcard pattern.
+         If a list is provided, it will not be sorted! This will have to be
+         done before calling this function.
+     h5_path: str
+         Path inside the HDF5 input file(s)
+     base_dir: str, optional
+         Base directory when using relative file names.
+     axis: int, optional
+         Data axis to merge. Default is 0.
+     """
+     prev_cwd = None
+     if base_dir is not None:
+         prev_cwd = getcwd()
+         chdir(base_dir)
+     if isinstance(files_or_pattern, str):
+         files_list = glob(files_or_pattern)
+         files_list.sort()
+     else:  # list
+         files_list = files_or_pattern
+     if files_list == []:
+         raise ValueError("Nothing found as pattern %s" % files_or_pattern)
+     virtual_sources = []
+     shapes = []
+     for fname in files_list:
+         with HDF5File(fname, "r", swmr=True) as fid:
+             shape = fid[h5_path].shape
+         vsource = VirtualSource(fname, name=h5_path, shape=shape)
+         virtual_sources.append(vsource)
+         shapes.append(shape)
+     total_shape = merged_shape(shapes, axis=axis)
+
+     virtual_layout = VirtualLayout(
+         shape=total_shape,
+         dtype='f'
+     )
+     start_idx = 0
+     for vsource, shape in zip(virtual_sources, shapes):
+         n_imgs = shape[axis]
+         # Perhaps there is a more elegant way
+         if axis == 0:
+             virtual_layout[start_idx:start_idx + n_imgs] = vsource
+         elif axis == 1:
+             virtual_layout[:, start_idx:start_idx + n_imgs, :] = vsource
+         elif axis == 2:
+             virtual_layout[:, :, start_idx:start_idx + n_imgs] = vsource
+         else:
+             raise ValueError("Only axes 0, 1 and 2 are supported")
+         start_idx += n_imgs
+
+     if base_dir is not None:
+         chdir(prev_cwd)
+     return virtual_layout
+
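A hedged sketch of how the resulting layout can be bound to a virtual dataset with plain h5py (paths and dataset names are hypothetical):

    import h5py

    layout = create_virtual_layout(
        "rec_part_*.h5",  # glob pattern, sorted before use
        "entry/reconstruction/results/data",
        base_dir="/tmp/results",
        axis=0,
    )
    with h5py.File("/tmp/results/merged.h5", "a") as f:
        # each source file becomes one slab of the merged dataset
        f.create_virtual_dataset("data", layout)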
+ def merge_hdf5_files(
+     files_or_pattern, h5_path, output_file, process_name,
+     output_entry=None, output_filemode="a", data_name="data",
+     processing_index=0, config=None, base_dir=None,
+     axis=0, overwrite=False
+ ):
+     """
+     Merge several HDF5 files into one, through a virtual layout.
+
+     Parameters
+     -----------
+     files_or_pattern: str or list
+         A list of file names, or a wildcard pattern.
+         If a list is provided, it will not be sorted! This will have to be
+         done before calling this function.
+     h5_path: str
+         Path inside the HDF5 input file(s)
+     output_file: str
+         Path of the output file
+     process_name: str
+         Name of the process
+     output_entry: str, optional
+         Output HDF5 root entry (default is "/entry")
+     output_filemode: str, optional
+         File mode for the output file. Default is "a" (append)
+     processing_index: int, optional
+         Processing index for the output file. Default is 0.
+     config: dict, optional
+         Dictionary describing the configuration needed to get the results.
+     base_dir: str, optional
+         Base directory when using relative file names.
+     axis: int, optional
+         Data axis to merge. Default is 0.
+     overwrite: bool, optional
+         Whether to overwrite already existing data in the final file.
+         Default is False.
+     """
+     if base_dir is not None:
+         prev_cwd = getcwd()
+     virtual_layout = create_virtual_layout(files_or_pattern, h5_path, base_dir=base_dir, axis=axis)
+     nx_file = NXProcessWriter(
+         output_file,
+         entry=output_entry, filemode=output_filemode, overwrite=overwrite
+     )
+     nx_file.write(
+         virtual_layout,
+         process_name,
+         processing_index=processing_index,
+         config=config,
+         data_name=data_name,
+         is_frames_stack=True
+     )
+     if base_dir is not None and prev_cwd != getcwd():
+         chdir(prev_cwd)
+
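Putting the two previous pieces together, an illustrative call (all file names and entries are made up):

    merge_hdf5_files(
        "rec_part_*.h5",
        "entry/reconstruction/results/data",
        "merged.h5",
        "reconstruction",
        output_entry="entry",
        base_dir="/tmp/results",
        axis=0,
        overwrite=True,
    )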
+ class TIFFWriter(Writer):
+     def __init__(self, fname, multiframe=False, start_index=0, filemode=None, append=False, big_tiff=None):
+         """
+         Tiff writer.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         multiframe: bool, optional
+             Whether to write all data in one single file. Default is False.
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file
+             (unless multiframe is set to True).
+             In this case, the output is a series of files `filename_0000.tif`,
+             `filename_0001.tif`, etc. This parameter is the starting index for
+             file names.
+             This option is ignored when multiframe is True.
+         filemode: str, optional
+             DEPRECATED. Will be ignored. Please refer to 'append'.
+         append: bool, optional
+             Whether to append data to the file rather than overwriting. Default is False.
+         big_tiff: bool, optional
+             Whether to write in "big tiff" format: https://www.awaresystems.be/imaging/tiff/bigtiff.html
+             Default is True when multiframe is True.
+             Note that a default "standard" tiff cannot exceed 4 GB.
+
+         Notes
+         ------
+         If multiframe is False (default), then each image will be written in a
+         dedicated tiff file.
+         """
+         super().__init__(fname)
+         self.multiframe = multiframe
+         self.start_index = start_index
+         self.append = append
+         if big_tiff is None:
+             big_tiff = multiframe
+         if multiframe and not big_tiff:
+             # raise an error instead?
+             print("big_tiff was set to False while multiframe was set to True. This will probably be problematic.")
+         self.big_tiff = big_tiff
+         # Compat.
+         self.filemode = filemode
+         if filemode is not None:
+             deprecation_warning("Ignored parameter 'filemode'. Please use the 'append' parameter")
+
+     def write(self, data, *args, config=None, **kwargs):
+         ext = None
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array and not {type(data)}")
+         # Single image, or multiple images in the same file
+         if self.multiframe:
+             volume = MultiTIFFVolume(
+                 self.fname,
+                 data=data,
+                 metadata={
+                     "config": config,
+                 },
+             )
+             file_path = self.fname
+         # Multiple images, one file per image
+         else:
+             if data.ndim == 2 or (data.ndim == 3 and data.shape[0] == 1):
+                 data = data.reshape(1, data.shape[-2], data.shape[-1])
+                 file_path = self.fname
+                 volume = MultiTIFFVolume(
+                     self.fname,
+                     data=data,
+                     metadata={
+                         "config": config,
+                     },
+                 )
+             else:
+                 file_path, ext = path.splitext(self.fname)
+                 # As in nabu the base name of the tiff file can differ from the folder name,
+                 # we need to redefine it. For interoperability with the rest of the tomotools
+                 # suite, it is highly recommended that they stay the same.
+                 # TODO: to simplify things, we should ensure this is the case by default
+                 # (if no output is provided, fall back to using the folder name as the
+                 # output file name prefix)
+                 class TIFFVolume(_TIFFVolumeBase):
+                     # we cannot be sure that the output directory name is the base name of file_path
+                     DEFAULT_DATA_DATA_PATH_PATTERN = path.basename(file_path) + "_{index_zfill4}" + ext
+
+                 volume = TIFFVolume(
+                     path.dirname(file_path),
+                     data=data,
+                     metadata={
+                         "config": config,
+                     },
+                     start_index=self.start_index,
+                 )
+
+         volume.save()
+
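To illustrate the two modes described in the docstring, a short sketch (output paths are made up):

    frames = np.ones((5, 64, 64), dtype=np.float32)
    # One file per frame: vol_0000.tif, vol_0001.tif, ...
    TIFFWriter("/tmp/out/vol.tif").write(frames)
    # One multiframe (big)tiff holding the whole stack
    TIFFWriter("/tmp/out/vol.tif", multiframe=True).write(frames)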
+ class EDFWriter(Writer):
+     def __init__(self, fname, start_index=0, filemode="w"):
+         """
+         EDF (ESRF Data Format) writer.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file.
+             In this case, the output is a series of files `filename_0000.edf`,
+             `filename_0001.edf`, etc. This parameter is the starting index for
+             file names.
+         """
+         super().__init__(fname)
+         self.filemode = filemode
+         self.start_index = start_index
+
+     def write(self, data, *args, config=None, **kwargs):
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array and not {type(data)}")
+         header = {
+             "software": "nabu",
+             "date": get_datetime(),
+         }
+         if data.ndim == 2:
+             data = data.reshape(1, data.shape[0], data.shape[1])
+
+         volume = EDFVolume(
+             path.dirname(self.fname),
+             data=data,
+             start_index=self.start_index,
+             header=header
+         )
+         volume.save()
+
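Usage mirrors TIFFWriter; a minimal sketch (the path is illustrative):

    # writes one indexed .edf file per frame into /tmp/out, starting at index 100
    EDFWriter("/tmp/out/vol.edf", start_index=100).write(
        np.zeros((3, 64, 64), dtype=np.float32)
    )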
+ class JP2Writer(Writer):
+     def __init__(
+         self, fname, start_index=0, filemode="wb",
+         psnr=None, cratios=None, auto_convert=True, float_clip_values=None, n_threads=None
+     ):
+         """
+         JPEG2000 writer. This class requires the python package `glymur` and the
+         library `libopenjp2`.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file.
+             The output is a series of files `filename_0000.jp2`, `filename_0001.jp2`, etc.
+             This parameter is the starting index for file names.
+         psnr: list of int, optional
+             The PSNR (Peak Signal-to-Noise Ratio) for each jpeg2000 layer.
+             This defines a quality metric for lossy compression.
+             The number "0" stands for lossless compression.
+         cratios: list of int, optional
+             Compression ratio for each jpeg2000 layer
+         auto_convert: bool, optional
+             Whether to automatically cast floating point data to uint16.
+             Default is True.
+         float_clip_values: tuple of floats, optional
+             If set to a tuple of two values (min, max), then each image's values will
+             be clipped to these minimum and maximum values.
+         n_threads: int, optional
+             Number of threads to use for encoding. Default is the number of available threads.
+             Needs libopenjpeg >= 2.4.0.
+         """
+         super().__init__(fname)
+         if not __have_jp2k__:
+             raise ValueError("Need glymur python package and libopenjp2 library")
+         self.n_threads = n_threads
+         # self.setup_multithread_encoding(n_threads=n_threads, what_if_not_available="ignore")
+         # self.filemode = filemode
+         self.start_index = start_index
+         self.auto_convert = auto_convert
+         if psnr is not None and np.isscalar(psnr):
+             psnr = [psnr]
+         self.psnr = psnr
+         self.cratios = cratios
+         self._vmin = None
+         self._vmax = None
+         self.clip_float = False
+         if float_clip_values is not None:
+             self._float_clip_min, self._float_clip_max = float_clip_values
+             self.clip_float = True
+
+     def write(self, data, *args, **kwargs):
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array and not {type(data)}")
+
+         if data.ndim == 2:
+             data = data.reshape(1, data.shape[0], data.shape[1])
+
+         if data.ndim == 3 and data.shape[0] == 1:
+             # TODO: add an option "with or without a pattern" instead? This would look more reliable.
+             class JP2KVolume(_JP2KVolumeBase):
+                 # we cannot be sure that the output directory name is the base name of the file path
+                 DEFAULT_DATA_DATA_PATH_PATTERN = self.fname
+         else:
+             file_path, ext = path.splitext(self.fname)
+             # As in nabu the base name of the jp2 file can differ from the folder name,
+             # we need to redefine it. For interoperability with the rest of the tomotools
+             # suite, it is highly recommended that they stay the same.
+             # TODO: to simplify things, we should ensure this is the case by default
+             # (if no output is provided, fall back to using the folder name as the
+             # output file name prefix)
+             class JP2KVolume(_JP2KVolumeBase):
+                 # we cannot be sure that the output directory name is the base name of file_path
+                 DEFAULT_DATA_DATA_PATH_PATTERN = path.basename(file_path) + "_{index_zfill4}" + ext
+
+         volume = JP2KVolume(
+             folder=path.dirname(self.fname),
+             start_index=self.start_index,
+             cratios=self.cratios,
+             psnr=self.psnr,
+             n_threads=self.n_threads,
+         )
+
+         if data.dtype != np.uint16 and self.auto_convert:
+             if self.clip_float:
+                 data = np.clip(data, self._float_clip_min, self._float_clip_max)
+             data = rescale_data(data, 0, 65535, data_min=self._vmin, data_max=self._vmax)
+             data = data.astype(np.uint16)
+
+         volume.data = data
+         config = kwargs.get("config", None)
+         if config is not None:
+             volume.metadata = {"config": config}
+         volume.save()
+
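A sketch of a lossy JPEG2000 write with float clipping and uint16 rescaling (all values are illustrative):

    writer = JP2Writer(
        "/tmp/out/vol.jp2",
        cratios=[20, 10, 1],           # per-layer compression ratios
        float_clip_values=(0.0, 1.0),  # clip floats before the uint16 rescale
    )
    writer.write(np.random.rand(3, 64, 64).astype(np.float32))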
+ Writers = {
+     "h5": NXProcessWriter,
+     "hdf5": NXProcessWriter,
+     "nx": NXProcessWriter,
+     "nexus": NXProcessWriter,
+     "tif": TIFFWriter,
+     "tiff": TIFFWriter,
+     "j2k": JP2Writer,
+     "jp2": JP2Writer,
+     "jp2k": JP2Writer,
+     "edf": EDFWriter,
+ }
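This mapping lets callers pick a writer class from a user-provided file extension, e.g.:

    ext = "tiff"
    writer_cls = Writers.get(ext.lower())
    if writer_cls is None:
        raise ValueError(f"No writer available for extension '{ext}'")
    writer = writer_cls(f"/tmp/out/vol.{ext}")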