nabu 2022.3.0a1__py3-none-any.whl → 2023.1.0a2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (96)
  1. nabu/__init__.py +1 -1
  2. nabu/app/bootstrap.py +7 -1
  3. nabu/app/cast_volume.py +8 -2
  4. nabu/app/cli_configs.py +69 -0
  5. nabu/app/composite_cor.py +97 -0
  6. nabu/app/create_distortion_map_from_poly.py +118 -0
  7. nabu/app/nx_z_splitter.py +1 -1
  8. nabu/app/prepare_weights_double.py +21 -16
  9. nabu/app/reconstruct_helical.py +0 -1
  10. nabu/app/utils.py +10 -5
  11. nabu/cuda/processing.py +1 -0
  12. nabu/cuda/tests/test_padding.py +1 -0
  13. nabu/cuda/utils.py +1 -0
  14. nabu/distributed/__init__.py +0 -0
  15. nabu/distributed/utils.py +57 -0
  16. nabu/distributed/worker.py +543 -0
  17. nabu/estimation/cor.py +3 -7
  18. nabu/estimation/cor_sino.py +2 -1
  19. nabu/estimation/distortion.py +6 -4
  20. nabu/io/cast_volume.py +10 -1
  21. nabu/io/detector_distortion.py +305 -0
  22. nabu/io/reader.py +37 -7
  23. nabu/io/reader_helical.py +0 -3
  24. nabu/io/tests/test_cast_volume.py +16 -4
  25. nabu/io/tests/test_detector_distortion.py +178 -0
  26. nabu/io/tests/test_writers.py +2 -2
  27. nabu/io/tiffwriter_zmm.py +2 -3
  28. nabu/io/writer.py +84 -1
  29. nabu/io/writer_BACKUP_193259.py +556 -0
  30. nabu/io/writer_BACKUP_193381.py +556 -0
  31. nabu/io/writer_BASE_193259.py +548 -0
  32. nabu/io/writer_BASE_193381.py +548 -0
  33. nabu/io/writer_LOCAL_193259.py +550 -0
  34. nabu/io/writer_LOCAL_193381.py +550 -0
  35. nabu/io/writer_REMOTE_193259.py +557 -0
  36. nabu/io/writer_REMOTE_193381.py +557 -0
  37. nabu/misc/fourier_filters.py +2 -0
  38. nabu/misc/rotation.py +0 -1
  39. nabu/misc/tests/test_rotation.py +1 -0
  40. nabu/pipeline/config_validators.py +10 -0
  41. nabu/pipeline/datadump.py +1 -1
  42. nabu/pipeline/dataset_validator.py +0 -1
  43. nabu/pipeline/detector_distortion_provider.py +20 -0
  44. nabu/pipeline/estimators.py +35 -21
  45. nabu/pipeline/fallback_utils.py +1 -1
  46. nabu/pipeline/fullfield/chunked.py +30 -15
  47. nabu/pipeline/fullfield/chunked_black.py +881 -0
  48. nabu/pipeline/fullfield/chunked_cuda.py +34 -4
  49. nabu/pipeline/fullfield/chunked_fb.py +966 -0
  50. nabu/pipeline/fullfield/chunked_google.py +921 -0
  51. nabu/pipeline/fullfield/chunked_pep8.py +920 -0
  52. nabu/pipeline/fullfield/computations.py +7 -6
  53. nabu/pipeline/fullfield/dataset_validator.py +1 -1
  54. nabu/pipeline/fullfield/grouped_cuda.py +6 -0
  55. nabu/pipeline/fullfield/nabu_config.py +15 -3
  56. nabu/pipeline/fullfield/processconfig.py +5 -0
  57. nabu/pipeline/fullfield/reconstruction.py +1 -2
  58. nabu/pipeline/helical/gridded_accumulator.py +1 -8
  59. nabu/pipeline/helical/helical_chunked_regridded.py +48 -33
  60. nabu/pipeline/helical/helical_reconstruction.py +1 -9
  61. nabu/pipeline/helical/nabu_config.py +11 -14
  62. nabu/pipeline/helical/span_strategy.py +11 -4
  63. nabu/pipeline/helical/tests/test_accumulator.py +0 -3
  64. nabu/pipeline/helical/tests/test_pipeline_elements_full.py +0 -6
  65. nabu/pipeline/helical/tests/test_strategy.py +0 -1
  66. nabu/pipeline/helical/weight_balancer.py +0 -1
  67. nabu/pipeline/params.py +4 -0
  68. nabu/pipeline/processconfig.py +6 -2
  69. nabu/pipeline/writer.py +9 -4
  70. nabu/preproc/distortion.py +4 -3
  71. nabu/preproc/double_flatfield.py +16 -4
  72. nabu/preproc/double_flatfield_cuda.py +3 -2
  73. nabu/preproc/double_flatfield_variable_region.py +13 -4
  74. nabu/preproc/flatfield.py +29 -7
  75. nabu/preproc/flatfield_cuda.py +0 -1
  76. nabu/preproc/flatfield_variable_region.py +5 -2
  77. nabu/preproc/phase.py +0 -1
  78. nabu/preproc/phase_cuda.py +0 -1
  79. nabu/preproc/tests/test_ctf.py +4 -3
  80. nabu/preproc/tests/test_flatfield.py +6 -7
  81. nabu/reconstruction/fbp_opencl.py +1 -1
  82. nabu/reconstruction/filtering.py +0 -1
  83. nabu/reconstruction/tests/test_fbp.py +1 -0
  84. nabu/resources/dataset_analyzer.py +0 -1
  85. nabu/resources/templates/bm05_pag.conf +34 -0
  86. nabu/resources/templates/id16_ctf.conf +2 -1
  87. nabu/resources/tests/test_nxflatfield.py +0 -1
  88. nabu/resources/tests/test_units.py +0 -1
  89. nabu/stitching/frame_composition.py +7 -1
  90. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/METADATA +2 -7
  91. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/RECORD +96 -75
  92. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/WHEEL +1 -1
  93. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/entry_points.txt +2 -1
  94. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/LICENSE +0 -0
  95. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/top_level.txt +0 -0
  96. {nabu-2022.3.0a1.dist-info → nabu-2023.1.0a2.dist-info}/zip-safe +0 -0
@@ -0,0 +1,548 @@
+ from glob import glob
+ from os import path, getcwd, chdir
+ from posixpath import join as posix_join
+ from datetime import datetime
+ import numpy as np
+ from h5py import VirtualSource, VirtualLayout
+ from tomoscan.io import HDF5File
+ from tomoscan.esrf import EDFVolume
+ from tomoscan.esrf import HDF5Volume as _HDF5VolumeBase
+ from tomoscan.esrf import TIFFVolume as _TIFFVolumeBase, MultiTIFFVolume
+ from tomoscan.esrf import JP2KVolume as _JP2KVolumeBase
+ from .. import version as nabu_version
+ from ..utils import merged_shape, deprecation_warning
+ from ..misc.utils import rescale_data
+ from .utils import check_h5py_version, convert_dict_values
+ from silx.io.dictdump import dicttonx
+ try:
+     from glymur import Jp2k, set_option as glymur_set_option
+     from glymur.version import openjpeg_version, version as glymur_version
+     __have_jp2k__ = True
+ except ImportError:
+     __have_jp2k__ = False
+
+ def get_datetime():
+     """
+     Function used by some writers to indicate the current date.
+     """
+     return datetime.now().replace(microsecond=0).isoformat()
+
+
+ class Writer:
+     """
+     Base class for all writers.
+     """
+     def __init__(self, fname):
+         self.fname = fname
+
+     def get_filename(self):
+         return self.fname
+
+
+ class NXProcessWriter(Writer):
+     """
+     A class to write a Nexus file with a processing result.
+     """
+     def __init__(self, fname, entry=None, filemode=None, overwrite=False):
+         """
+         Initialize a NXProcessWriter.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the HDF5 file.
+         entry: str, optional
+             Entry in the HDF5 file. Default is "entry".
+         filemode: str, optional
+             DEPRECATED. Has no effect.
+         overwrite: bool, optional
+             Whether to overwrite already existing data. Default is False.
+         """
+         super().__init__(fname)
+         self._set_entry(entry)
+         self.overwrite = overwrite
+         check_h5py_version()
+         if filemode is not None:
+             deprecation_warning(
+                 "'filemode' is deprecated and has no effect", func_name="nxprocess_init"
+             )
+
+     def _set_entry(self, entry):
+         self.entry = entry or "entry"
+         data_path = posix_join("/", self.entry)
+         self.data_path = data_path
+
+     def write(self, result, process_name, processing_index=0, config=None, data_name="data", is_frames_stack=True) -> str:
+         """
+         Write the result in the current NXProcess group.
+
+         Parameters
+         ----------
+         result: numpy.ndarray
+             Array containing the processing result
+         process_name: str
+             Name of the processing
+         processing_index: int
+             Index of the processing (in a pipeline)
+         config: dict, optional
+             Dictionary containing the configuration.
+         data_name: str, optional
+             Name of the dataset holding the result. Default is "data".
+         is_frames_stack: bool, optional
+             Whether the result is a stack of frames. Default is True.
+
+         Returns
+         -------
+         results_path: str
+             Path to the created dataset (or group) in the output file.
+         """
+         entry_path = self.data_path
+         nx_process_path = "/".join([entry_path, process_name])
+
+         if config is not None:
+             config.update(
+                 {
+                     "@NX_class": "NXcollection",
+                 }
+             )
+
+         class HDF5Volume(_HDF5VolumeBase):
+             # TODO: not a big fan of redefining a class to set the dataset name,
+             # but the default value "data" is the same as in tomoscan,
+             # so it should be enough for automation
+             DATA_DATASET_NAME = f"results/{data_name}"
+
+         volume = HDF5Volume(
+             file_path=self.fname,
+             data_path=nx_process_path,
+             metadata=config,
+             overwrite=self.overwrite,
+         )
+         assert volume.data_url is not None
+
+         # Handle each supported type of result
+         if isinstance(result, dict):
+             pass
+         elif isinstance(result, np.ndarray):
+             if result.ndim == 2:
+                 result = result.reshape(1, result.shape[0], result.shape[1])
+             volume.data = result
+         elif isinstance(result, VirtualLayout):
+             # TODO: add test on tomoscan to ensure this use case is handled
+             volume.data = result
+         else:
+             raise TypeError(f"result is expected to be a dict or a numpy array, not {type(result)}")
+
+         if volume.metadata is not None:
+             volume.metadata = convert_dict_values(
+                 volume.metadata,
+                 {None: "None"},
+             )
+         # if result is a dictionary then we only have some metadata to be saved
+         if isinstance(result, dict):
+             volume.save_metadata()
+             results_path = posix_join(nx_process_path, "results")
+         else:
+             volume.save()
+             results_path = posix_join(nx_process_path, "results", data_name)
+
+         # add nabu-specific information
+         nabu_process_info = {
+             "@NX_class": "NXentry",
+             f"{process_name}@NX_class": "NXprocess",
+             f"{process_name}/program": "nabu",
+             f"{process_name}/version": nabu_version,
+             f"{process_name}/date": get_datetime(),
+             f"{process_name}/sequence_index": np.int32(processing_index),
+         }
+         if isinstance(result, np.ndarray):
+             nabu_process_info.update(
+                 {
+                     f"{process_name}/results@NX_class": "NXdata",
+                     f"{process_name}/results@signal": data_name,
+                 }
+             )
+             if is_frames_stack:
+                 nabu_process_info.update(
+                     {
+                         f"{process_name}/results@interpretation": "image",
+                     }
+                 )
+
+             # prepare the direct access plots
+             nabu_process_info.update(
+                 {
+                     f"{process_name}@default": "results",
+                     "@default": f"{process_name}/results",
+                 }
+             )
+         elif isinstance(result, dict):
+             nabu_process_info.update(
+                 {
+                     f"{process_name}/results": convert_dict_values(
+                         result,
+                         {None: "None"},
+                     ),
+                 }
+             )
+
+         dicttonx(
+             nabu_process_info,
+             h5file=self.fname,
+             h5path=entry_path,
+             update_mode="replace",
+             mode="a",
+         )
+         return results_path
+
+
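
For reference, a minimal usage sketch of NXProcessWriter; the file name, entry and config below are made-up placeholders:

# Hedged sketch: write a stack of frames under /entry0000/reconstruction/results/data.
import numpy as np

writer = NXProcessWriter("rec.h5", entry="entry0000", overwrite=True)
results_path = writer.write(
    np.zeros((10, 64, 64), dtype="f"),   # stack of 10 frames
    "reconstruction",
    config={"method": "FBP"},            # stored as a NXcollection
)
# results_path == "/entry0000/reconstruction/results/data"
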
+ def create_virtual_layout(files_or_pattern, h5_path, base_dir=None, axis=0):
+     """
+     Create a HDF5 virtual layout.
+
+     Parameters
+     ----------
+     files_or_pattern: str or list
+         A list of file names, or a wildcard pattern.
+         If a list is provided, it will not be sorted! This will have to be
+         done before calling this function.
+     h5_path: str
+         Path inside the HDF5 input file(s)
+     base_dir: str, optional
+         Base directory when using relative file names.
+     axis: int, optional
+         Data axis to merge. Default is 0.
+     """
+     prev_cwd = None
+     if base_dir is not None:
+         prev_cwd = getcwd()
+         chdir(base_dir)
+     if isinstance(files_or_pattern, str):
+         files_list = glob(files_or_pattern)
+         files_list.sort()
+     else:  # list
+         files_list = files_or_pattern
+     if files_list == []:
+         raise ValueError("No files found matching pattern %s" % files_or_pattern)
+     virtual_sources = []
+     shapes = []
+     for fname in files_list:
+         with HDF5File(fname, "r", swmr=True) as fid:
+             shape = fid[h5_path].shape
+         vsource = VirtualSource(fname, name=h5_path, shape=shape)
+         virtual_sources.append(vsource)
+         shapes.append(shape)
+     total_shape = merged_shape(shapes, axis=axis)
+
+     virtual_layout = VirtualLayout(
+         shape=total_shape,
+         dtype='f'
+     )
+     start_idx = 0
+     for vsource, shape in zip(virtual_sources, shapes):
+         n_imgs = shape[axis]
+         # Perhaps there is a more elegant way
+         if axis == 0:
+             virtual_layout[start_idx:start_idx + n_imgs] = vsource
+         elif axis == 1:
+             virtual_layout[:, start_idx:start_idx + n_imgs, :] = vsource
+         elif axis == 2:
+             virtual_layout[:, :, start_idx:start_idx + n_imgs] = vsource
+         else:
+             raise ValueError("Only axes 0, 1, 2 are supported")
+         start_idx += n_imgs
+
+     if base_dir is not None:
+         chdir(prev_cwd)
+     return virtual_layout
+
+
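
A minimal sketch of how the returned layout can be bound to a HDF5 virtual dataset with h5py; the file names below are made-up placeholders:

# Hedged sketch: merge the "/data" dataset of several files into one virtual dataset.
import h5py

layout = create_virtual_layout("part_*.h5", "/data", axis=0)
with h5py.File("merged.h5", "w") as f:
    f.create_virtual_dataset("data", layout)
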
+ def merge_hdf5_files(
+     files_or_pattern, h5_path, output_file, process_name,
+     output_entry=None, output_filemode="a", data_name="data",
+     processing_index=0, config=None, base_dir=None,
+     axis=0, overwrite=False
+ ):
+     """
+     Merge several HDF5 files into a single file, as a HDF5 virtual dataset.
+
+     Parameters
+     -----------
+     files_or_pattern: str or list
+         A list of file names, or a wildcard pattern.
+         If a list is provided, it will not be sorted! This will have to be
+         done before calling this function.
+     h5_path: str
+         Path inside the HDF5 input file(s)
+     output_file: str
+         Path of the output file
+     process_name: str
+         Name of the process
+     output_entry: str, optional
+         Output HDF5 root entry (default is "/entry")
+     output_filemode: str, optional
+         File mode for output file. Default is "a" (append)
+     data_name: str, optional
+         Name of the dataset holding the merged data. Default is "data".
+     processing_index: int, optional
+         Processing index for the output file. Default is 0.
+     config: dict, optional
+         Dictionary describing the configuration needed to get the results.
+     base_dir: str, optional
+         Base directory when using relative file names.
+     axis: int, optional
+         Data axis to merge. Default is 0.
+     overwrite: bool, optional
+         Whether to overwrite already existing data in the final file.
+         Default is False.
+     """
+     if base_dir is not None:
+         prev_cwd = getcwd()
+     virtual_layout = create_virtual_layout(files_or_pattern, h5_path, base_dir=base_dir, axis=axis)
+     nx_file = NXProcessWriter(
+         output_file,
+         entry=output_entry, filemode=output_filemode, overwrite=overwrite
+     )
+     nx_file.write(
+         virtual_layout,
+         process_name,
+         processing_index=processing_index,
+         config=config,
+         data_name=data_name,
+         is_frames_stack=True
+     )
+     if base_dir is not None and prev_cwd != getcwd():
+         chdir(prev_cwd)
+
+
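
A minimal usage sketch, with hypothetical file and process names:

# Hedged sketch: gather per-chunk reconstruction files into a single NXprocess entry.
merge_hdf5_files(
    "rec_chunk_*.h5",            # sorted automatically when a pattern is given
    "/entry/reconstruction/results/data",
    "rec_merged.h5",
    "reconstruction",
    output_entry="entry",
    axis=0,
    overwrite=True,
)
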
+ class TIFFWriter(Writer):
+     def __init__(self, fname, multiframe=False, start_index=0, filemode=None, append=False, big_tiff=None):
+         """
+         Tiff writer.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         multiframe: bool, optional
+             Whether to write all data in one single file. Default is False.
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file
+             (unless multiframe is set to True).
+             In this case, the output is a series of files `filename_0000.tif`,
+             `filename_0001.tif`, etc. This parameter is the starting index for
+             file names.
+             This option is ignored when multiframe is True.
+         filemode: str, optional
+             DEPRECATED. Will be ignored. Please refer to 'append'.
+         append: bool, optional
+             Whether to append data to the file rather than overwriting. Default is False.
+         big_tiff: bool, optional
+             Whether to write in "big tiff" format: https://www.awaresystems.be/imaging/tiff/bigtiff.html
+             Default is True when multiframe is True.
+             Note that a standard tiff file cannot exceed 4 GB.
+
+         Notes
+         ------
+         If multiframe is False (default), then each image will be written in a
+         dedicated tiff file.
+         """
+         super().__init__(fname)
+         self.multiframe = multiframe
+         self.start_index = start_index
+         self.append = append
+         if big_tiff is None:
+             big_tiff = multiframe
+         if multiframe and not big_tiff:
+             # raise error ?
+             print("big_tiff was set to False while multiframe was set to True. This will probably be problematic.")
+         self.big_tiff = big_tiff
+         # Compat.
+         self.filemode = filemode
+         if filemode is not None:
+             deprecation_warning("Ignored parameter 'filemode'. Please use the 'append' parameter")
+
+     def write(self, data, *args, config=None, **kwargs):
+         ext = None
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array, not {type(data)}")
+         # All frames in one single file
+         if self.multiframe:
+             volume = MultiTIFFVolume(
+                 self.fname,
+                 data=data,
+                 metadata={
+                     "config": config,
+                 },
+             )
+             file_path = self.fname
+         # One file per frame
+         else:
+             if data.ndim == 2:
+                 data = data.reshape(1, data.shape[0], data.shape[1])
+             if data.ndim == 3 and data.shape[0] == 1:
+                 # Single frame: write it directly to self.fname
+                 file_path = self.fname
+                 volume = MultiTIFFVolume(
+                     self.fname,
+                     data=data,
+                     metadata={
+                         "config": config,
+                     },
+                 )
+             else:
+                 file_path, ext = path.splitext(self.fname)
+                 # In nabu the base name of the tiff file can differ from the folder name, so we
+                 # need to redefine it. For interoperability with the rest of the tomotools suite
+                 # it is highly recommended that they stay the same.
+                 # TODO: to simplify things we should ensure this is the case by default
+                 # (if no output is provided, fall back to using the folder name as the output file name prefix)
+                 class TIFFVolume(_TIFFVolumeBase):
+                     # we cannot be sure that the output directory name is the base name of file_path
+                     DEFAULT_DATA_DATA_PATH_PATTERN = path.basename(file_path) + "_{index_zfill4}" + ext
+
+                 volume = TIFFVolume(
+                     path.dirname(file_path),
+                     data=data,
+                     metadata={
+                         "config": config,
+                     },
+                     start_index=self.start_index,
+                 )
+
+         volume.save()
+
+
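
A minimal usage sketch of TIFFWriter, with hypothetical paths:

# Hedged sketch: writing a stack of 5 frames with multiframe=False is expected to
# produce files like out/rec_0000.tiff, out/rec_0001.tiff, ... per the pattern above.
import numpy as np

writer = TIFFWriter("out/rec.tiff", multiframe=False, start_index=0)
writer.write(np.zeros((5, 64, 64), dtype=np.float32), config={"method": "FBP"})
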
+ class EDFWriter(Writer):
+     def __init__(self, fname, start_index=0, filemode="w"):
+         """
+         EDF (ESRF Data Format) writer.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file.
+             In this case, the output is a series of files `filename_0000.edf`,
+             `filename_0001.edf`, etc. This parameter is the starting index for
+             file names.
+         """
+         super().__init__(fname)
+         self.filemode = filemode
+         self.start_index = start_index
+
+     def write(self, data, *args, config=None, **kwargs):
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array, not {type(data)}")
+         header = {
+             "software": "nabu",
+             "date": get_datetime(),
+         }
+         if data.ndim == 2:
+             data = data.reshape(1, data.shape[0], data.shape[1])
+
+         volume = EDFVolume(
+             path.dirname(self.fname),
+             data=data,
+             start_index=self.start_index,
+             header=header
+         )
+         volume.save()
+
+
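
A minimal usage sketch of EDFWriter; the path is a made-up placeholder:

# Hedged sketch: a 2D image is reshaped to a 1-frame stack and written as an EDF file.
import numpy as np

writer = EDFWriter("out/rec.edf", start_index=0)
writer.write(np.zeros((64, 64), dtype=np.float32))
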
+ class JP2Writer(Writer):
+     def __init__(
+         self, fname, start_index=0, filemode="wb",
+         psnr=None, cratios=None, auto_convert=True, float_clip_values=None, n_threads=None
+     ):
+         """
+         JPEG2000 writer. This class requires the python package `glymur` and the
+         library `libopenjp2`.
+
+         Parameters
+         -----------
+         fname: str
+             Path to the output file name
+         start_index: int, optional
+             When writing a stack of images, each image is written in a dedicated file.
+             The output is a series of files `filename_0000.jp2`, `filename_0001.jp2`, etc.
+             This parameter is the starting index for file names.
+         psnr: list of int, optional
+             The PSNR (Peak Signal-to-Noise Ratio) for each jpeg2000 layer.
+             This defines a quality metric for lossy compression.
+             The number "0" stands for lossless compression.
+         cratios: list of int, optional
+             Compression ratio for each jpeg2000 layer
+         auto_convert: bool, optional
+             Whether to automatically cast floating point data to uint16.
+             Default is True.
+         float_clip_values: tuple of floats, optional
+             If set to a tuple of two values (min, max), then each image will be clipped
+             to these minimum and maximum values.
+         n_threads: int, optional
+             Number of threads to use for encoding. Default is the number of available threads.
+             Needs libopenjpeg >= 2.4.0.
+         """
+         super().__init__(fname)
+         if not __have_jp2k__:
+             raise ValueError("Need glymur python package and libopenjp2 library")
+         self.n_threads = n_threads
+         # self.setup_multithread_encoding(n_threads=n_threads, what_if_not_available="ignore")
+         # self.filemode = filemode
+         self.start_index = start_index
+         self.auto_convert = auto_convert
+         if psnr is not None and np.isscalar(psnr):
+             psnr = [psnr]
+         self.psnr = psnr
+         self.cratios = cratios
+         self._vmin = None
+         self._vmax = None
+         self.clip_float = False
+         if float_clip_values is not None:
+             self._float_clip_min, self._float_clip_max = float_clip_values
+             self.clip_float = True
+
+     def write(self, data, *args, **kwargs):
+         if not isinstance(data, np.ndarray):
+             raise TypeError(f"data is expected to be a numpy array, not {type(data)}")
+
+         if data.ndim == 2:
+             data = data.reshape(1, data.shape[0], data.shape[1])
+
+         if data.ndim == 3 and data.shape[0] == 1:
+             # TODO: add an option with or without pattern instead? This would look more "reliable"
+             class JP2KVolume(_JP2KVolumeBase):
+                 # we cannot be sure that the output directory name is the base name of the file path
+                 DEFAULT_DATA_DATA_PATH_PATTERN = self.fname
+         else:
+             file_path, ext = path.splitext(self.fname)
+             # In nabu the base name of the jp2 file can differ from the folder name, so we
+             # need to redefine it. For interoperability with the rest of the tomotools suite
+             # it is highly recommended that they stay the same.
+             # TODO: to simplify things we should ensure this is the case by default
+             # (if no output is provided, fall back to using the folder name as the output file name prefix)
+             class JP2KVolume(_JP2KVolumeBase):
+                 # we cannot be sure that the output directory name is the base name of the file path
+                 DEFAULT_DATA_DATA_PATH_PATTERN = path.basename(file_path) + "_{index_zfill4}" + ext
+
+         volume = JP2KVolume(
+             folder=path.dirname(self.fname),
+             start_index=self.start_index,
+             cratios=self.cratios,
+             psnr=self.psnr,
+             n_threads=self.n_threads,
+         )
+
+         if data.dtype != np.uint16 and self.auto_convert:
+             if self.clip_float:
+                 data = np.clip(data, self._float_clip_min, self._float_clip_max)
+             data = rescale_data(data, 0, 65535, data_min=self._vmin, data_max=self._vmax)
+             data = data.astype(np.uint16)
+
+         volume.data = data
+         config = kwargs.get("config", None)
+         if config is not None:
+             volume.metadata = {"config": config}
+         volume.save()
+
+
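
A minimal usage sketch of JP2Writer (requires glymur and libopenjp2); the path and compression parameters are made-up:

# Hedged sketch: float data is clipped to [0, 1], rescaled to uint16, then written
# as a series of jp2 files with a compression ratio of 10.
import numpy as np

writer = JP2Writer("out/rec.jp2", cratios=[10], float_clip_values=(0.0, 1.0))
writer.write(np.random.rand(3, 64, 64).astype(np.float32))
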
+ Writers = {
+     "h5": NXProcessWriter,
+     "hdf5": NXProcessWriter,
+     "nx": NXProcessWriter,
+     "nexus": NXProcessWriter,
+     "tif": TIFFWriter,
+     "tiff": TIFFWriter,
+     "j2k": JP2Writer,
+     "jp2": JP2Writer,
+     "jp2k": JP2Writer,
+     "edf": EDFWriter,
+ }
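
A sketch of how this mapping can be used to pick a writer class from a file extension; the get_writer helper below is illustrative, not part of the module:

# Hedged sketch: dispatch on file extension. get_writer() is a hypothetical helper.
from os import path

def get_writer(fname):
    ext = path.splitext(fname)[1].lstrip(".").lower()
    writer_cls = Writers.get(ext)
    if writer_cls is None:
        raise ValueError(f"No writer found for extension '{ext}'")
    return writer_cls(fname)
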