ChessAnalysisPipeline 0.0.2__py3-none-any.whl → 0.0.4__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ChessAnalysisPipeline might be problematic.

Files changed (47)
  1. CHAP/__init__.py +3 -0
  2. CHAP/common/__init__.py +19 -0
  3. CHAP/common/models/__init__.py +2 -0
  4. CHAP/common/models/integration.py +515 -0
  5. CHAP/common/models/map.py +535 -0
  6. CHAP/common/processor.py +644 -0
  7. CHAP/common/reader.py +119 -0
  8. CHAP/common/utils/__init__.py +37 -0
  9. CHAP/common/utils/fit.py +2613 -0
  10. CHAP/common/utils/general.py +1225 -0
  11. CHAP/common/utils/material.py +231 -0
  12. CHAP/common/utils/scanparsers.py +785 -0
  13. CHAP/common/writer.py +96 -0
  14. CHAP/edd/__init__.py +7 -0
  15. CHAP/edd/models.py +215 -0
  16. CHAP/edd/processor.py +321 -0
  17. CHAP/edd/reader.py +5 -0
  18. CHAP/edd/writer.py +5 -0
  19. CHAP/inference/__init__.py +3 -0
  20. CHAP/inference/processor.py +68 -0
  21. CHAP/inference/reader.py +5 -0
  22. CHAP/inference/writer.py +5 -0
  23. CHAP/pipeline.py +1 -1
  24. CHAP/processor.py +11 -818
  25. CHAP/reader.py +18 -113
  26. CHAP/saxswaxs/__init__.py +6 -0
  27. CHAP/saxswaxs/processor.py +5 -0
  28. CHAP/saxswaxs/reader.py +5 -0
  29. CHAP/saxswaxs/writer.py +5 -0
  30. CHAP/sin2psi/__init__.py +7 -0
  31. CHAP/sin2psi/processor.py +5 -0
  32. CHAP/sin2psi/reader.py +5 -0
  33. CHAP/sin2psi/writer.py +5 -0
  34. CHAP/tomo/__init__.py +5 -0
  35. CHAP/tomo/models.py +125 -0
  36. CHAP/tomo/processor.py +2009 -0
  37. CHAP/tomo/reader.py +5 -0
  38. CHAP/tomo/writer.py +5 -0
  39. CHAP/writer.py +17 -167
  40. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/METADATA +1 -1
  41. ChessAnalysisPipeline-0.0.4.dist-info/RECORD +50 -0
  42. CHAP/async.py +0 -56
  43. ChessAnalysisPipeline-0.0.2.dist-info/RECORD +0 -17
  44. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/LICENSE +0 -0
  45. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/WHEEL +0 -0
  46. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/entry_points.txt +0 -0
  47. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/top_level.txt +0 -0
CHAP/tomo/processor.py ADDED
@@ -0,0 +1,2009 @@
1
+ #!/usr/bin/env python
2
+ #-*- coding: utf-8 -*-
3
+ #pylint: disable=
4
+ '''
5
+ File : processor.py
6
+ Author : Rolf Verberg <rolfverberg AT gmail dot com>
7
+ Description: Module for Processors used only by tomography experiments
8
+ '''
9
+
10
+ # system modules
11
+ from os import mkdir
12
+ from os import path as os_path
13
+ from time import time
14
+
15
+ # third party modules
16
+ from nexusformat.nexus import NXFile, NXobject
17
+ import numpy as np
+ from yaml import safe_dump, safe_load
18
+
19
+ # local modules
20
+ from CHAP.common.utils.general import clear_plot, clear_imshow, quick_plot, quick_imshow
21
+ from CHAP.processor import Processor
22
+
23
+ num_core_tomopy_limit = 24
24
+
25
+
26
+ class TomoDataProcessor(Processor):
27
+ '''Class representing the processes used to reconstruct a set of tomographic images,
28
+ returning either a dictionary or a `nexusformat.nexus.NXroot` object containing the
29
+ (meta)data after each individual processing step.
30
+ '''
31
+
32
+ def _process(self, data):
33
+ '''Process the output of a `Reader` that contains a map or a `nexusformat.nexus.NXroot`
34
+ object along with one that contains the step-specific instructions, and return either a
35
+ dictionary or a `nexusformat.nexus.NXroot` object containing the processed result.
36
+
37
+ :param data: Result of `Reader.read` where at least one item is of type
38
+ `nexusformat.nexus.NXroot` or has the value `'MapConfig'` for the `'schema'` key,
39
+ and at least one item has the value `'TomoReduceConfig'` for the `'schema'` key.
40
+ :type data: list[dict[str,object]]
41
+ :return: processed (meta)data
42
+ :rtype: dict or nexusformat.nexus.NXroot
43
+ '''
44
+
45
+ tomo = Tomo(save_figs='only')
46
+ nxroot = None
47
+ center_config = None
48
+
49
+ # Get and validate the relevant configuration objects in data
50
+ configs = self.get_configs(data)
51
+
52
+ # Setup the pipeline for a tomography reconstruction
53
+ if 'setup' in configs:
54
+ configs.pop('nxroot', None)
55
+ nxroot = self.get_nxroot(configs.pop('map'), configs.pop('setup'))
56
+ else:
57
+ nxroot = configs.pop('nxroot', None)
58
+
59
+ # Reduce tomography images
60
+ if 'reduce' in configs:
61
+ tool_config = configs.pop('reduce')
62
+ if nxroot is None:
63
+ map_config = configs.pop('map')
64
+ nxroot = self.get_nxroot(map_config, tool_config)
65
+ nxroot = tomo.gen_reduced_data(nxroot, img_x_bounds=tool_config.img_x_bounds)
66
+
67
+ # Find rotation axis centers for the tomography stacks
68
+ # Pass tool_config directly to tomo.find_centers?
69
+ if 'find_center' in configs:
70
+ tool_config = configs.pop('find_center')
71
+ center_rows = [tool_config.lower_row, tool_config.upper_row]
72
+ if (None in center_rows or tool_config.lower_center_offset is None or
73
+ tool_config.upper_center_offset is None):
74
+ center_config = tomo.find_centers(nxroot, center_rows=center_rows,
75
+ center_stack_index=tool_config.center_stack_index)
76
+ else:
77
+ #RV make a convert to dict in basemodel?
78
+ center_config = {'lower_row': tool_config.lower_row,
79
+ 'lower_center_offset': tool_config.lower_center_offset,
80
+ 'upper_row': tool_config.upper_row,
81
+ 'upper_center_offset': tool_config.upper_center_offset}
82
+
83
+ # Reconstruct tomography stacks
84
+ # Pass tool_config and center_config directly to tomo.reconstruct_data
85
+ if 'reconstruct' in configs:
86
+ tool_config = configs.pop('reconstruct')
87
+ nxroot = tomo.reconstruct_data(nxroot, center_config, x_bounds=tool_config.x_bounds,
88
+ y_bounds=tool_config.y_bounds, z_bounds=tool_config.z_bounds)
89
+ center_config = None
90
+
91
+ # Combine reconstructed tomography stacks
92
+ if 'combine' in configs:
93
+ tool_config = configs.pop('combine')
94
+ nxroot = tomo.combine_data(nxroot, x_bounds=tool_config.x_bounds,
95
+ y_bounds=tool_config.y_bounds, z_bounds=tool_config.z_bounds)
96
+
97
+ if center_config is not None:
98
+ return center_config
99
+ else:
100
+ return nxroot
101
+
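For orientation, `TomoDataProcessor._process` consumes a list of `{'schema': ..., 'data': ...}` items produced by `Reader.read`. Below is an abbreviated, hypothetical example of that input; the schema names come from the docstrings above, but the field values are made up and a real `MapConfig` requires more fields than shown.

    # Illustrative sketch of the pipeline input consumed by TomoDataProcessor.
    # Schema names match the docstrings above; all field values are hypothetical
    # and the MapConfig entry is abbreviated.
    data = [
        {'schema': 'MapConfig',
         'data': {'title': 'hollow_brick',        # hypothetical map fields
                  'station': 'id3a',
                  'experiment_type': 'TOMO'}},
        {'schema': 'TomoReduceConfig',
         'data': {'img_x_bounds': [300, 600]}},   # hypothetical reduction bounds
        {'schema': 'TomoFindCenterConfig',
         'data': {'lower_row': 10, 'upper_row': 250}},
    ]
    # With this input, _process builds the NXroot map, reduces the images and,
    # because the center offsets are not given, returns a center_config
    # dictionary instead of the NXroot object.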
102
+ def get_configs(self, data):
103
+ '''Get instances of the configuration objects needed by this
104
+ `Processor` from a returned value of `Reader.read`
105
+
106
+ :param data: Result of `Reader.read` where at least one item
107
+ is of type `nexusformat.nexus.NXroot` or has the value
108
+ `'MapConfig'` for the `'schema'` key, and at least one item
109
+ has the value `'TomoSetupConfig'`, or `'TomoReduceConfig'`,
110
+ or `'TomoFindCenterConfig'`, or `'TomoReconstructConfig'`,
111
+ or `'TomoCombineConfig'` for the `'schema'` key.
112
+ :type data: list[dict[str,object]]
113
+ :raises Exception: If valid config objects cannot be constructed
114
+ from `data`.
115
+ :return: valid instances of the configuration objects with field
116
+ values taken from `data`.
117
+ :rtype: dict
118
+ '''
119
+ #:rtype: dict{'map': MapConfig, 'reduce': TomoReduceConfig} RV: Is there a way to denote optional items?
120
+ from CHAP.common.models import MapConfig
121
+ from CHAP.tomo.models import TomoSetupConfig, TomoReduceConfig, TomoFindCenterConfig, \
122
+ TomoReconstructConfig, TomoCombineConfig
123
+ from nexusformat.nexus import NXroot
124
+
125
+ configs = {}
126
+ if isinstance(data, list):
127
+ for item in data:
128
+ if isinstance(item, dict):
129
+ schema = item.get('schema')
130
+ if isinstance(item.get('data'), NXroot):
131
+ configs['nxroot'] = item.get('data')
132
+ if schema == 'MapConfig':
133
+ configs['map'] = MapConfig(**(item.get('data')))
134
+ if schema == 'TomoSetupConfig':
135
+ configs['setup'] = TomoSetupConfig(**(item.get('data')))
136
+ if schema == 'TomoReduceConfig':
137
+ configs['reduce'] = TomoReduceConfig(**(item.get('data')))
138
+ elif schema == 'TomoFindCenterConfig':
139
+ configs['find_center'] = TomoFindCenterConfig(**(item.get('data')))
140
+ elif schema == 'TomoReconstructConfig':
141
+ configs['reconstruct'] = TomoReconstructConfig(**(item.get('data')))
142
+ elif schema == 'TomoCombineConfig':
143
+ configs['combine'] = TomoCombineConfig(**(item.get('data')))
144
+
145
+ return configs
146
+
147
+ def get_nxroot(self, map_config, tool_config):
148
+ '''Get a map of the collected tomography data from the scans in `map_config`. The
149
+ data will be reduced based on additional parameters included in `tool_config`.
150
+ The data will be returned along with relevant metadata in the form of a NeXus structure.
151
+
152
+ :param map_config: the map configuration
153
+ :type map_config: MapConfig
154
+ :param tool_config: the tomography image reduction configuration
155
+ :type tool_config: TomoReduceConfig
156
+ :return: a map of the collected tomography data along with the data reduction configuration
157
+ :rtype: nexusformat.nexus.NXroot
158
+ '''
159
+ from CHAP.common import MapProcessor
160
+ from CHAP.common.models.map import import_scanparser
161
+ from CHAP.common.utils.general import index_nearest
162
+ from copy import deepcopy
163
+ from nexusformat.nexus import NXcollection, NXdata, NXdetector, NXinstrument, NXsample, \
164
+ NXsource, NXsubentry, NXroot
165
+
166
+ include_raw_data = getattr(tool_config, "include_raw_data", False)
167
+
168
+ # Construct NXroot
169
+ nxroot = NXroot()
170
+
171
+ # Construct base NXentry and add to NXroot
172
+ nxentry = MapProcessor.get_nxentry(map_config)
173
+ nxroot[map_config.title] = nxentry
174
+ nxroot.attrs['default'] = map_config.title
175
+ nxentry.definition = 'NXtomo'
176
+ if 'data' in nxentry:
177
+ del nxentry['data']
178
+
179
+ # Add an NXinstrument to the NXentry
180
+ nxinstrument = NXinstrument()
181
+ nxentry.instrument = nxinstrument
182
+
183
+ # Add an NXsource to the NXinstrument
184
+ nxsource = NXsource()
185
+ nxinstrument.source = nxsource
186
+ nxsource.type = 'Synchrotron X-ray Source'
187
+ nxsource.name = 'CHESS'
188
+ nxsource.probe = 'x-ray'
189
+
190
+ # Tag the NXsource with the runinfo (as an attribute)
191
+ # nxsource.attrs['cycle'] = map_config.cycle
192
+ # nxsource.attrs['btr'] = map_config.btr
193
+ nxsource.attrs['station'] = map_config.station
194
+ nxsource.attrs['experiment_type'] = map_config.experiment_type
195
+
196
+ # Add an NXdetector to the NXinstrument (don't fill in data fields yet)
197
+ nxdetector = NXdetector()
198
+ nxinstrument.detector = nxdetector
199
+ nxdetector.local_name = tool_config.detector.prefix
200
+ pixel_size = tool_config.detector.pixel_size
201
+ if len(pixel_size) == 1:
202
+ nxdetector.x_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
203
+ nxdetector.y_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
204
+ else:
205
+ nxdetector.x_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
206
+ nxdetector.y_pixel_size = pixel_size[1]/tool_config.detector.lens_magnification
207
+ nxdetector.x_pixel_size.attrs['units'] = 'mm'
208
+ nxdetector.y_pixel_size.attrs['units'] = 'mm'
209
+
210
+ if include_raw_data:
211
+ # Add an NXsample to NXentry (don't fill in data fields yet)
212
+ nxsample = NXsample()
213
+ nxentry.sample = nxsample
214
+ nxsample.name = map_config.sample.name
215
+ nxsample.description = map_config.sample.description
216
+
217
+ # Add NXcollection's to NXentry to hold metadata about the spec scans in the map
218
+ # Also obtain the data fields in NXsample and NXdetector if requested
219
+ import_scanparser(map_config.station, map_config.experiment_type)
220
+ image_keys = []
221
+ sequence_numbers = []
222
+ image_stacks = []
223
+ rotation_angles = []
224
+ x_translations = []
225
+ z_translations = []
226
+ for scans in map_config.spec_scans:
227
+ for scan_number in scans.scan_numbers:
228
+ scanparser = scans.get_scanparser(scan_number)
229
+ if map_config.station in ('id1a3', 'id3a'):
230
+ scan_type = scanparser.scan_type
231
+ if scan_type == 'df1':
232
+ image_key = 2
233
+ field_name = 'dark_field'
234
+ elif scan_type == 'bf1':
235
+ image_key = 1
236
+ field_name = 'bright_field'
237
+ elif scan_type == 'ts1':
238
+ image_key = 0
239
+ field_name = 'tomo_fields'
240
+ else:
241
+ raise RuntimeError(f'Invalid scan type: {scan_type}')
242
+ elif map_config.station in ('id3b',):
243
+ if scans.spec_file.endswith('_dark'):
244
+ image_key = 2
245
+ field_name = 'dark_field'
246
+ elif scans.spec_file.endswith('_flat'):
247
+ #RV not yet tested with an actual fmb run
248
+ image_key = 1
249
+ field_name = 'bright_field'
250
+ else:
251
+ image_key = 0
252
+ field_name = 'tomo_fields'
253
+ else:
254
+ raise RuntimeError(f'Invalid station: {map_config.station}')
255
+
256
+ # Create an NXcollection for each field type
257
+ if field_name in nxentry.spec_scans:
258
+ nxcollection = nxentry.spec_scans[field_name]
259
+ if nxcollection.attrs['spec_file'] != str(scans.spec_file):
260
+ raise RuntimeError(f'Multiple SPEC files for a single field type not yet '+
261
+ f'implemented; field name: {field_name}, '+
262
+ f'SPEC file: {str(scans.spec_file)}')
263
+ else:
264
+ nxcollection = NXcollection()
265
+ nxentry.spec_scans[field_name] = nxcollection
266
+ nxcollection.attrs['spec_file'] = str(scans.spec_file)
267
+ nxcollection.attrs['date'] = scanparser.spec_scan.file_date
268
+
269
+ # Get thetas
270
+ image_offset = scanparser.starting_image_offset
271
+ if map_config.station in ('id1a3', 'id3a'):
272
+ theta_vals = scanparser.theta_vals
273
+ thetas = np.linspace(theta_vals.get('start'), theta_vals.get('end'),
274
+ theta_vals.get('num'))
275
+ else:
276
+ if len(scans.scan_numbers) != 1:
277
+ raise RuntimeError('Multiple scans not yet implemented for '+
278
+ f'{map_config.station}')
279
+ scan_number = scans.scan_numbers[0]
280
+ thetas = []
281
+ for dim in map_config.independent_dimensions:
282
+ if dim.label != 'theta':
283
+ continue
284
+ for index in range(scanparser.spec_scan_npts):
285
+ thetas.append(dim.get_value(scans, scan_number, index))
286
+ if not len(thetas):
287
+ raise RuntimeError(f'Unable to obtain thetas for {field_name}')
288
+ if thetas[image_offset] <= 0.0 and thetas[-1] >= 180.0:
289
+ image_offset = index_nearest(thetas, 0.0)
290
+ thetas = thetas[image_offset:index_nearest(thetas, 180.0)]
291
+ elif thetas[-1]-thetas[image_offset] >= 180:
292
+ thetas = thetas[image_offset:index_nearest(thetas, thetas[0]+180.0)]
293
+ else:
294
+ thetas = thetas[image_offset:]
295
+
296
+ # x and z translations
297
+ x_translation = scanparser.horizontal_shift
298
+ z_translation = scanparser.vertical_shift
299
+
300
+ # Add an NXsubentry to the NXcollection for each scan
301
+ entry_name = f'scan_{scan_number}'
302
+ nxsubentry = NXsubentry()
303
+ nxcollection[entry_name] = nxsubentry
304
+ nxsubentry.start_time = scanparser.spec_scan.date
305
+ nxsubentry.spec_command = scanparser.spec_command
306
+ # Add an NXinstrument to the scan's NXsubentry
307
+ nxsubentry.instrument = NXinstrument()
308
+ # Add an NXdetector to the NXinstrument to the scan's NXsubentry
309
+ nxsubentry.instrument.detector = deepcopy(nxdetector)
310
+ nxsubentry.instrument.detector.frame_start_number = image_offset
311
+ nxsubentry.instrument.detector.image_key = image_key
312
+ # Add an NXsample to the scan's NXsubentry
313
+ nxsubentry.sample = NXsample()
314
+ nxsubentry.sample.rotation_angle = thetas
315
+ nxsubentry.sample.rotation_angle.units = 'degrees'
316
+ nxsubentry.sample.x_translation = x_translation
317
+ nxsubentry.sample.x_translation.units = 'mm'
318
+ nxsubentry.sample.z_translation = z_translation
319
+ nxsubentry.sample.z_translation.units = 'mm'
320
+
321
+ if include_raw_data:
322
+ num_image = len(thetas)
323
+ image_keys += num_image*[image_key]
324
+ sequence_numbers += list(range(num_image))
325
+ image_stacks.append(scanparser.get_detector_data(tool_config.detector.prefix,
326
+ scan_step_index=(image_offset, image_offset+num_image)))
327
+ rotation_angles += list(thetas)
328
+ x_translations += num_image*[x_translation]
329
+ z_translations += num_image*[z_translation]
330
+
331
+ if include_raw_data:
332
+ # Add image data to NXdetector
333
+ nxinstrument.detector.image_key = image_keys
334
+ nxinstrument.detector.sequence_number = sequence_numbers
335
+ nxinstrument.detector.data = np.concatenate([image for image in image_stacks])
336
+
337
+ # Add image data to NXsample
338
+ nxsample.rotation_angle = rotation_angles
339
+ nxsample.rotation_angle.attrs['units'] = 'degrees'
340
+ nxsample.x_translation = x_translations
341
+ nxsample.x_translation.attrs['units'] = 'mm'
342
+ nxsample.z_translation = z_translations
343
+ nxsample.z_translation.attrs['units'] = 'mm'
344
+
345
+ # Add an NXdata to NXentry
346
+ nxdata = NXdata()
347
+ nxentry.data = nxdata
348
+ nxdata.makelink(nxentry.instrument.detector.data, name='data')
349
+ nxdata.makelink(nxentry.instrument.detector.image_key)
350
+ nxdata.makelink(nxentry.sample.rotation_angle)
351
+ nxdata.makelink(nxentry.sample.x_translation)
352
+ nxdata.makelink(nxentry.sample.z_translation)
353
+ # nxdata.attrs['axes'] = ['field', 'row', 'column']
354
+ # nxdata.attrs['field_indices'] = 0
355
+ # nxdata.attrs['row_indices'] = 1
356
+ # nxdata.attrs['column_indices'] = 2
357
+
358
+ return(nxroot)
359
+
360
+
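For reference, the scan-type to NXtomo `image_key` mapping implemented in `get_nxroot` can be summarized in a standalone sketch; the helper below is illustrative only and not part of CHAP.

    # NXtomo convention: 0 = projection (tomo), 1 = bright/flat field, 2 = dark field.
    def image_key_for_scan(station, scan_type=None, spec_file=''):
        """Return (image_key, field_name) for one SPEC scan (illustrative helper)."""
        if station in ('id1a3', 'id3a'):
            mapping = {'df1': (2, 'dark_field'),
                       'bf1': (1, 'bright_field'),
                       'ts1': (0, 'tomo_fields')}
            if scan_type not in mapping:
                raise RuntimeError(f'Invalid scan type: {scan_type}')
            return mapping[scan_type]
        if station == 'id3b':
            if spec_file.endswith('_dark'):
                return 2, 'dark_field'
            if spec_file.endswith('_flat'):
                return 1, 'bright_field'
            return 0, 'tomo_fields'
        raise RuntimeError(f'Invalid station: {station}')

    assert image_key_for_scan('id1a3', 'ts1') == (0, 'tomo_fields')
    assert image_key_for_scan('id3b', spec_file='sample_dark') == (2, 'dark_field')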
361
+ def nxcopy(nxobject:NXobject, exclude_nxpaths:list[str]=[], nxpath_prefix:str='') -> NXobject:
362
+ '''Function that returns a copy of a nexus object, optionally excluding certain child items.
363
+
364
+ :param nxobject: the original nexus object to return a "copy" of
365
+ :type nxobject: nexusformat.nexus.NXobject
366
+ :param exclude_nxpaths: a list of paths to child nexus objects that
367
+ should be excluded from the returned "copy", defaults to `[]`
368
+ :type exclude_nxpaths: list[str], optional
369
+ :param nxpath_prefix: For use in recursive calls from inside this
370
+ function only!
371
+ :type nxpath_prefix: str
372
+ :return: a copy of `nxobject` with some children optionally excluded.
373
+ :rtype: NXobject
374
+ '''
375
+ from nexusformat.nexus import NXgroup
376
+
377
+ nxobject_copy = nxobject.__class__()
378
+ if not len(nxpath_prefix):
379
+ if 'default' in nxobject.attrs:
380
+ nxobject_copy.attrs['default'] = nxobject.attrs['default']
381
+ else:
382
+ for k, v in nxobject.attrs.items():
383
+ nxobject_copy.attrs[k] = v
384
+
385
+ for k, v in nxobject.items():
386
+ nxpath = os_path.join(nxpath_prefix, k)
387
+
388
+ if nxpath in exclude_nxpaths:
389
+ continue
390
+
391
+ if isinstance(v, NXgroup):
392
+ nxobject_copy[k] = nxcopy(v, exclude_nxpaths=exclude_nxpaths,
393
+ nxpath_prefix=os_path.join(nxpath_prefix, k))
394
+ else:
395
+ nxobject_copy[k] = v
396
+
397
+ return(nxobject_copy)
398
+
399
+
400
+ class set_numexpr_threads:
401
+ def __init__(self, num_core):
402
+ from multiprocessing import cpu_count
403
+
404
+ if num_core is None or num_core < 1 or num_core > cpu_count():
405
+ self.num_core = cpu_count()
406
+ else:
407
+ self.num_core = num_core
408
+
409
+ def __enter__(self):
410
+ import numexpr as ne
411
+
412
+ self.num_core_org = ne.set_num_threads(min(self.num_core, ne.MAX_THREADS))
413
+
414
+ def __exit__(self, exc_type, exc_value, traceback):
415
+ import numexpr as ne
416
+
417
+ ne.set_num_threads(self.num_core_org)
418
+
419
+
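`set_numexpr_threads` is a small context manager that temporarily caps the numexpr thread count and restores the previous setting on exit; a usage sketch (requires the numexpr package):

    import numexpr as ne
    import numpy as np

    a = np.random.rand(1_000_000)
    b = np.random.rand(1_000_000)

    # Limit numexpr to at most 4 threads for this evaluation only.
    with set_numexpr_threads(4):
        c = ne.evaluate('a * b + 1')

    # Outside the block the original numexpr thread setting is back in effect.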
420
+ class Tomo:
421
+ """Processing tomography data with misalignment.
422
+ """
423
+ def __init__(self, galaxy_flag=False, num_core=-1, output_folder='.', save_figs=None,
424
+ test_mode=False):
425
+ """Initialize with optional config input file or dictionary
426
+ """
427
+ from logging import getLogger
428
+
429
+ from multiprocessing import cpu_count
430
+
431
+ self.__name__ = self.__class__.__name__
432
+ self.logger = getLogger(self.__name__)
433
+ self.logger.propagate = False
434
+
435
+ if not isinstance(galaxy_flag, bool):
436
+ raise ValueError(f'Invalid parameter galaxy_flag ({galaxy_flag})')
437
+ self.galaxy_flag = galaxy_flag
438
+ self.num_core = num_core
439
+ if self.galaxy_flag:
440
+ if output_folder != '.':
441
+ self.logger.warning('Ignoring output_folder in galaxy mode')
442
+ self.output_folder = '.'
443
+ if test_mode != False:
444
+ self.logger.warning('Ignoring test_mode in galaxy mode')
445
+ self.test_mode = False
446
+ if save_figs is not None:
447
+ self.logger.warning('Ignoring save_figs in galaxy mode')
448
+ save_figs = 'only'
449
+ else:
450
+ self.output_folder = os_path.abspath(output_folder)
451
+ if not os_path.isdir(output_folder):
452
+ mkdir(os_path.abspath(output_folder))
453
+ if not isinstance(test_mode, bool):
454
+ raise ValueError(f'Invalid parameter test_mode ({test_mode})')
455
+ self.test_mode = test_mode
456
+ if save_figs is None:
457
+ save_figs = 'no'
458
+ self.test_config = {}
459
+ if self.test_mode:
460
+ if save_figs != 'only':
461
+ self.logger.warning('Ignoring save_figs in test mode')
462
+ save_figs = 'only'
463
+ if save_figs == 'only':
464
+ self.save_only = True
465
+ self.save_figs = True
466
+ elif save_figs == 'yes':
467
+ self.save_only = False
468
+ self.save_figs = True
469
+ elif save_figs == 'no':
470
+ self.save_only = False
471
+ self.save_figs = False
472
+ else:
473
+ raise ValueError(f'Invalid parameter save_figs ({save_figs})')
474
+ if self.save_only:
475
+ self.block = False
476
+ else:
477
+ self.block = True
478
+ if self.num_core == -1:
479
+ self.num_core = cpu_count()
480
+ if not isinstance(self.num_core, int) or self.num_core < 0:
481
+ raise ValueError(f'Invalid parameter num_core ({num_core})')
482
+ if self.num_core > cpu_count():
483
+ self.logger.warning(f'num_core = {self.num_core} is larger than the number of '
484
+ f'available processors and reduced to {cpu_count()}')
485
+ self.num_core = cpu_count()
486
+
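For reference, the three `save_figs` modes accepted above map onto the internal flags as follows (an illustrative summary, not code from the package):

    # 'only' -> figures are saved but never shown (non-blocking plots)
    # 'yes'  -> figures are saved and shown
    # 'no'   -> figures are neither saved nor shown
    save_figs_modes = {
        'only': dict(save_figs=True,  save_only=True,  block=False),
        'yes':  dict(save_figs=True,  save_only=False, block=True),
        'no':   dict(save_figs=False, save_only=False, block=True),
    }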
487
+ def read(self, filename):
488
+ extension = os_path.splitext(filename)[1]
489
+ if extension == '.yml' or extension == '.yaml':
490
+ with open(filename, 'r') as f:
491
+ config = safe_load(f)
492
+ # if len(config) > 1:
493
+ # raise ValueError(f'Multiple root entries in {filename} not yet implemented')
494
+ # if len(list(config.values())[0]) > 1:
495
+ # raise ValueError(f'Multiple sample maps in {filename} not yet implemented')
496
+ return(config)
497
+ elif extension == '.nxs':
498
+ with NXFile(filename, mode='r') as nxfile:
499
+ nxroot = nxfile.readfile()
500
+ return(nxroot)
501
+ else:
502
+ raise ValueError(f'Invalid filename extension ({extension})')
503
+
504
+ def write(self, data, filename):
505
+ extension = os_path.splitext(filename)[1]
506
+ if extension == '.yml' or extension == '.yaml':
507
+ with open(filename, 'w') as f:
508
+ safe_dump(data, f)
509
+ elif extension == '.nxs':
510
+ data.save(filename, mode='w')
511
+ elif extension == '.nc':
512
+ data.to_netcdf(path=filename)
513
+ else:
514
+ raise ValueError(f'Invalid filename extension ({extension})')
515
+
516
+ def gen_reduced_data(self, data, img_x_bounds=None):
517
+ """Generate the reduced tomography images.
518
+ """
519
+ from nexusformat.nexus import NXdata, NXprocess, NXroot
520
+
521
+ from CHAP.common.models.map import import_scanparser
522
+
523
+ self.logger.info('Generate the reduced tomography images')
524
+ if img_x_bounds is not None:
525
+ if not isinstance(img_x_bounds, (tuple, list)):
526
+ raise ValueError(f'Invalid parameter img_x_bounds ({img_x_bounds})')
527
+ img_x_bounds = tuple(img_x_bounds)
528
+
529
+ # Create plot galaxy path directory if needed
530
+ if self.galaxy_flag and not os_path.exists('tomo_reduce_plots'):
531
+ mkdir('tomo_reduce_plots')
532
+
533
+ if isinstance(data, dict):
534
+ # Create Nexus format object from input dictionary
535
+ wf = TomoWorkflow(**data)
536
+ if len(wf.sample_maps) > 1:
537
+ raise ValueError(f'Multiple sample maps not yet implemented')
538
+ nxroot = NXroot()
539
+ t0 = time()
540
+ for sample_map in wf.sample_maps:
541
+ self.logger.info(f'Start constructing the {sample_map.title} map.')
542
+ import_scanparser(sample_map.station)
543
+ sample_map.construct_nxentry(nxroot, include_raw_data=False)
544
+ self.logger.info(f'Constructed all sample maps in {time()-t0:.2f} seconds.')
545
+ nxentry = nxroot[nxroot.attrs['default']]
546
+ # Get test mode configuration info
547
+ if self.test_mode:
548
+ self.test_config = data['sample_maps'][0]['test_mode']
549
+ elif isinstance(data, NXroot):
550
+ nxentry = data[data.attrs['default']]
551
+ else:
552
+ raise ValueError(f'Invalid parameter data ({data})')
553
+ if 'data' in nxentry:
554
+ del nxentry['data']
555
+
556
+ # Create an NXprocess to store data reduction (meta)data
557
+ reduced_data = NXprocess()
558
+
559
+ # Generate dark field
560
+ if 'dark_field' in nxentry['spec_scans']:
561
+ reduced_data = self._gen_dark(nxentry, reduced_data)
562
+
563
+ # Generate bright field
564
+ reduced_data = self._gen_bright(nxentry, reduced_data)
565
+
566
+ # Set vertical detector bounds for image stack
567
+ img_x_bounds = self._set_detector_bounds(nxentry, reduced_data, img_x_bounds=img_x_bounds)
568
+ self.logger.info(f'img_x_bounds = {img_x_bounds}')
569
+ reduced_data['img_x_bounds'] = img_x_bounds
570
+
571
+ # Set zoom and/or theta skip to reduce the memory requirement
572
+ zoom_perc, num_theta_skip = self._set_zoom_or_skip()
573
+ if zoom_perc is not None:
574
+ reduced_data.attrs['zoom_perc'] = zoom_perc
575
+ if num_theta_skip is not None:
576
+ reduced_data.attrs['num_theta_skip'] = num_theta_skip
577
+
578
+ # Generate reduced tomography fields
579
+ reduced_data = self._gen_tomo(nxentry, reduced_data)
580
+
581
+ # Create a copy of the input Nexus object and remove raw and any existing reduced data
582
+ if isinstance(data, NXroot):
583
+ exclude_items = [f'{nxentry._name}/reduced_data/data',
584
+ f'{nxentry._name}/instrument/detector/data',
585
+ f'{nxentry._name}/instrument/detector/image_key',
586
+ f'{nxentry._name}/instrument/detector/sequence_number',
587
+ f'{nxentry._name}/sample/rotation_angle',
588
+ f'{nxentry._name}/sample/x_translation',
589
+ f'{nxentry._name}/sample/z_translation',
590
+ f'{nxentry._name}/data/data',
591
+ f'{nxentry._name}/data/image_key',
592
+ f'{nxentry._name}/data/rotation_angle',
593
+ f'{nxentry._name}/data/x_translation',
594
+ f'{nxentry._name}/data/z_translation']
595
+ nxroot = nxcopy(data, exclude_nxpaths=exclude_items)
596
+ nxentry = nxroot[nxroot.attrs['default']]
597
+
598
+ # Add the reduced data NXprocess
599
+ nxentry.reduced_data = reduced_data
600
+
601
+ if 'data' not in nxentry:
602
+ nxentry.data = NXdata()
603
+ nxentry.attrs['default'] = 'data'
604
+ nxentry.data.makelink(nxentry.reduced_data.data.tomo_fields, name='reduced_data')
605
+ nxentry.data.makelink(nxentry.reduced_data.rotation_angle, name='rotation_angle')
606
+ nxentry.data.attrs['signal'] = 'reduced_data'
607
+
608
+ return(nxroot)
609
+
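Downstream steps read the reduced data written by `gen_reduced_data` with axis order stack, theta, row, column; a short access sketch (the file name below is hypothetical):

    import numpy as np
    from nexusformat.nexus import nxload

    nxroot = nxload('reduced.nxs')                 # hypothetical output file
    nxentry = nxroot[nxroot.attrs['default']]

    tomo_fields = nxentry.reduced_data.data.tomo_fields      # stack,theta,row,column
    thetas = np.asarray(nxentry.reduced_data.rotation_angle)  # degrees

    num_stacks, num_thetas, num_rows, num_cols = tomo_fields.shape
    assert num_thetas == len(thetas)

    # One sinogram (all projection angles for a single detector row) of stack 0:
    sinogram = np.asarray(tomo_fields[0, :, num_rows // 2, :])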
610
+ def find_centers(self, nxroot, center_rows=None, center_stack_index=None):
611
+ """Find the calibrated center axis info
612
+ """
613
+ from nexusformat.nexus import NXentry, NXroot
614
+
615
+ # input_int and select_one_image_bound (used below) are assumed to live in the same utils module
+ from CHAP.common.utils.general import input_int, is_int_pair, select_one_image_bound
616
+
617
+ self.logger.info('Find the calibrated center axis info')
618
+
619
+ if not isinstance(nxroot, NXroot):
620
+ raise ValueError(f'Invalid parameter nxroot ({nxroot})')
621
+ nxentry = nxroot[nxroot.attrs['default']]
622
+ if not isinstance(nxentry, NXentry):
623
+ raise ValueError(f'Invalid nxentry ({nxentry})')
624
+ if self.galaxy_flag:
625
+ if center_rows is not None:
626
+ center_rows = tuple(center_rows)
627
+ if not is_int_pair(center_rows):
628
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
629
+ elif center_rows is not None:
630
+ # self.logger.warning(f'Ignoring parameter center_rows ({center_rows})')
631
+ # center_rows = None
632
+ if not isinstance(center_rows, (tuple, list)) or len(center_rows) != 2:
633
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
634
+ if self.galaxy_flag:
635
+ if center_stack_index is not None and (not isinstance(center_stack_index, int) or
636
+ center_stack_index < 0):
637
+ raise ValueError(f'Invalid parameter center_stack_index ({center_stack_index})')
638
+
639
+ # Create plot galaxy path directory and path if needed
640
+ if self.galaxy_flag:
641
+ if not os_path.exists('tomo_find_centers_plots'):
642
+ mkdir('tomo_find_centers_plots')
643
+ path = 'tomo_find_centers_plots'
644
+ else:
645
+ path = self.output_folder
646
+
647
+ # Check if reduced data is available
648
+ if ('reduced_data' not in nxentry or 'reduced_data' not in nxentry.data):
649
+ raise KeyError(f'Unable to find valid reduced data in {nxentry}.')
650
+
651
+ # Select the image stack to calibrate the center axis
652
+ # reduced data axes order: stack,theta,row,column
653
+ # Note: Nexus cannot follow a link if the data it points to is too big,
654
+ # so get the data from the actual place, not from nxentry.data
655
+ tomo_fields_shape = nxentry.reduced_data.data.tomo_fields.shape
656
+ if len(tomo_fields_shape) != 4 or any(True for dim in tomo_fields_shape if not dim):
657
+ raise KeyError('Unable to load the required reduced tomography stack')
658
+ num_tomo_stacks = tomo_fields_shape[0]
659
+ if num_tomo_stacks == 1:
660
+ center_stack_index = 0
661
+ default = 'n'
662
+ else:
663
+ if self.test_mode:
664
+ center_stack_index = self.test_config['center_stack_index']-1 # make offset 0
665
+ elif self.galaxy_flag:
666
+ if center_stack_index is None:
667
+ center_stack_index = int(num_tomo_stacks/2)
668
+ if center_stack_index >= num_tomo_stacks:
669
+ raise ValueError(f'Invalid parameter center_stack_index ({center_stack_index})')
670
+ else:
671
+ if center_stack_index is None:
672
+ center_stack_index = input_int('\nEnter tomography stack index to calibrate '
673
+ 'the center axis', ge=1, le=num_tomo_stacks,
674
+ default=int(1+num_tomo_stacks/2))
675
+ else:
676
+ if (not isinstance(center_stack_index, int) or
677
+ not 0 < center_stack_index <= num_tomo_stacks):
678
+ raise ValueError('Invalid parameter center_stack_index '+
679
+ f'({center_stack_index})')
680
+ center_stack_index -= 1
681
+ default = 'y'
682
+
683
+ # Get thetas (in degrees)
684
+ thetas = np.asarray(nxentry.reduced_data.rotation_angle)
685
+
686
+ # Get effective pixel_size
687
+ if 'zoom_perc' in nxentry.reduced_data:
688
+ eff_pixel_size = 100.*(nxentry.instrument.detector.x_pixel_size/
689
+ nxentry.reduced_data.attrs['zoom_perc'])
690
+ else:
691
+ eff_pixel_size = nxentry.instrument.detector.x_pixel_size
692
+
693
+ # Get cross sectional diameter
694
+ cross_sectional_dim = tomo_fields_shape[3]*eff_pixel_size
695
+ self.logger.debug(f'cross_sectional_dim = {cross_sectional_dim}')
696
+
697
+ # Determine center offset at sample row boundaries
698
+ self.logger.info('Determine center offset at sample row boundaries')
699
+
700
+ # Lower row center
701
+ if self.test_mode:
702
+ lower_row = self.test_config['lower_row']
703
+ elif self.galaxy_flag:
704
+ if center_rows is None:
705
+ lower_row = 0
706
+ else:
707
+ lower_row = min(center_rows)
708
+ if not 0 <= lower_row < tomo_fields_shape[2]-1:
709
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
710
+ else:
711
+ if center_rows is not None and center_rows[0] is not None:
712
+ lower_row = center_rows[0]
713
+ if lower_row == -1:
714
+ lower_row = 0
715
+ if not 0 <= lower_row < tomo_fields_shape[2]-1:
716
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
717
+ else:
718
+ lower_row = select_one_image_bound(
719
+ nxentry.reduced_data.data.tomo_fields[center_stack_index,0,:,:],
720
+ 0, bound=0, title=f'theta={round(thetas[0], 2)+0}',
721
+ bound_name='row index to find lower center', default=default,
722
+ raise_error=True)
723
+ self.logger.debug('Finding center...')
724
+ t0 = time()
725
+ lower_center_offset = self._find_center_one_plane(
726
+ #np.asarray(nxentry.reduced_data.data.tomo_fields[center_stack_index,:,lower_row,:]),
727
+ nxentry.reduced_data.data.tomo_fields[center_stack_index,:,lower_row,:],
728
+ lower_row, thetas, eff_pixel_size, cross_sectional_dim, path=path,
729
+ num_core=self.num_core)
730
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
731
+ self.logger.debug(f'lower_row = {lower_row:.2f}')
732
+ self.logger.debug(f'lower_center_offset = {lower_center_offset:.2f}')
733
+
734
+ # Upper row center
735
+ if self.test_mode:
736
+ upper_row = self.test_config['upper_row']
737
+ elif self.galaxy_flag:
738
+ if center_rows is None:
739
+ upper_row = tomo_fields_shape[2]-1
740
+ else:
741
+ upper_row = max(center_rows)
742
+ if not lower_row < upper_row < tomo_fields_shape[2]:
743
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
744
+ else:
745
+ if center_rows is not None and center_rows[1] is not None:
746
+ upper_row = center_rows[1]
747
+ if upper_row == -1:
748
+ upper_row = tomo_fields_shape[2]-1
749
+ if not lower_row < upper_row < tomo_fields_shape[2]:
750
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
751
+ else:
752
+ upper_row = select_one_image_bound(
753
+ nxentry.reduced_data.data.tomo_fields[center_stack_index,0,:,:],
754
+ 0, bound=tomo_fields_shape[2]-1, title=f'theta={round(thetas[0], 2)+0}',
755
+ bound_name='row index to find upper center', default=default,
756
+ raise_error=True)
757
+ self.logger.debug('Finding center...')
758
+ t0 = time()
759
+ upper_center_offset = self._find_center_one_plane(
760
+ #np.asarray(nxentry.reduced_data.data.tomo_fields[center_stack_index,:,upper_row,:]),
761
+ nxentry.reduced_data.data.tomo_fields[center_stack_index,:,upper_row,:],
762
+ upper_row, thetas, eff_pixel_size, cross_sectional_dim, path=path,
763
+ num_core=self.num_core)
764
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
765
+ self.logger.debug(f'upper_row = {upper_row:.2f}')
766
+ self.logger.debug(f'upper_center_offset = {upper_center_offset:.2f}')
767
+
768
+ center_config = {'lower_row': lower_row, 'lower_center_offset': lower_center_offset,
769
+ 'upper_row': upper_row, 'upper_center_offset': upper_center_offset}
770
+ if num_tomo_stacks > 1:
771
+ center_config['center_stack_index'] = center_stack_index+1 # save as offset 1
772
+
773
+ # Save test data to file
774
+ if self.test_mode:
775
+ with open(f'{self.output_folder}/center_config.yaml', 'w') as f:
776
+ safe_dump(center_config, f)
777
+
778
+ return(center_config)
779
+
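`find_centers` returns a plain dictionary; downstream, `reconstruct_data` linearly interpolates and extrapolates the rotation-axis offset across all detector rows from the two calibrated rows. A worked numeric sketch with hypothetical values:

    # All numeric values here are hypothetical.
    center_config = {'lower_row': 20, 'lower_center_offset': -3.0,
                     'upper_row': 220, 'upper_center_offset': -1.0}

    num_rows = 256
    center_slope = ((center_config['upper_center_offset']
                     - center_config['lower_center_offset'])
                    / (center_config['upper_row'] - center_config['lower_row']))

    # Extrapolate to the first and last detector rows, as done per stack in reconstruct_data:
    offset_first_row = (center_config['lower_center_offset']
                        - center_config['lower_row'] * center_slope)
    offset_last_row = (center_config['upper_center_offset']
                       + (num_rows - 1 - center_config['upper_row']) * center_slope)

    assert round(center_slope, 6) == 0.01       # pixels of offset per row
    assert round(offset_first_row, 6) == -3.2
    assert round(offset_last_row, 6) == -0.65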
780
+ def reconstruct_data(self, nxroot, center_info, x_bounds=None, y_bounds=None, z_bounds=None):
781
+ """Reconstruct the tomography data.
782
+ """
783
+ from nexusformat.nexus import NXdata, NXentry, NXprocess, NXroot
784
+
785
+ from CHAP.common.utils.general import is_int_pair
786
+
787
+ self.logger.info('Reconstruct the tomography data')
788
+
789
+ if not isinstance(nxroot, NXroot):
790
+ raise ValueError(f'Invalid parameter nxroot ({nxroot})')
791
+ nxentry = nxroot[nxroot.attrs['default']]
792
+ if not isinstance(nxentry, NXentry):
793
+ raise ValueError(f'Invalid nxentry ({nxentry})')
794
+ if not isinstance(center_info, dict):
795
+ raise ValueError(f'Invalid parameter center_info ({center_info})')
796
+ if x_bounds is not None:
797
+ if not isinstance(x_bounds, (tuple, list)):
798
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
799
+ x_bounds = tuple(x_bounds)
800
+ if y_bounds is not None:
801
+ if not isinstance(y_bounds, (tuple, list)):
802
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
803
+ y_bounds = tuple(y_bounds)
804
+ if z_bounds is not None:
805
+ if not isinstance(z_bounds, (tuple, list)):
806
+ raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
807
+ z_bounds = tuple(z_bounds)
808
+
809
+ # Create plot galaxy path directory and path if needed
810
+ if self.galaxy_flag:
811
+ if not os_path.exists('tomo_reconstruct_plots'):
812
+ mkdir('tomo_reconstruct_plots')
813
+ path = 'tomo_reconstruct_plots'
814
+ else:
815
+ path = self.output_folder
816
+
817
+ # Check if reduced data is available
818
+ if ('reduced_data' not in nxentry or 'reduced_data' not in nxentry.data):
819
+ raise KeyError(f'Unable to find valid reduced data in {nxentry}.')
820
+
821
+ # Create an NXprocess to store image reconstruction (meta)data
822
+ nxprocess = NXprocess()
823
+
824
+ # Get rotation axis rows and centers
825
+ lower_row = center_info.get('lower_row')
826
+ lower_center_offset = center_info.get('lower_center_offset')
827
+ upper_row = center_info.get('upper_row')
828
+ upper_center_offset = center_info.get('upper_center_offset')
829
+ if (lower_row is None or lower_center_offset is None or upper_row is None or
830
+ upper_center_offset is None):
831
+ raise KeyError(f'Unable to find valid calibrated center axis info in {center_info}.')
832
+ center_slope = (upper_center_offset-lower_center_offset)/(upper_row-lower_row)
833
+
834
+ # Get thetas (in degrees)
835
+ thetas = np.asarray(nxentry.reduced_data.rotation_angle)
836
+
837
+ # Reconstruct tomography data
838
+ # reduced data axes order: stack,theta,row,column
839
+ # reconstructed data order in each stack: row/z,x,y
840
+ # Note: Nexus cannot follow a link if the data it points to is too big,
841
+ # so get the data from the actual place, not from nxentry.data
842
+ if 'zoom_perc' in nxentry.reduced_data:
843
+ res_title = f'{nxentry.reduced_data.attrs["zoom_perc"]}p'
844
+ else:
845
+ res_title = 'fullres'
846
+ load_error = False
847
+ num_tomo_stacks = nxentry.reduced_data.data.tomo_fields.shape[0]
848
+ tomo_recon_stacks = num_tomo_stacks*[np.array([])]
849
+ for i in range(num_tomo_stacks):
850
+ # Convert reduced data stack from theta,row,column to row,theta,column
851
+ self.logger.debug(f'Reading reduced data stack {i+1}...')
852
+ t0 = time()
853
+ tomo_stack = np.asarray(nxentry.reduced_data.data.tomo_fields[i])
854
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
855
+ if len(tomo_stack.shape) != 3 or any(True for dim in tomo_stack.shape if not dim):
856
+ raise ValueError(f'Unable to load tomography stack {i+1} for reconstruction')
857
+ tomo_stack = np.swapaxes(tomo_stack, 0, 1)
858
+ assert(len(thetas) == tomo_stack.shape[1])
859
+ assert(0 <= lower_row < upper_row < tomo_stack.shape[0])
860
+ center_offsets = [lower_center_offset-lower_row*center_slope,
861
+ upper_center_offset+(tomo_stack.shape[0]-1-upper_row)*center_slope]
862
+ t0 = time()
863
+ self.logger.debug(f'Running _reconstruct_one_tomo_stack on {self.num_core} cores ...')
864
+ tomo_recon_stack = self._reconstruct_one_tomo_stack(tomo_stack, thetas,
865
+ center_offsets=center_offsets, num_core=self.num_core, algorithm='gridrec')
866
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
867
+ self.logger.info(f'Reconstruction of stack {i+1} took {time()-t0:.2f} seconds')
868
+
869
+ # Combine stacks
870
+ tomo_recon_stacks[i] = tomo_recon_stack
871
+
872
+ # Resize the reconstructed tomography data
873
+ # reconstructed data order in each stack: row/z,x,y
874
+ if self.test_mode:
875
+ x_bounds = tuple(self.test_config.get('x_bounds'))
876
+ y_bounds = tuple(self.test_config.get('y_bounds'))
877
+ z_bounds = None
878
+ elif self.galaxy_flag:
879
+ if x_bounds is not None and not is_int_pair(x_bounds, ge=0,
880
+ lt=tomo_recon_stacks[0].shape[1]):
881
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
882
+ if y_bounds is not None and not is_int_pair(y_bounds, ge=0,
883
+ lt=tomo_recon_stacks[0].shape[1]):
884
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
885
+ z_bounds = None
886
+ else:
887
+ x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(tomo_recon_stacks,
888
+ x_bounds=x_bounds, y_bounds=y_bounds, z_bounds=z_bounds)
889
+ if x_bounds is None:
890
+ x_range = (0, tomo_recon_stacks[0].shape[1])
891
+ x_slice = int(x_range[1]/2)
892
+ else:
893
+ x_range = (min(x_bounds), max(x_bounds))
894
+ x_slice = int((x_bounds[0]+x_bounds[1])/2)
895
+ if y_bounds is None:
896
+ y_range = (0, tomo_recon_stacks[0].shape[2])
897
+ y_slice = int(y_range[1]/2)
898
+ else:
899
+ y_range = (min(y_bounds), max(y_bounds))
900
+ y_slice = int((y_bounds[0]+y_bounds[1])/2)
901
+ if z_bounds is None:
902
+ z_range = (0, tomo_recon_stacks[0].shape[0])
903
+ z_slice = int(z_range[1]/2)
904
+ else:
905
+ z_range = (min(z_bounds), max(z_bounds))
906
+ z_slice = int((z_bounds[0]+z_bounds[1])/2)
907
+
908
+ # Plot a few reconstructed image slices
909
+ if self.save_figs:
910
+ for i, stack in enumerate(tomo_recon_stacks):
911
+ if num_tomo_stacks == 1:
912
+ basetitle = 'recon'
913
+ else:
914
+ basetitle = f'recon stack {i+1}'
915
+ title = f'{basetitle} {res_title} xslice{x_slice}'
916
+ quick_imshow(stack[z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
917
+ title=title, path=path, save_fig=True, save_only=True)
918
+ title = f'{basetitle} {res_title} yslice{y_slice}'
919
+ quick_imshow(stack[z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
920
+ title=title, path=path, save_fig=True, save_only=True)
921
+ title = f'{basetitle} {res_title} zslice{z_slice}'
922
+ quick_imshow(stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
923
+ title=title, path=path, save_fig=True, save_only=True)
924
+
925
+ # Save test data to file
926
+ # reconstructed data order in each stack: row/z,x,y
927
+ if self.test_mode:
928
+ for i, stack in enumerate(tomo_recon_stacks):
929
+ np.savetxt(f'{self.output_folder}/recon_stack_{i+1}.txt',
930
+ stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]], fmt='%.6e')
931
+
932
+ # Add image reconstruction to reconstructed data NXprocess
933
+ # reconstructed data order in each stack: row/z,x,y
934
+ nxprocess.data = NXdata()
935
+ nxprocess.attrs['default'] = 'data'
936
+ for k, v in center_info.items():
937
+ nxprocess[k] = v
938
+ if x_bounds is not None:
939
+ nxprocess.x_bounds = x_bounds
940
+ if y_bounds is not None:
941
+ nxprocess.y_bounds = y_bounds
942
+ if z_bounds is not None:
943
+ nxprocess.z_bounds = z_bounds
944
+ nxprocess.data['reconstructed_data'] = np.asarray([stack[z_range[0]:z_range[1],
945
+ x_range[0]:x_range[1],y_range[0]:y_range[1]] for stack in tomo_recon_stacks])
946
+ nxprocess.data.attrs['signal'] = 'reconstructed_data'
947
+
948
+ # Create a copy of the input Nexus object and remove reduced data
949
+ exclude_items = [f'{nxentry._name}/reduced_data/data', f'{nxentry._name}/data/reduced_data']
950
+ nxroot_copy = nxcopy(nxroot, exclude_nxpaths=exclude_items)
951
+
952
+ # Add the reconstructed data NXprocess to the new Nexus object
953
+ nxentry_copy = nxroot_copy[nxroot_copy.attrs['default']]
954
+ nxentry_copy.reconstructed_data = nxprocess
955
+ if 'data' not in nxentry_copy:
956
+ nxentry_copy.data = NXdata()
957
+ nxentry_copy.attrs['default'] = 'data'
958
+ nxentry_copy.data.makelink(nxprocess.data.reconstructed_data, name='reconstructed_data')
959
+ nxentry_copy.data.attrs['signal'] = 'reconstructed_data'
960
+
961
+ return(nxroot_copy)
962
+
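The cropping and plotting logic above repeatedly converts optional bounds into a (range, midpoint-slice) pair; a small illustrative helper (not part of the package) capturing that pattern:

    def bounds_to_range_and_slice(bounds, axis_size):
        """Return the (min, max) range and the midpoint slice index for one axis."""
        if bounds is None:
            return (0, axis_size), axis_size // 2
        return (min(bounds), max(bounds)), (bounds[0] + bounds[1]) // 2

    # Hypothetical reconstructed stack of shape (z, x, y) = (120, 300, 300):
    x_range, x_slice = bounds_to_range_and_slice((40, 260), 300)
    z_range, z_slice = bounds_to_range_and_slice(None, 120)
    assert x_range == (40, 260) and x_slice == 150
    assert z_range == (0, 120) and z_slice == 60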
963
+ def combine_data(self, nxroot, x_bounds=None, y_bounds=None, z_bounds=None):
964
+ """Combine the reconstructed tomography stacks.
965
+ """
966
+ from nexusformat.nexus import NXdata, NXentry, NXprocess, NXroot
967
+
968
+ from CHAP.common.utils.general import is_int_pair
969
+
970
+ self.logger.info('Combine the reconstructed tomography stacks')
971
+
972
+ if not isinstance(nxroot, NXroot):
973
+ raise ValueError(f'Invalid parameter nxroot ({nxroot})')
974
+ nxentry = nxroot[nxroot.attrs['default']]
975
+ if not isinstance(nxentry, NXentry):
976
+ raise ValueError(f'Invalid nxentry ({nxentry})')
977
+ if x_bounds is not None:
978
+ if not isinstance(x_bounds, (tuple, list)):
979
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
980
+ x_bounds = tuple(x_bounds)
981
+ if y_bounds is not None:
982
+ if not isinstance(y_bounds, (tuple, list)):
983
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
984
+ y_bounds = tuple(y_bounds)
985
+ if z_bounds is not None:
986
+ if not isinstance(z_bounds, (tuple, list)):
987
+ raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
988
+ z_bounds = tuple(z_bounds)
989
+
990
+ # Create plot galaxy path directory and path if needed
991
+ if self.galaxy_flag:
992
+ if not os_path.exists('tomo_combine_plots'):
993
+ mkdir('tomo_combine_plots')
994
+ path = 'tomo_combine_plots'
995
+ else:
996
+ path = self.output_folder
997
+
998
+ # Check if reconstructed image data is available
999
+ if ('reconstructed_data' not in nxentry or 'reconstructed_data' not in nxentry.data):
1000
+ raise KeyError(f'Unable to find valid reconstructed image data in {nxentry}.')
1001
+
1002
+ # Create an NXprocess to store combined image reconstruction (meta)data
1003
+ nxprocess = NXprocess()
1004
+
1005
+ # Get the reconstructed data
1006
+ # reconstructed data order: stack,row(z),x,y
1007
+ # Note: Nexus cannot follow a link if the data it points to is too big,
1008
+ # so get the data from the actual place, not from nxentry.data
1009
+ num_tomo_stacks = nxentry.reconstructed_data.data.reconstructed_data.shape[0]
1010
+ if num_tomo_stacks == 1:
1011
+ self.logger.info('Only one stack available: leaving combine_data')
1012
+ return(None)
1013
+
1014
+ # Combine the reconstructed stacks
1015
+ # (load one stack at a time to reduce risk of hitting Nexus data access limit)
1016
+ t0 = time()
1017
+ self.logger.debug(f'Combining the reconstructed stacks ...')
1018
+ tomo_recon_combined = np.asarray(nxentry.reconstructed_data.data.reconstructed_data[0])
1019
+ if num_tomo_stacks > 2:
1020
+ tomo_recon_combined = np.concatenate([tomo_recon_combined]+
1021
+ [nxentry.reconstructed_data.data.reconstructed_data[i]
1022
+ for i in range(1, num_tomo_stacks-1)])
1023
+ if num_tomo_stacks > 1:
1024
+ tomo_recon_combined = np.concatenate([tomo_recon_combined]+
1025
+ [nxentry.reconstructed_data.data.reconstructed_data[num_tomo_stacks-1]])
1026
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1027
+ self.logger.info(f'Combining the reconstructed stacks took {time()-t0:.2f} seconds')
1028
+
1029
+ # Resize the combined tomography data stacks
1030
+ # combined data order: row/z,x,y
1031
+ if self.test_mode:
1032
+ x_bounds = None
1033
+ y_bounds = None
1034
+ z_bounds = tuple(self.test_config.get('z_bounds'))
1035
+ elif self.galaxy_flag:
1036
+ if x_bounds is not None and not is_int_pair(x_bounds, ge=0,
1037
+ lt=tomo_recon_combined.shape[1]):
1038
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
1039
+ if y_bounds is not None and not is_int_pair(y_bounds, ge=0,
1040
+ lt=tomo_recon_combined.shape[2]):
1041
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
1042
+ z_bounds = None
1043
+ else:
1044
+ if x_bounds is None and 'x_bounds' in nxentry.reconstructed_data:
1045
+ x_bounds = (-1, -1)
1046
+ if y_bounds is None and 'y_bounds' in nxentry.reconstructed_data:
1047
+ y_bounds = (-1, -1)
1048
+ x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(tomo_recon_combined,
1049
+ z_only=True)
1050
+ if x_bounds is None:
1051
+ x_range = (0, tomo_recon_combined.shape[1])
1052
+ x_slice = int(x_range[1]/2)
1053
+ else:
1054
+ x_range = x_bounds
1055
+ x_slice = int((x_bounds[0]+x_bounds[1])/2)
1056
+ if y_bounds is None:
1057
+ y_range = (0, tomo_recon_combined.shape[2])
1058
+ y_slice = int(y_range[1]/2)
1059
+ else:
1060
+ y_range = y_bounds
1061
+ y_slice = int((y_bounds[0]+y_bounds[1])/2)
1062
+ if z_bounds is None:
1063
+ z_range = (0, tomo_recon_combined.shape[0])
1064
+ z_slice = int(z_range[1]/2)
1065
+ else:
1066
+ z_range = z_bounds
1067
+ z_slice = int((z_bounds[0]+z_bounds[1])/2)
1068
+
1069
+ # Plot a few combined image slices
1070
+ if self.save_figs:
1071
+ quick_imshow(tomo_recon_combined[z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
1072
+ title=f'recon combined xslice{x_slice}', path=path, save_fig=True,
1073
+ save_only=True)
1074
+ quick_imshow(tomo_recon_combined[z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
1075
+ title=f'recon combined yslice{y_slice}', path=path, save_fig=True,
1076
+ save_only=True)
1077
+ quick_imshow(tomo_recon_combined[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1078
+ title=f'recon combined zslice{z_slice}', path=path, save_fig=True,
1079
+ save_only=True)
1080
+
1081
+ # Save test data to file
1082
+ # combined data order: row/z,x,y
1083
+ if self.test_mode:
1084
+ np.savetxt(f'{self.output_folder}/recon_combined.txt', tomo_recon_combined[
1085
+ z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]], fmt='%.6e')
1086
+
1087
+ # Add image reconstruction to reconstructed data NXprocess
1088
+ # combined data order: row/z,x,y
1089
+ nxprocess.data = NXdata()
1090
+ nxprocess.attrs['default'] = 'data'
1091
+ if x_bounds is not None:
1092
+ nxprocess.x_bounds = x_bounds
1093
+ if y_bounds is not None:
1094
+ nxprocess.y_bounds = y_bounds
1095
+ if z_bounds is not None:
1096
+ nxprocess.z_bounds = z_bounds
1097
+ nxprocess.data['combined_data'] = tomo_recon_combined[
1098
+ z_range[0]:z_range[1],x_range[0]:x_range[1],y_range[0]:y_range[1]]
1099
+ nxprocess.data.attrs['signal'] = 'combined_data'
1100
+
1101
+ # Create a copy of the input Nexus object and remove reconstructed data
1102
+ exclude_items = [f'{nxentry._name}/reconstructed_data/data',
1103
+ f'{nxentry._name}/data/reconstructed_data']
1104
+ nxroot_copy = nxcopy(nxroot, exclude_nxpaths=exclude_items)
1105
+
1106
+ # Add the combined data NXprocess to the new Nexus object
1107
+ nxentry_copy = nxroot_copy[nxroot_copy.attrs['default']]
1108
+ nxentry_copy.combined_data = nxprocess
1109
+ if 'data' not in nxentry_copy:
1110
+ nxentry_copy.data = NXdata()
1111
+ nxentry_copy.attrs['default'] = 'data'
1112
+ nxentry_copy.data.makelink(nxprocess.data.combined_data, name='combined_data')
1113
+ nxentry_copy.data.attrs['signal'] = 'combined_data'
1114
+
1115
+ return(nxroot_copy)
1116
+
1117
+ def _gen_dark(self, nxentry, reduced_data):
1118
+ """Generate dark field.
1119
+ """
1120
+ from nexusformat.nexus import NXdata
1121
+
1122
+ from CHAP.common.models.map import get_scanparser, import_scanparser
1123
+
1124
+ # Get the dark field images
1125
+ image_key = nxentry.instrument.detector.get('image_key', None)
1126
+ if image_key and 'data' in nxentry.instrument.detector:
1127
+ field_indices = [index for index, key in enumerate(image_key) if key == 2]
1128
+ tdf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1129
+ # RV the default NXtomo form does not accommodate bright or dark field stacks
1130
+ else:
1131
+ import_scanparser(nxentry.instrument.source.attrs['station'],
1132
+ nxentry.instrument.source.attrs['experiment_type'])
1133
+ dark_field_scans = nxentry.spec_scans.dark_field
1134
+ detector_prefix = str(nxentry.instrument.detector.local_name)
1135
+ tdf_stack = []
1136
+ for nxsubentry_name, nxsubentry in dark_field_scans.items():
1137
+ scan_number = int(nxsubentry_name.split('_')[-1])
1138
+ scanparser = get_scanparser(dark_field_scans.attrs['spec_file'], scan_number)
1139
+ image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1140
+ num_image = len(nxsubentry.sample.rotation_angle)
1141
+ tdf_stack.append(scanparser.get_detector_data(detector_prefix,
1142
+ (image_offset, image_offset+num_image)))
1143
+ if isinstance(tdf_stack, list):
1144
+ assert(len(tdf_stack) == 1) # TODO
1145
+ tdf_stack = tdf_stack[0]
1146
+
1147
+ # Take median
1148
+ if tdf_stack.ndim == 2:
1149
+ tdf = tdf_stack
1150
+ elif tdf_stack.ndim == 3:
1151
+ tdf = np.median(tdf_stack, axis=0)
1152
+ del tdf_stack
1153
+ else:
1154
+ raise ValueError(f'Invalid tdf_stack shape ({tdf_stack.shape})')
1155
+
1156
+ # Remove dark field intensities above the cutoff
1157
+ #RV tdf_cutoff = None
1158
+ tdf_cutoff = tdf.min()+2*(np.median(tdf)-tdf.min())
1159
+ self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1160
+ if tdf_cutoff is not None:
1161
+ if not isinstance(tdf_cutoff, (int, float)) or tdf_cutoff < 0:
1162
+ self.logger.warning(f'Ignoring illegal value of tdf_cutoff {tdf_cutoff}')
1163
+ else:
1164
+ tdf[tdf > tdf_cutoff] = np.nan
1165
+ self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1166
+
1167
+ # Remove nans
1168
+ tdf_mean = np.nanmean(tdf)
1169
+ self.logger.debug(f'tdf_mean = {tdf_mean}')
1170
+ np.nan_to_num(tdf, copy=False, nan=tdf_mean, posinf=tdf_mean, neginf=0.)
1171
+
1172
+ # Plot dark field
1173
+ if self.save_figs:
1174
+ if self.galaxy_flag:
1175
+ quick_imshow(tdf, title='dark field', path='tomo_reduce_plots', save_fig=True,
1176
+ save_only=True)
1177
+ else:
1178
+ quick_imshow(tdf, title='dark field', path=self.output_folder, save_fig=True,
1179
+ save_only=True)
1180
+
1181
+ # Add dark field to reduced data NXprocess
1182
+ reduced_data.data = NXdata()
1183
+ reduced_data.data['dark_field'] = tdf
1184
+
1185
+ return(reduced_data)
1186
+
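A self-contained numpy sketch of the dark-field reduction steps above (median over the stack, outlier cutoff, NaN back-fill); the synthetic data are hypothetical:

    import numpy as np

    rng = np.random.default_rng(0)
    tdf_stack = rng.poisson(100, size=(20, 64, 64)).astype(float)  # hypothetical darks
    tdf_stack[0, 10, 10] = 1e6   # a spike in one frame is already suppressed by the median

    # Per-pixel median over the image stack
    tdf = np.median(tdf_stack, axis=0)

    # Cutoff used above: min + 2*(median - min); values beyond it become NaN
    tdf_cutoff = tdf.min() + 2*(np.median(tdf) - tdf.min())
    tdf[tdf > tdf_cutoff] = np.nan

    # Replace NaNs (and infs) by the mean of the remaining pixels
    tdf_mean = np.nanmean(tdf)
    np.nan_to_num(tdf, copy=False, nan=tdf_mean, posinf=tdf_mean, neginf=0.)
    assert np.isfinite(tdf).all()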
1187
+ def _gen_bright(self, nxentry, reduced_data):
1188
+ """Generate bright field.
1189
+ """
1190
+ from nexusformat.nexus import NXdata
1191
+
1192
+ from CHAP.common.models.map import get_scanparser, import_scanparser
1193
+
1194
+ # Get the bright field images
1195
+ image_key = nxentry.instrument.detector.get('image_key', None)
1196
+ if image_key and 'data' in nxentry.instrument.detector:
1197
+ field_indices = [index for index, key in enumerate(image_key) if key == 1]
1198
+ tbf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1199
+ # RV the default NXtomo form does not accommodate bright or dark field stacks
1200
+ else:
1201
+ import_scanparser(nxentry.instrument.source.attrs['station'],
1202
+ nxentry.instrument.source.attrs['experiment_type'])
1203
+ bright_field_scans = nxentry.spec_scans.bright_field
1204
+ detector_prefix = str(nxentry.instrument.detector.local_name)
1205
+ tbf_stack = []
1206
+ for nxsubentry_name, nxsubentry in bright_field_scans.items():
1207
+ scan_number = int(nxsubentry_name.split('_')[-1])
1208
+ scanparser = get_scanparser(bright_field_scans.attrs['spec_file'], scan_number)
1209
+ image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1210
+ num_image = len(nxsubentry.sample.rotation_angle)
1211
+ tbf_stack.append(scanparser.get_detector_data(detector_prefix,
1212
+ (image_offset, image_offset+num_image)))
1213
+ if isinstance(tbf_stack, list):
1214
+ assert(len(tbf_stack) == 1) # TODO
1215
+ tbf_stack = tbf_stack[0]
1216
+
1217
+ # Take median if more than one image
1218
+ """Median or mean: It may be best to try the median because of some image
1219
+ artifacts that arise due to crinkles in the upstream kapton tape windows
1220
+ causing some phase contrast images to appear on the detector.
1221
+ One thing that also may be useful in a future implementation is to do a
1222
+ brightfield adjustment on EACH frame of the tomo based on a ROI in the
1223
+ corner of the frame where there is no sample but there is the direct X-ray
1224
+ beam because there is frame to frame fluctuations from the incoming beam.
1225
+ We don’t typically account for them but potentially could.
1226
+ """
1227
+ from nexusformat.nexus import NXdata
1228
+
1229
+ if tbf_stack.ndim == 2:
1230
+ tbf = tbf_stack
1231
+ elif tbf_stack.ndim == 3:
1232
+ tbf = np.median(tbf_stack, axis=0)
1233
+ del tbf_stack
1234
+ else:
1235
+ raise ValueError(f'Invalid tbf_stack shape ({tbf_stack.shape})')
1236
+
1237
+ # Subtract dark field
1238
+ if 'data' in reduced_data and 'dark_field' in reduced_data.data:
1239
+ tbf -= reduced_data.data.dark_field
1240
+ else:
1241
+ self.logger.warning('Dark field unavailable')
1242
+
1243
+ # Set any non-positive values to one
1244
+ # (avoid negative bright field values for spikes in dark field)
1245
+ tbf[tbf < 1] = 1
1246
+
1247
+ # Plot bright field
1248
+ if self.save_figs:
1249
+ if self.galaxy_flag:
1250
+ quick_imshow(tbf, title='bright field', path='tomo_reduce_plots', save_fig=True,
1251
+ save_only=True)
1252
+ else:
1253
+ quick_imshow(tbf, title='bright field', path=self.output_folder, save_fig=True,
1254
+ save_only=True)
1255
+
1256
+ # Add bright field to reduced data NXprocess
1257
+ if 'data' not in reduced_data:
1258
+ reduced_data.data = NXdata()
1259
+ reduced_data.data['bright_field'] = tbf
1260
+
1261
+ return(reduced_data)
1262
+
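The comment block in `_gen_bright` floats the idea of a per-frame bright-field adjustment based on a sample-free corner ROI that only sees the direct beam; a rough sketch of that idea (not implemented in the package), with hypothetical data:

    import numpy as np

    def normalize_by_corner_roi(tomo_stack, tbf, roi=(slice(0, 32), slice(0, 32))):
        """Scale each frame so its corner-ROI intensity matches the bright field's."""
        tbf_roi = tbf[roi].mean()
        scales = np.array([tbf_roi / frame[roi].mean() for frame in tomo_stack])
        return tomo_stack * scales[:, None, None]

    # Hypothetical data: 5 projections with a ~2% frame-to-frame beam fluctuation
    rng = np.random.default_rng(1)
    tbf = np.full((128, 128), 1000.0)
    tomo_stack = np.stack([tbf * s for s in 1 + 0.02*rng.standard_normal(5)])
    corrected = normalize_by_corner_roi(tomo_stack, tbf)
    assert np.allclose(corrected.mean(axis=(1, 2)), 1000.0)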
1263
+ def _set_detector_bounds(self, nxentry, reduced_data, img_x_bounds=None):
1264
+ """Set vertical detector bounds for each image stack.
1265
+ Right now the range is the same for each set in the image stack.
1266
+ """
1267
+ from CHAP.common.models.map import get_scanparser, import_scanparser
1268
+ from CHAP.common.utils.fit import Fit  # assumed home of the Fit helper used below
+ from CHAP.common.utils.general import is_index_range
1269
+
1270
+ if self.test_mode:
1271
+ return(tuple(self.test_config['img_x_bounds']))
1272
+
1273
+ # Get the first tomography image and the reference heights
1274
+ image_key = nxentry.instrument.detector.get('image_key', None)
1275
+ if image_key and 'data' in nxentry.instrument.detector:
1276
+ field_indices = [index for index, key in enumerate(image_key) if key == 0]
1277
+ first_image = np.asarray(nxentry.instrument.detector.data[field_indices[0],:,:])
1278
+ theta = float(nxentry.sample.rotation_angle[field_indices[0]])
1279
+ z_translation_all = nxentry.sample.z_translation[field_indices]
1280
+ vertical_shifts = sorted(list(set(z_translation_all)))
1281
+ num_tomo_stacks = len(vertical_shifts)
1282
+ else:
1283
+ import_scanparser(nxentry.instrument.source.attrs['station'],
1284
+ nxentry.instrument.source.attrs['experiment_type'])
1285
+ tomo_field_scans = nxentry.spec_scans.tomo_fields
1286
+ num_tomo_stacks = len(tomo_field_scans.keys())
1287
+ center_stack_index = int(num_tomo_stacks/2)
1288
+ detector_prefix = str(nxentry.instrument.detector.local_name)
1289
+ vertical_shifts = []
1290
+ for i, nxsubentry in enumerate(tomo_field_scans.items()):
1291
+ scan_number = int(nxsubentry[0].split('_')[-1])
1292
+ scanparser = get_scanparser(tomo_field_scans.attrs['spec_file'], scan_number)
1293
+ image_offset = int(nxsubentry[1].instrument.detector.frame_start_number)
1294
+ vertical_shifts.append(nxsubentry[1].sample.z_translation)
1295
+ if i == center_stack_index:
1296
+ first_image = scanparser.get_detector_data(detector_prefix, image_offset)
1297
+ theta = float(nxsubentry[1].sample.rotation_angle[0])
1298
+
1299
+ # Select image bounds
1300
+ title = f'tomography image at theta={round(theta, 2)+0}'
1301
+ if img_x_bounds is not None:
1302
+ if not is_index_range(img_x_bounds, ge=0, le=first_image.shape[0]):
1303
+ raise ValueError(f'Invalid parameter img_x_bounds ({img_x_bounds})')
1304
+ #RV TODO make interactive upon request?
1305
+ return(img_x_bounds)
1306
+ if nxentry.instrument.source.attrs['station'] in ('id1a3', 'id3a'):
1307
+ pixel_size = nxentry.instrument.detector.x_pixel_size
1308
+ # Try to get a fit from the bright field
1309
+ tbf = np.asarray(reduced_data.data.bright_field)
1310
+ tbf_shape = tbf.shape
1311
+ x_sum = np.sum(tbf, 1)
1312
+ x_sum_min = x_sum.min()
1313
+ x_sum_max = x_sum.max()
1314
+ fit = Fit.fit_data(x_sum, 'rectangle', x=np.array(range(len(x_sum))), form='atan',
1315
+ guess=True)
1316
+ parameters = fit.best_values
1317
+ x_low_fit = parameters.get('center1', None)
1318
+ x_upp_fit = parameters.get('center2', None)
1319
+ sig_low = parameters.get('sigma1', None)
1320
+ sig_upp = parameters.get('sigma2', None)
1321
+ have_fit = fit.success and x_low_fit is not None and x_upp_fit is not None and \
1322
+ sig_low is not None and sig_upp is not None and \
1323
+ 0 <= x_low_fit < x_upp_fit <= x_sum.size and \
1324
+ (sig_low+sig_upp)/(x_upp_fit-x_low_fit) < 0.1
1325
+ if have_fit:
1326
+ # Set a 5% margin on each side
1327
+ margin = 0.05*(x_upp_fit-x_low_fit)
1328
+ x_low_fit = max(0, x_low_fit-margin)
1329
+ x_upp_fit = min(tbf_shape[0], x_upp_fit+margin)
1330
+ if num_tomo_stacks == 1:
1331
+ if have_fit:
1332
+ # Set the default range to enclose the full fitted window
1333
+ x_low = int(x_low_fit)
1334
+ x_upp = int(x_upp_fit)
1335
+ else:
1336
+ # Center a default range of 1 mm (RV: can we get this from the slits?)
1337
+ num_x_min = int((1.0-0.5*pixel_size)/pixel_size)
1338
+ x_low = int(0.5*(tbf_shape[0]-num_x_min))
1339
+ x_upp = x_low+num_x_min
1340
+ else:
1341
+ # Get the default range from the reference heights
1342
+ delta_z = vertical_shifts[1]-vertical_shifts[0]
1343
+ for i in range(2, num_tomo_stacks):
1344
+ delta_z = min(delta_z, vertical_shifts[i]-vertical_shifts[i-1])
1345
+ self.logger.debug(f'delta_z = {delta_z}')
1346
+ num_x_min = int((delta_z-0.5*pixel_size)/pixel_size)
1347
+ self.logger.debug(f'num_x_min = {num_x_min}')
1348
+ if num_x_min > tbf_shape[0]:
1349
+ self.logger.warning('Image bounds and pixel size prevent seamless stacking')
1350
+ if have_fit:
1351
+ # Center the default range relative to the fitted window
1352
+ x_low = int(0.5*(x_low_fit+x_upp_fit-num_x_min))
1353
+ x_upp = x_low+num_x_min
1354
+ else:
1355
+ # Center the default range
1356
+ x_low = int(0.5*(tbf_shape[0]-num_x_min))
1357
+ x_upp = x_low+num_x_min
1358
+ if self.galaxy_flag:
1359
+ img_x_bounds = (x_low, x_upp)
1360
+ else:
1361
+ tmp = np.copy(tbf)
1362
+ tmp_max = tmp.max()
1363
+ tmp[x_low,:] = tmp_max
1364
+ tmp[x_upp-1,:] = tmp_max
1365
+ quick_imshow(tmp, title='bright field')
1366
+ tmp = np.copy(first_image)
1367
+ tmp_max = tmp.max()
1368
+ tmp[x_low,:] = tmp_max
1369
+ tmp[x_upp-1,:] = tmp_max
1370
+ quick_imshow(tmp, title=title)
1371
+ del tmp
1372
+ quick_plot((range(x_sum.size), x_sum),
1373
+ ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1374
+ ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1375
+ title='sum over theta and y')
1376
+ print(f'lower bound = {x_low} (inclusive)')
1377
+ print(f'upper bound = {x_upp} (exclusive)')
1378
+ accept = input_yesno('Accept these bounds (y/n)?', 'y')
1379
+ clear_imshow('bright field')
1380
+ clear_imshow(title)
1381
+ clear_plot('sum over theta and y')
1382
+ if accept:
1383
+ img_x_bounds = (x_low, x_upp)
1384
+ else:
1385
+ while True:
1386
+ mask, img_x_bounds = draw_mask_1d(x_sum, title='select x data range',
1387
+ legend='sum over theta and y')
1388
+ if len(img_x_bounds) == 1:
1389
+ break
1390
+ else:
1391
+ print(f'Choose a single connected data range')
1392
+ img_x_bounds = tuple(img_x_bounds[0])
1393
+ if (num_tomo_stacks > 1 and img_x_bounds[1]-img_x_bounds[0]+1 <
1394
+ int((delta_z-0.5*pixel_size)/pixel_size)):
1395
+ self.logger.warning('Image bounds and pixel size prevent seamless stacking')
1396
+ else:
1397
+ if num_tomo_stacks > 1:
1398
+ raise NotImplementedError('Selecting image bounds for multiple stacks on FMB')
1399
+ # For FMB: use the first tomography image to select range
1400
+ # RV: revisit if they do tomography with multiple stacks
1401
+ x_sum = np.sum(first_image, 1)
1402
+ x_sum_min = x_sum.min()
1403
+ x_sum_max = x_sum.max()
1404
+ if self.galaxy_flag:
1405
+ if img_x_bounds is None:
1406
+ img_x_bounds = (0, first_image.shape[0])
1407
+ else:
1408
+ print('Select vertical data reduction range from first tomography image')
1409
+ img_x_bounds = select_image_bounds(first_image, 0, title=title)
1410
+ if img_x_bounds is None:
1411
+ raise ValueError('Unable to select image bounds')
1412
+
1413
+ # Plot results
1414
+ if self.save_figs:
1415
+ if self.galaxy_flag:
1416
+ path = 'tomo_reduce_plots'
1417
+ else:
1418
+ path = self.output_folder
1419
+ x_low = img_x_bounds[0]
1420
+ x_upp = img_x_bounds[1]
1421
+ tmp = np.copy(first_image)
1422
+ tmp_max = tmp.max()
1423
+ tmp[x_low,:] = tmp_max
1424
+ tmp[x_upp-1,:] = tmp_max
1425
+ quick_imshow(tmp, title=title, path=path, save_fig=True, save_only=True)
1426
+ quick_plot((range(x_sum.size), x_sum),
1427
+ ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1428
+ ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1429
+ title='sum over theta and y', path=path, save_fig=True, save_only=True)
1430
+ del tmp
1431
+
1432
+ return(img_x_bounds)
1433
+
1434
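
`_set_detector_bounds` above fits a rectangle function with arctangent edges to the bright field summed over detector columns and pads the fitted window by 5% to get the vertical bounds. A rough stand-in for that fit using `scipy.optimize.curve_fit` rather than the package's own `Fit` class (the model and starting values below are assumptions for illustration, not the package's implementation):

```python
import numpy as np
from scipy.optimize import curve_fit

def rectangle_atan(x, amplitude, center1, sigma1, center2, sigma2, offset):
    """Flat-topped profile with arctangent-shaped rising and falling edges."""
    rise = 0.5 + np.arctan((x - center1) / sigma1) / np.pi
    fall = 0.5 - np.arctan((x - center2) / sigma2) / np.pi
    return offset + amplitude * rise * fall

# Synthetic column-summed bright field with an illuminated band in rows 40..160
x = np.arange(200, dtype=float)
x_sum = rectangle_atan(x, 1.0e6, 40.0, 2.0, 160.0, 2.0, 1.0e4)
x_sum += np.random.default_rng(0).normal(0.0, 1.0e3, x.size)

# Seed the edge positions from the half-maximum crossings
half = 0.5 * (x_sum.max() + x_sum.min())
above = np.nonzero(x_sum > half)[0]
p0 = (x_sum.max() - x_sum.min(), above[0], 1.0, above[-1], 1.0, x_sum.min())
popt, _ = curve_fit(rectangle_atan, x, x_sum, p0=p0)
_, x_low_fit, _, x_upp_fit, _, _ = popt

# Pad the fitted window by 5% on each side, as the processor does
margin = 0.05 * (x_upp_fit - x_low_fit)
img_x_bounds = (int(max(0, x_low_fit - margin)), int(min(x.size, x_upp_fit + margin)))
print(img_x_bounds)
```
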
+ def _set_zoom_or_skip(self):
1435
+ """Set zoom and/or theta skip to reduce memory the requirement for the analysis.
1436
+ """
1437
+ # if input_yesno('\nDo you want to zoom in to reduce memory requirement (y/n)?', 'n'):
1438
+ # zoom_perc = input_int(' Enter zoom percentage', ge=1, le=100)
1439
+ # else:
1440
+ # zoom_perc = None
1441
+ zoom_perc = None
1442
+ # if input_yesno('Do you want to skip thetas to reduce memory requirement (y/n)?', 'n'):
1443
+ # num_theta_skip = input_int(' Enter the number skip theta interval', ge=0,
1444
+ # lt=num_theta)
1445
+ # else:
1446
+ # num_theta_skip = None
1447
+ num_theta_skip = None
1448
+ self.logger.debug(f'zoom_perc = {zoom_perc}')
1449
+ self.logger.debug(f'num_theta_skip = {num_theta_skip}')
1450
+
1451
+ return(zoom_perc, num_theta_skip)
1452
+
1453
+ def _gen_tomo(self, nxentry, reduced_data):
1454
+ """Generate tomography fields.
1455
+ """
1456
+ import numexpr as ne
1457
+ import scipy.ndimage as spi
1458
+
1459
+ from CHAP.common.models.map import get_scanparser, import_scanparser
+ from CHAP.common.utils.general import set_numexpr_threads
1460
+
1461
+ # Get full bright field
1462
+ tbf = np.asarray(reduced_data.data.bright_field)
1463
+ tbf_shape = tbf.shape
1464
+
1465
+ # Get image bounds
1466
+ img_x_bounds = tuple(reduced_data.get('img_x_bounds', (0, tbf_shape[0])))
1467
+ img_y_bounds = tuple(reduced_data.get('img_y_bounds', (0, tbf_shape[1])))
1468
+
1469
+ # Get resized dark field
1470
+ # if 'dark_field' in data:
1471
+ # tbf = np.asarray(reduced_data.data.dark_field[
1472
+ # img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]])
1473
+ # else:
1474
+ # self.logger.warning('Dark field unavailable')
1475
+ # tdf = None
1476
+ tdf = None
1477
+
1478
+ # Resize bright field
1479
+ if img_x_bounds != (0, tbf.shape[0]) or img_y_bounds != (0, tbf.shape[1]):
1480
+ tbf = tbf[img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]]
1481
+
1482
+ # Get the tomography images
1483
+ image_key = nxentry.instrument.detector.get('image_key', None)
1484
+ if image_key and 'data' in nxentry.instrument.detector:
1485
+ field_indices_all = [index for index, key in enumerate(image_key) if key == 0]
1486
+ z_translation_all = nxentry.sample.z_translation[field_indices_all]
1487
+ z_translation_levels = sorted(list(set(z_translation_all)))
1488
+ num_tomo_stacks = len(z_translation_levels)
1489
+ tomo_stacks = []
1490
+ horizontal_shifts = []
1491
+ vertical_shifts = []
1492
+ thetas = None
1493
+ for i, z_translation in enumerate(z_translation_levels):
1495
+ field_indices = [field_indices_all[index]
1496
+ for index, z in enumerate(z_translation_all) if z == z_translation]
1497
+ horizontal_shift = list(set(nxentry.sample.x_translation[field_indices]))
1498
+ assert(len(horizontal_shift) == 1)
1499
+ horizontal_shifts += horizontal_shift
1500
+ vertical_shift = list(set(nxentry.sample.z_translation[field_indices]))
1501
+ assert(len(vertical_shift) == 1)
1502
+ vertical_shifts += vertical_shift
1503
+ sequence_numbers = nxentry.instrument.detector.sequence_number[field_indices]
1504
+ if thetas is None:
1505
+ thetas = np.asarray(nxentry.sample.rotation_angle[field_indices]) \
1506
+ [sequence_numbers]
1507
+ else:
1508
+ assert(all(thetas[i] == nxentry.sample.rotation_angle[field_indices[index]]
1509
+ for i, index in enumerate(sequence_numbers)))
1510
+ assert(list(set(sequence_numbers)) == [i for i in range(len(sequence_numbers))])
1511
+ if list(sequence_numbers) == [i for i in range(len(sequence_numbers))]:
1512
+ tomo_stack = np.asarray(nxentry.instrument.detector.data[field_indices])
1513
+ else:
1514
+ raise ValueError('Unable to load the tomography images')
1515
+ tomo_stacks.append(tomo_stack)
1516
+ else:
1517
+ import_scanparser(nxentry.instrument.source.attrs['station'],
1518
+ nxentry.instrument.source.attrs['experiment_type'])
1519
+ tomo_field_scans = nxentry.spec_scans.tomo_fields
1520
+ num_tomo_stacks = len(tomo_field_scans.keys())
1521
+ center_stack_index = int(num_tomo_stacks/2)
1522
+ detector_prefix = str(nxentry.instrument.detector.local_name)
1523
+ thetas = None
1524
+ tomo_stacks = []
1525
+ horizontal_shifts = []
1526
+ vertical_shifts = []
1527
+ for nxsubentry_name, nxsubentry in tomo_field_scans.items():
1528
+ scan_number = int(nxsubentry_name.split('_')[-1])
1529
+ scanparser = get_scanparser(tomo_field_scans.attrs['spec_file'], scan_number)
1530
+ image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1531
+ if thetas is None:
1532
+ thetas = np.asarray(nxsubentry.sample.rotation_angle)
1533
+ num_image = len(thetas)
1534
+ tomo_stacks.append(scanparser.get_detector_data(detector_prefix,
1535
+ (image_offset, image_offset+num_image)))
1536
+ horizontal_shifts.append(nxsubentry.sample.x_translation)
1537
+ vertical_shifts.append(nxsubentry.sample.z_translation)
1538
+
1539
+ reduced_tomo_stacks = []
1540
+ if self.galaxy_flag:
1541
+ path = 'tomo_reduce_plots'
1542
+ else:
1543
+ path = self.output_folder
1544
+ for i, tomo_stack in enumerate(tomo_stacks):
1545
+ # Resize the tomography images
1546
+ # Right now the range is the same for each set in the image stack.
1547
+ if img_x_bounds != (0, tbf.shape[0]) or img_y_bounds != (0, tbf.shape[1]):
1548
+ t0 = time()
1549
+ tomo_stack = tomo_stack[:,img_x_bounds[0]:img_x_bounds[1],
1550
+ img_y_bounds[0]:img_y_bounds[1]].astype('float64')
1551
+ self.logger.debug(f'Resizing tomography images took {time()-t0:.2f} seconds')
1552
+
1553
+ # Subtract dark field
1554
+ if tdf is not None:
1555
+ t0 = time()
1556
+ with set_numexpr_threads(self.num_core):
1557
+ ne.evaluate('tomo_stack-tdf', out=tomo_stack)
1558
+ self.logger.debug(f'Subtracting dark field took {time()-t0:.2f} seconds')
1559
+
1560
+ # Normalize
1561
+ t0 = time()
1562
+ with set_numexpr_threads(self.num_core):
1563
+ ne.evaluate('tomo_stack/tbf', out=tomo_stack, truediv=True)
1564
+ self.logger.debug(f'Normalizing took {time()-t0:.2f} seconds')
1565
+
1566
+ # Remove non-positive values and linearize data
1567
+ t0 = time()
1568
+ cutoff = 1.e-6
1569
+ with set_numexpr_threads(self.num_core):
1570
+ ne.evaluate('where(tomo_stack<cutoff, cutoff, tomo_stack)', out=tomo_stack)
1571
+ with set_numexpr_threads(self.num_core):
1572
+ ne.evaluate('-log(tomo_stack)', out=tomo_stack)
1573
+ self.logger.debug('Removing non-positive values and linearizing data took '+
1574
+ f'{time()-t0:.2f} seconds')
1575
+
1576
+ # Get rid of nans/infs that may be introduced by normalization
1577
+ t0 = time()
1578
+ tomo_stack = np.where(np.isfinite(tomo_stack), tomo_stack, 0.)
1579
+ self.logger.debug(f'Remove nans/infs took {time()-t0:.2f} seconds')
1580
+
1581
+ # Downsize tomography stack to smaller size
1582
+ # TODO use theta_skip as well
1583
+ tomo_stack = tomo_stack.astype('float32')
1584
+ if not self.test_mode:
1585
+ if len(tomo_stacks) == 1:
1586
+ title = f'red fullres theta {round(thetas[0], 2)+0}'
1587
+ else:
1588
+ title = f'red stack {i+1} fullres theta {round(thetas[0], 2)+0}'
1589
+ quick_imshow(tomo_stack[0,:,:], title=title, path=path, save_fig=self.save_figs,
1590
+ save_only=self.save_only, block=self.block)
1591
+ # if not self.block:
1592
+ # clear_imshow(title)
1593
+ if False and zoom_perc != 100:
1594
+ t0 = time()
1595
+ self.logger.debug(f'Zooming in ...')
1596
+ tomo_zoom_list = []
1597
+ for j in range(tomo_stack.shape[0]):
1598
+ tomo_zoom = spi.zoom(tomo_stack[j,:,:], 0.01*zoom_perc)
1599
+ tomo_zoom_list.append(tomo_zoom)
1600
+ tomo_stack = np.stack([tomo_zoom for tomo_zoom in tomo_zoom_list])
1601
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1602
+ self.logger.info(f'Zooming in took {time()-t0:.2f} seconds')
1603
+ del tomo_zoom_list
1604
+ if not self.test_mode:
1605
+ title = f'red stack {zoom_perc}p theta {round(thetas[0], 2)+0}'
1606
+ quick_imshow(tomo_stack[0,:,:], title=title, path=path, save_fig=self.save_figs,
1607
+ save_only=self.save_only, block=self.block)
1608
+ # if not self.block:
1609
+ # clear_imshow(title)
1610
+
1611
+ # Save test data to file
1612
+ if self.test_mode:
1613
+ # row_index = int(tomo_stack.shape[0]/2)
1614
+ # np.savetxt(f'{self.output_folder}/red_stack_{i+1}.txt', tomo_stack[row_index,:,:],
1615
+ # fmt='%.6e')
1616
+ row_index = int(tomo_stack.shape[1]/2)
1617
+ np.savetxt(f'{self.output_folder}/red_stack_{i+1}.txt', tomo_stack[:,row_index,:],
1618
+ fmt='%.6e')
1619
+
1620
+ # Combine resized stacks
1621
+ reduced_tomo_stacks.append(tomo_stack)
1622
+
1623
+ # Add tomo field info to reduced data NXprocess
1624
+ reduced_data['rotation_angle'] = thetas
1625
+ reduced_data['x_translation'] = np.asarray(horizontal_shifts)
1626
+ reduced_data['z_translation'] = np.asarray(vertical_shifts)
1627
+ reduced_data.data['tomo_fields'] = np.asarray(reduced_tomo_stacks)
1628
+
1629
+ if tdf is not None:
1630
+ del tdf
1631
+ del tbf
1632
+
1633
+ return(reduced_data)
1634
+
1635
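
The per-stack reduction in `_gen_tomo` is, in essence, (tomo - dark) / bright followed by clamping non-positive values and taking the negative logarithm. A plain-NumPy equivalent of that arithmetic (the method above uses numexpr for in-place, multi-threaded evaluation; the arrays here are synthetic):

```python
import numpy as np

def reduce_tomo_stack(tomo_stack, tbf, tdf=None, cutoff=1.0e-6):
    """Flat-field normalize a (theta, row, column) stack and linearize it,
    mirroring the numexpr expressions in _gen_tomo above."""
    stack = tomo_stack.astype('float64')
    if tdf is not None:
        stack -= tdf                                  # dark-field subtraction
    stack /= tbf                                      # bright-field normalization
    stack = np.where(stack < cutoff, cutoff, stack)   # avoid log of values <= 0
    stack = -np.log(stack)                            # linearize (Beer-Lambert)
    stack = np.where(np.isfinite(stack), stack, 0.0)  # drop any nans/infs
    return stack.astype('float32')

rng = np.random.default_rng(0)
tbf = 1000.0 + rng.normal(0.0, 5.0, (40, 50))
tomo = rng.uniform(200.0, 900.0, (7, 40, 50))
reduced = reduce_tomo_stack(tomo, tbf)
print(reduced.shape, reduced.dtype)  # (7, 40, 50) float32
```
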
+ def _find_center_one_plane(self, sinogram, row, thetas, eff_pixel_size, cross_sectional_dim,
1636
+ path=None, tol=0.1, num_core=1):
1637
+ """Find center for a single tomography plane.
1638
+ """
1639
+ import tomopy
+
+ from CHAP.common.utils.general import input_int, input_num
1640
+
1641
+ # Try automatic center finding routines for initial value
1642
+ # sinogram index order: theta,column
1643
+ # need column,theta for iradon, so take transpose
1644
+ sinogram = np.asarray(sinogram)
1645
+ sinogram_T = sinogram.T
1646
+ center = sinogram.shape[1]/2
1647
+
1648
+ # Try using Nghia Vo’s method
1649
+ t0 = time()
1650
+ if num_core > num_core_tomopy_limit:
1651
+ self.logger.debug(f'Running find_center_vo on {num_core_tomopy_limit} cores ...')
1652
+ tomo_center = tomopy.find_center_vo(sinogram, ncore=num_core_tomopy_limit)
1653
+ else:
1654
+ self.logger.debug(f'Running find_center_vo on {num_core} cores ...')
1655
+ tomo_center = tomopy.find_center_vo(sinogram, ncore=num_core)
1656
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1657
+ self.logger.info(f'Finding the center using Nghia Vo’s method took {time()-t0:.2f} seconds')
1658
+ center_offset_vo = tomo_center-center
1659
+ self.logger.info(f'Center at row {row} using Nghia Vo’s method = {center_offset_vo:.2f}')
1660
+ t0 = time()
1661
+ self.logger.debug(f'Running _reconstruct_one_plane on {self.num_core} cores ...')
1662
+ recon_plane = self._reconstruct_one_plane(sinogram_T, tomo_center, thetas,
1663
+ eff_pixel_size, cross_sectional_dim, False, num_core)
1664
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1665
+ self.logger.info(f'Reconstructing row {row} took {time()-t0:.2f} seconds')
1666
+
1667
+ title = f'edges row{row} center offset{center_offset_vo:.2f} Vo'
1668
+ self._plot_edges_one_plane(recon_plane, title, path=path)
1669
+
1670
+ # Try using phase correlation method
1671
+ # if input_yesno('Try finding center using phase correlation (y/n)?', 'n'):
1672
+ # t0 = time()
1673
+ # self.logger.debug(f'Running find_center_pc ...')
1674
+ # tomo_center = tomopy.find_center_pc(sinogram, sinogram, tol=0.1, rotc_guess=tomo_center)
1675
+ # error = 1.
1676
+ # while error > tol:
1677
+ # prev = tomo_center
1678
+ # tomo_center = tomopy.find_center_pc(sinogram, sinogram, tol=tol,
1679
+ # rotc_guess=tomo_center)
1680
+ # error = np.abs(tomo_center-prev)
1681
+ # self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1682
+ # self.logger.info('Finding the center using the phase correlation method took '+
1683
+ # f'{time()-t0:.2f} seconds')
1684
+ # center_offset = tomo_center-center
1685
+ # print(f'Center at row {row} using phase correlation = {center_offset:.2f}')
1686
+ # t0 = time()
1687
+ # self.logger.debug(f'Running _reconstruct_one_plane on {self.num_core} cores ...')
1688
+ # recon_plane = self._reconstruct_one_plane(sinogram_T, tomo_center, thetas,
1689
+ # eff_pixel_size, cross_sectional_dim, False, num_core)
1690
+ # self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1691
+ # self.logger.info(f'Reconstructing row {row} took {time()-t0:.2f} seconds')
1692
+ #
1693
+ # title = f'edges row{row} center_offset{center_offset:.2f} PC'
1694
+ # self._plot_edges_one_plane(recon_plane, title, path=path)
1695
+
1696
+ # Select center location
1697
+ # if input_yesno('Accept a center location (y) or continue search (n)?', 'y'):
1698
+ if True:
1699
+ # center_offset = input_num(' Enter chosen center offset', ge=-center, le=center,
1700
+ # default=center_offset_vo)
1701
+ center_offset = center_offset_vo
1702
+ del sinogram_T
1703
+ del recon_plane
1704
+ return float(center_offset)
1705
+
1706
+ # perform center finding search
1707
+ while True:
1708
+ center_offset_low = input_int('\nEnter lower bound for center offset', ge=-center,
1709
+ le=center)
1710
+ center_offset_upp = input_int('Enter upper bound for center offset',
1711
+ ge=center_offset_low, le=center)
1712
+ if center_offset_upp == center_offset_low:
1713
+ center_offset_step = 1
1714
+ else:
1715
+ center_offset_step = input_int('Enter step size for center offset search', ge=1,
1716
+ le=center_offset_upp-center_offset_low)
1717
+ num_center_offset = 1+int((center_offset_upp-center_offset_low)/center_offset_step)
1718
+ center_offsets = np.linspace(center_offset_low, center_offset_upp, num_center_offset)
1719
+ for center_offset in center_offsets:
1720
+ if center_offset == center_offset_vo:
1721
+ continue
1722
+ t0 = time()
1723
+ self.logger.debug(f'Running _reconstruct_one_plane on {num_core} cores ...')
1724
+ recon_plane = self._reconstruct_one_plane(sinogram_T, center_offset+center, thetas,
1725
+ eff_pixel_size, cross_sectional_dim, False, num_core)
1726
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1727
+ self.logger.info(f'Reconstructing center_offset {center_offset} took '+
1728
+ f'{time()-t0:.2f} seconds')
1729
+ title = f'edges row{row} center_offset{center_offset:.2f}'
1730
+ self._plot_edges_one_plane(recon_plane, title, path=path)
1731
+ if input_int('\nContinue (0) or end the search (1)', ge=0, le=1):
1732
+ break
1733
+
1734
+ del sinogram_T
1735
+ del recon_plane
1736
+ center_offset = input_num(' Enter chosen center offset', ge=-center, le=center)
1737
+ return float(center_offset)
1738
+
1739
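
`_find_center_one_plane` seeds the rotation-axis search with tomopy's `find_center_vo` (Nghia Vo's method) and reports the result as an offset from the detector column center. A condensed sketch of that seeding step on a synthetic phantom (the phantom, angles, and row choice are illustrative):

```python
import numpy as np
import tomopy

# Hypothetical normalized projections: (n_theta, n_rows, n_columns)
obj = tomopy.shepp3d(size=64)
thetas = np.linspace(0.0, 180.0, 181)               # degrees
proj = tomopy.project(obj, np.radians(thetas))      # simulated projections

row = proj.shape[1] // 2
center = proj.shape[2] / 2                          # detector column center
tomo_center = tomopy.find_center_vo(proj, ind=row)  # Nghia Vo's method
center_offset = tomo_center - center                # offset convention used above
print(f'center offset at row {row} = {center_offset:.2f}')
```
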
+ def _reconstruct_one_plane(self, tomo_plane_T, center, thetas, eff_pixel_size,
1740
+ cross_sectional_dim, plot_sinogram=True, num_core=1):
1741
+ """Invert the sinogram for a single tomography plane.
1742
+ """
1743
+ import scipy.ndimage as spi
1744
+ from skimage.transform import iradon
1745
+ import tomopy
+
+ from CHAP.common.utils.general import is_num
1746
+
1747
+ # tomo_plane_T index order: column,theta
1748
+ assert(0 <= center < tomo_plane_T.shape[0])
1749
+ center_offset = center-tomo_plane_T.shape[0]/2
1750
+ two_offset = 2*int(np.round(center_offset))
1751
+ two_offset_abs = np.abs(two_offset)
1752
+ max_rad = int(0.55*(cross_sectional_dim/eff_pixel_size)) # 10% slack to avoid edge effects
1753
+ if max_rad > 0.5*tomo_plane_T.shape[0]:
1754
+ max_rad = 0.5*tomo_plane_T.shape[0]
1755
+ dist_from_edge = max(1, int(np.floor((tomo_plane_T.shape[0]-two_offset_abs)/2.)-max_rad))
1756
+ if two_offset >= 0:
1757
+ self.logger.debug(f'sinogram range = [{two_offset+dist_from_edge}, {-dist_from_edge}]')
1758
+ sinogram = tomo_plane_T[two_offset+dist_from_edge:-dist_from_edge,:]
1759
+ else:
1760
+ self.logger.debug(f'sinogram range = [{dist_from_edge}, {two_offset-dist_from_edge}]')
1761
+ sinogram = tomo_plane_T[dist_from_edge:two_offset-dist_from_edge,:]
1762
+ if not self.galaxy_flag and plot_sinogram:
1763
+ quick_imshow(sinogram.T, f'sinogram center offset{center_offset:.2f}', aspect='auto',
1764
+ path=self.output_folder, save_fig=self.save_figs, save_only=self.save_only,
1765
+ block=self.block)
1766
+
1767
+ # Inverting sinogram
1768
+ t0 = time()
1769
+ recon_sinogram = iradon(sinogram, theta=thetas, circle=True)
1770
+ self.logger.debug(f'Inverting sinogram took {time()-t0:.2f} seconds')
1771
+ del sinogram
1772
+
1773
+ # Performing Gaussian filtering and removing ring artifacts
1774
+ recon_parameters = None#self.config.get('recon_parameters')
1775
+ if recon_parameters is None:
1776
+ sigma = 1.0
1777
+ ring_width = 15
1778
+ else:
1779
+ sigma = recon_parameters.get('gaussian_sigma', 1.0)
1780
+ if not is_num(sigma, ge=0.0):
1781
+ self.logger.warning(f'Invalid gaussian_sigma ({sigma}) in _reconstruct_one_plane, '+
1782
+ 'set to a default value of 1.0')
1783
+ sigma = 1.0
1784
+ ring_width = recon_parameters.get('ring_width', 15)
1785
+ if not isinstance(ring_width, int) or ring_width < 0:
1786
+ self.logger.warning(f'Invalid ring_width ({ring_width}) in '+
1787
+ '_reconstruct_one_plane, set to a default value of 15')
1788
+ ring_width = 15
1789
+ t0 = time()
1790
+ recon_sinogram = spi.gaussian_filter(recon_sinogram, sigma, mode='nearest')
1791
+ recon_clean = np.expand_dims(recon_sinogram, axis=0)
1792
+ del recon_sinogram
1793
+ recon_clean = tomopy.misc.corr.remove_ring(recon_clean, rwidth=ring_width, ncore=num_core)
1794
+ self.logger.debug(f'Filtering and removing ring artifacts took {time()-t0:.2f} seconds')
1795
+
1796
+ return recon_clean
1797
+
1798
+ def _plot_edges_one_plane(self, recon_plane, title, path=None):
1799
+ from skimage.restoration import denoise_tv_chambolle
+
+ from CHAP.common.utils.general import is_num
1800
+
1801
+ vis_parameters = None#self.config.get('vis_parameters')
1802
+ if vis_parameters is None:
1803
+ weight = 0.1
1804
+ else:
1805
+ weight = vis_parameters.get('denoise_weight', 0.1)
1806
+ if not is_num(weight, ge=0.0):
1807
+ self.logger.warning(f'Invalid weight ({weight}) in _plot_edges_one_plane, '+
1808
+ 'set to a default value of 0.1')
1809
+ weight = 0.1
1810
+ edges = denoise_tv_chambolle(recon_plane, weight=weight)
1811
+ vmax = np.max(edges[0,:,:])
1812
+ vmin = -vmax
1813
+ if path is None:
1814
+ path = self.output_folder
1815
+ quick_imshow(edges[0,:,:], f'{title} coolwarm', path=path, cmap='coolwarm',
1816
+ save_fig=self.save_figs, save_only=self.save_only, block=self.block)
1817
+ quick_imshow(edges[0,:,:], f'{title} gray', path=path, cmap='gray', vmin=vmin, vmax=vmax,
1818
+ save_fig=self.save_figs, save_only=self.save_only, block=self.block)
1819
+ del edges
1820
+
1821
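
`_plot_edges_one_plane` runs total-variation (Chambolle) denoising over the reconstructed plane before plotting, so edges stand out in the saved figures. A small self-contained example of that call on a noisy synthetic image (the weight of 0.1 matches the default used above):

```python
import numpy as np
from skimage.data import shepp_logan_phantom
from skimage.restoration import denoise_tv_chambolle

noisy = shepp_logan_phantom() + np.random.default_rng(0).normal(0.0, 0.2, (400, 400))
edges = denoise_tv_chambolle(noisy, weight=0.1)  # larger weight -> smoother image
print(edges.shape, float(edges.max()))
```
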
+ def _reconstruct_one_tomo_stack(self, tomo_stack, thetas, center_offsets=[], num_core=1,
1822
+ algorithm='gridrec'):
1823
+ """Reconstruct a single tomography stack.
1824
+ """
1825
+ import tomopy
+
+ from CHAP.common.utils.general import is_num
1826
+
1827
+ # tomo_stack order: row,theta,column
1828
+ # input thetas must be in degrees
1829
+ # centers_offset: tomography axis shift in pixels relative to column center
1830
+ # RV should we remove stripes?
1831
+ # https://tomopy.readthedocs.io/en/latest/api/tomopy.prep.stripe.html
1832
+ # RV should we remove rings?
1833
+ # https://tomopy.readthedocs.io/en/latest/api/tomopy.misc.corr.html
1834
+ # RV: Add an option to do (extra) secondary iterations later or to do some sort of convergence test?
1835
+ if not len(center_offsets):
1836
+ centers = np.zeros((tomo_stack.shape[0]))
1837
+ elif len(center_offsets) == 2:
1838
+ centers = np.linspace(center_offsets[0], center_offsets[1], tomo_stack.shape[0])
1839
+ else:
1840
+ if center_offsets.size != tomo_stack.shape[0]:
1841
+ raise ValueError('center_offsets dimension mismatch in reconstruct_one_tomo_stack')
1842
+ centers = center_offsets
1843
+ centers += tomo_stack.shape[2]/2
1844
+
1845
+ # Get reconstruction parameters
1846
+ recon_parameters = None#self.config.get('recon_parameters')
1847
+ if recon_parameters is None:
1848
+ sigma = 2.0
1849
+ secondary_iters = 0
1850
+ ring_width = 15
1851
+ else:
1852
+ sigma = recon_parameters.get('stripe_fw_sigma', 2.0)
1853
+ if not is_num(sigma, ge=0):
1854
+ self.logger.warning(f'Invalid stripe_fw_sigma ({sigma}) in '+
1855
+ '_reconstruct_one_tomo_stack, set to a default value of 2.0')
1856
+ sigma = 2.0
1857
+ secondary_iters = recon_parameters.get('secondary_iters', 0)
1858
+ if not isinstance(secondary_iters, int) or secondary_iters < 0:
1859
+ self.logger.warning(f'Invalid secondary_iters ({secondary_iters}) in '+
1860
+ '_reconstruct_one_tomo_stack, set to a default value of 0 (skip them)')
1861
+ secondary_iters = 0
1862
+ ring_width = recon_parameters.get('ring_width', 15)
1863
+ if not isinstance(ring_width, int) or ring_width < 0:
1864
+ self.logger.warning(f'Invalid ring_width ({ring_width}) in '+
1865
+ '_reconstruct_one_plane, set to a default value of 15')
1866
+ ring_width = 15
1867
+
1868
+ # Remove horizontal stripe
1869
+ t0 = time()
1870
+ if num_core > num_core_tomopy_limit:
1871
+ self.logger.debug(f'Running remove_stripe_fw on {num_core_tomopy_limit} cores ...')
1872
+ tomo_stack = tomopy.prep.stripe.remove_stripe_fw(tomo_stack, sigma=sigma,
1873
+ ncore=num_core_tomopy_limit)
1874
+ else:
1875
+ self.logger.debug(f'Running remove_stripe_fw on {num_core} cores ...')
1876
+ tomo_stack = tomopy.prep.stripe.remove_stripe_fw(tomo_stack, sigma=sigma,
1877
+ ncore=num_core)
1878
+ self.logger.debug(f'... tomopy.prep.stripe.remove_stripe_fw took {time()-t0:.2f} seconds')
1879
+
1880
+ # Perform initial image reconstruction
1881
+ self.logger.debug('Performing initial image reconstruction')
1882
+ t0 = time()
1883
+ self.logger.debug(f'Running recon on {num_core} cores ...')
1884
+ tomo_recon_stack = tomopy.recon(tomo_stack, np.radians(thetas), centers,
1885
+ sinogram_order=True, algorithm=algorithm, ncore=num_core)
1886
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1887
+ self.logger.info(f'Performing initial image reconstruction took {time()-t0:.2f} seconds')
1888
+
1889
+ # Run optional secondary iterations
1890
+ if secondary_iters > 0:
1891
+ self.logger.debug(f'Running {secondary_iters} secondary iterations')
1892
+ #options = {'method':'SIRT_CUDA', 'proj_type':'cuda', 'num_iter':secondary_iters}
1893
+ #RV: doesn't work for me:
1894
+ #"Error: CUDA error 803: system has unsupported display driver/cuda driver combination."
1895
+ #options = {'method':'SIRT', 'proj_type':'linear', 'MinConstraint': 0, 'num_iter':secondary_iters}
1896
+ #SIRT did not finish while running overnight
1897
+ #options = {'method':'SART', 'proj_type':'linear', 'num_iter':secondary_iters}
1898
+ options = {'method':'SART', 'proj_type':'linear', 'MinConstraint': 0,
1899
+ 'num_iter':secondary_iters}
1900
+ t0 = time()
1901
+ self.logger.debug(f'Running recon on {num_core} cores ...')
1902
+ tomo_recon_stack = tomopy.recon(tomo_stack, np.radians(thetas), centers,
1903
+ init_recon=tomo_recon_stack, options=options, sinogram_order=True,
1904
+ algorithm=tomopy.astra, ncore=num_core)
1905
+ self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1906
+ self.logger.info(f'Performing secondary iterations took {time()-t0:.2f} seconds')
1907
+
1908
+ # Remove ring artifacts
1909
+ t0 = time()
1910
+ tomopy.misc.corr.remove_ring(tomo_recon_stack, rwidth=ring_width, out=tomo_recon_stack,
1911
+ ncore=num_core)
1912
+ self.logger.debug(f'Removing ring artifacts took {time()-t0:.2f} seconds')
1913
+
1914
+ return tomo_recon_stack
1915
+
1916
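
`_reconstruct_one_tomo_stack` chains tomopy's Fourier-wavelet stripe removal, a gridrec reconstruction in sinogram order, optional SART refinement through the astra wrapper, and ring removal. A trimmed-down sketch of the same chain that skips the astra step (synthetic data; parameter values mirror the defaults above but are still illustrative):

```python
import numpy as np
import tomopy

# Synthetic projections in (theta, row, column) order
obj = tomopy.shepp3d(size=64)
thetas = np.linspace(0.0, 180.0, 181)                     # degrees
proj = tomopy.project(obj, np.radians(thetas))

# Fourier-wavelet stripe removal, then reorder to sinogram order (row, theta, column)
proj = tomopy.prep.stripe.remove_stripe_fw(proj, sigma=2.0)
tomo_stack = np.swapaxes(proj, 0, 1).copy()

# One rotation-axis position per row, in pixels from the left edge of the sinogram
centers = np.full(tomo_stack.shape[0], tomo_stack.shape[2] / 2)

# Filtered back-projection (gridrec), followed by ring-artifact removal
recon = tomopy.recon(tomo_stack, np.radians(thetas), centers,
                     sinogram_order=True, algorithm='gridrec')
tomopy.misc.corr.remove_ring(recon, rwidth=15, out=recon)
print(recon.shape)
```
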
+ def _resize_reconstructed_data(self, data, x_bounds=None, y_bounds=None, z_bounds=None,
1917
+ z_only=False):
1918
+ """Resize the reconstructed tomography data.
1919
+ """
1920
+ # Data order: row(z),x,y or stack,row(z),x,y
1921
+ if isinstance(data, list):
1922
+ for stack in data:
1923
+ assert(stack.ndim == 3)
1924
+ num_tomo_stacks = len(data)
1925
+ tomo_recon_stacks = data
1926
+ else:
1927
+ assert(data.ndim == 3)
1928
+ num_tomo_stacks = 1
1929
+ tomo_recon_stacks = [data]
1930
+
1931
+ if x_bounds == (-1, -1):
1932
+ x_bounds = None
1933
+ elif not z_only and x_bounds is None:
1934
+ # Selecting x bounds (in yz-plane)
1935
+ tomosum = 0
1936
+ for i in range(num_tomo_stacks):
1937
+ tomosum = tomosum+np.sum(tomo_recon_stacks[i], axis=(0,2))
1938
+ select_x_bounds = input_yesno('\nDo you want to change the image x-bounds (y/n)?', 'y')
1939
+ if not select_x_bounds:
1940
+ x_bounds = None
1941
+ else:
1942
+ accept = False
1943
+ index_ranges = None
1944
+ while not accept:
1945
+ mask, x_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1946
+ title='select x data range', legend='recon stack sum yz')
1947
+ while len(x_bounds) != 1:
1948
+ print('Please select exactly one continuous range')
1949
+ mask, x_bounds = draw_mask_1d(tomosum, title='select x data range',
1950
+ legend='recon stack sum yz')
1951
+ x_bounds = x_bounds[0]
1952
+ accept = True
1953
+ self.logger.debug(f'x_bounds = {x_bounds}')
1954
+
1955
+ if y_bounds == (-1, -1):
1956
+ y_bounds = None
1957
+ elif not z_only and y_bounds is None:
1958
+ # Selecting y bounds (in xz-plane)
1959
+ tomosum = 0
1960
+ for i in range(num_tomo_stacks):
1961
+ tomosum = tomosum+np.sum(tomo_recon_stacks[i], axis=(0,1))
1962
+ select_y_bounds = input_yesno('\nDo you want to change the image y-bounds (y/n)?', 'y')
1963
+ if not select_y_bounds:
1964
+ y_bounds = None
1965
+ else:
1966
+ accept = False
1967
+ index_ranges = None
1968
+ while not accept:
1969
+ mask, y_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1970
+ title='select y data range', legend='recon stack sum xz')
1971
+ while len(y_bounds) != 1:
1972
+ print('Please select exactly one continuous range')
1973
+ mask, y_bounds = draw_mask_1d(tomosum, title='select y data range',
1974
+ legend='recon stack sum xz')
1975
+ y_bounds = y_bounds[0]
1976
+ accept = True
1977
+ self.logger.debug(f'y_bounds = {y_bounds}')
1978
+
1979
+ # Selecting z bounds (in xy-plane) (only valid for a single image stack)
1980
+ if z_bounds == (-1, -1):
1981
+ z_bounds = None
1982
+ elif z_bounds is None and num_tomo_stacks == 1:
1983
+ tomosum = 0
1984
+ for i in range(num_tomo_stacks):
1985
+ tomosum = tomosum+np.sum(tomo_recon_stacks[i], axis=(1,2))
1986
+ select_z_bounds = input_yesno('Do you want to change the image z-bounds (y/n)?', 'n')
1987
+ if not select_z_bounds:
1988
+ z_bounds = None
1989
+ else:
1990
+ accept = False
1991
+ index_ranges = None
1992
+ while not accept:
1993
+ mask, z_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1994
+ title='select x data range', legend='recon stack sum xy')
1995
+ while len(z_bounds) != 1:
1996
+ print('Please select exactly one continuous range')
1997
+ mask, z_bounds = draw_mask_1d(tomosum, title='select z data range',
1998
+ legend='recon stack sum xy')
1999
+ z_bounds = z_bounds[0]
2000
+ accept = True
2001
+ self.logger.debug(f'z_bounds = {z_bounds}')
2002
+
2003
+ return(x_bounds, y_bounds, z_bounds)
2004
+
2005
+
2006
+ if __name__ == '__main__':
2007
+ from CHAP.processor import main
2008
+ main()
2009
+