ChessAnalysisPipeline 0.0.5__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ChessAnalysisPipeline might be problematic.

Files changed (41)
  1. CHAP/TaskManager.py +214 -0
  2. CHAP/common/models/integration.py +392 -249
  3. CHAP/common/models/map.py +350 -198
  4. CHAP/common/processor.py +227 -189
  5. CHAP/common/reader.py +52 -39
  6. CHAP/common/utils/fit.py +1197 -991
  7. CHAP/common/utils/general.py +629 -372
  8. CHAP/common/utils/material.py +158 -121
  9. CHAP/common/utils/scanparsers.py +735 -339
  10. CHAP/common/writer.py +31 -25
  11. CHAP/edd/models.py +63 -49
  12. CHAP/edd/processor.py +130 -109
  13. CHAP/edd/reader.py +1 -1
  14. CHAP/edd/writer.py +1 -1
  15. CHAP/inference/processor.py +35 -28
  16. CHAP/inference/reader.py +1 -1
  17. CHAP/inference/writer.py +1 -1
  18. CHAP/pipeline.py +14 -28
  19. CHAP/processor.py +44 -75
  20. CHAP/reader.py +49 -40
  21. CHAP/runner.py +73 -32
  22. CHAP/saxswaxs/processor.py +1 -1
  23. CHAP/saxswaxs/reader.py +1 -1
  24. CHAP/saxswaxs/writer.py +1 -1
  25. CHAP/server.py +130 -0
  26. CHAP/sin2psi/processor.py +1 -1
  27. CHAP/sin2psi/reader.py +1 -1
  28. CHAP/sin2psi/writer.py +1 -1
  29. CHAP/tomo/__init__.py +1 -4
  30. CHAP/tomo/models.py +53 -31
  31. CHAP/tomo/processor.py +1326 -900
  32. CHAP/tomo/reader.py +4 -2
  33. CHAP/tomo/writer.py +4 -2
  34. CHAP/writer.py +47 -41
  35. {ChessAnalysisPipeline-0.0.5.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/METADATA +1 -1
  36. ChessAnalysisPipeline-0.0.6.dist-info/RECORD +52 -0
  37. ChessAnalysisPipeline-0.0.5.dist-info/RECORD +0 -50
  38. {ChessAnalysisPipeline-0.0.5.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/LICENSE +0 -0
  39. {ChessAnalysisPipeline-0.0.5.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/WHEEL +0 -0
  40. {ChessAnalysisPipeline-0.0.5.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/entry_points.txt +0 -0
  41. {ChessAnalysisPipeline-0.0.5.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/top_level.txt +0 -0
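
The bulk of this release is the rework of `CHAP/tomo/processor.py`: the `Tomo` helper drops its `galaxy_flag` in favor of an `interactive` flag and private `_`-prefixed attributes, and `TomoDataProcessor._process` gains explicit step switches (`reduce_data`, `find_center`, `reconstruct_data`, `combine_data`) plus `output_folder` and `save_figs` keyword arguments. A minimal sketch of driving the new signature directly, assuming a reader has already produced a reduced-data NXroot (the file name and wiring below are illustrative assumptions, not taken from this diff):

```python
# Hypothetical sketch of calling the reworked TomoDataProcessor._process in 0.0.6.
# The reduced-data file and the hand-built reader output are assumptions for
# illustration only.
from nexusformat.nexus import nxload

from CHAP.tomo.processor import TomoDataProcessor

# Mimic Reader.read output: a list of dicts with optional 'schema'/'data' keys;
# an NXroot item is picked up as the (reduced) tomography data.
data = [{'schema': None, 'data': nxload('reduced_map.nxs')}]

center_config = TomoDataProcessor()._process(
    data,
    interactive=False,         # no prompts; unspecified bounds fall back to defaults
    reduce_data=False,         # reduction step already done upstream
    find_center=True,          # run the center-axis calibration step
    output_folder='tomo_out',  # figures (and test-mode YAML) are written here
    save_figs='only',          # save figures without displaying them
)
print(center_config)           # dict with lower/upper rows and center offsets
```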
CHAP/tomo/processor.py CHANGED
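One small self-contained change in this file is the rename of the `set_numexpr_threads` context manager to `SetNumexprThreads`, with private `_num_core` bookkeeping. A usage sketch, inferred from its `__enter__`/`__exit__` methods as shown in the hunks below; the array and expression are illustrative assumptions:

```python
# Sketch of using the renamed SetNumexprThreads context manager; usage inferred
# from the diff below, with a made-up numexpr expression for illustration.
import numpy as np
import numexpr as ne

from CHAP.tomo.processor import SetNumexprThreads

a = np.random.rand(1_000_000)
with SetNumexprThreads(4):        # temporarily cap numexpr at 4 threads
    b = ne.evaluate('2*a + 1')    # evaluated under the reduced thread limit
# __exit__ restores the previous numexpr thread count
```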
@@ -1,48 +1,94 @@
  #!/usr/bin/env python
  #-*- coding: utf-8 -*-
  #pylint: disable=
- '''
+ """
  File : processor.py
  Author : Rolf Verberg <rolfverberg AT gmail dot com>
  Description: Module for Processors used only by tomography experiments
- '''
+ """

- # system modules
+ # System modules
  from os import mkdir
  from os import path as os_path
+ from sys import exit as sys_exit
  from time import time

- # third party modules
- from nexusformat.nexus import NXobject
+ # Third party modules
  import numpy as np

- # local modules
- from CHAP.common.utils.general import clear_plot, clear_imshow, quick_plot, quick_imshow
+ # Local modules
+ from CHAP.common.utils.general import (
+ is_num,
+ input_int,
+ input_yesno,
+ select_image_bounds,
+ select_one_image_bound,
+ draw_mask_1d,
+ clear_plot,
+ clear_imshow,
+ quick_plot,
+ quick_imshow,
+ )
+ # input_num,
+ from CHAP.common.utils.fit import Fit
  from CHAP.processor import Processor
+ from CHAP.reader import main

- num_core_tomopy_limit = 24
+ NUM_CORE_TOMOPY_LIMIT = 24


  class TomoDataProcessor(Processor):
- '''Class representing the processes to reconstruct a set of Tomographic images returning
- either a dictionary or a `nexusformat.nexus.NXroot` object containing the (meta) data after
- processing each individual step.
- '''
-
- def _process(self, data):
- '''Process the output of a `Reader` that contains a map or a `nexusformat.nexus.NXroot`
- object and one that contains the step specific instructions and return either a dictionary
- or a `nexusformat.nexus.NXroot` returning the processed result.
-
- :param data: Result of `Reader.read` where at least one item is of type
- `nexusformat.nexus.NXroot` or has the value `'MapConfig'` for the `'schema'` key,
- and at least one item has the value `'TomoReduceConfig'` for the `'schema'` key.
+ """
+ Class representing the processes to reconstruct a set of Tomographic
+ images returning either a dictionary or a `nexusformat.nexus.NXroot`
+ object containing the (meta) data after processing each individual
+ step.
+ """
+
+ def _process(
+ self, data, interactive=False, reduce_data=False,
+ find_center=False, reconstruct_data=False, combine_data=False,
+ output_folder='.', save_figs=None, **kwargs):
+ """
+ Process the output of a `Reader` that contains a map or a
+ `nexusformat.nexus.NXroot` object and one that contains the step
+ specific instructions and return either a dictionary or a
+ `nexusformat.nexus.NXroot` returning the processed result.
+
+ :param data: Result of `Reader.read`
  :type data: list[dict[str,object]]
+ :param interactive: Allows interactive actions
+ :type bool, optional [False]
+ :param reduce_data: Generate reduced tomography images
+ :type bool, optional [False]
+ :param find_center: Find the calibrated center axis info
+ :type bool, optional [False]
+ :param reconstruct_data: Reconstruct the tomography data
+ :type bool, optional [False]
+ :param combine_data: Combine the reconstructed tomography stacks
+ :type bool, optional [False]
+ :param output_folder: Output folder name
+ :type str, optional ['.']
+ :param save_figs: Display and/or save figures to file
+ :type str, optional
  :return: processed (meta)data
  :rtype: dict or nexusformat.nexus.NXroot
- '''
+ """

- tomo = Tomo(save_figs='only')
+ if not isinstance(reduce_data, bool):
+ raise ValueError(f'Invalid parameter reduce_data ({reduce_data})')
+ if not isinstance(find_center, bool):
+ raise ValueError(f'Invalid parameter find_center ({find_center})')
+ if not isinstance(reconstruct_data, bool):
+ raise ValueError(
+ f'Invalid parameter reconstruct_data ({reconstruct_data})')
+ if not isinstance(combine_data, bool):
+ raise ValueError(
+ f'Invalid parameter combine_data ({combine_data})')
+
+ tomo = Tomo(
+ interactive=interactive, output_folder=output_folder,
+ save_figs=save_figs)
  nxroot = None
  center_config = None

@@ -57,50 +103,90 @@ class TomoDataProcessor(Processor):
  nxroot = configs.pop('nxroot', None)

  # Reduce tomography images
- if 'reduce' in configs:
- tool_config = configs.pop('reduce')
+ if reduce_data or 'reduce' in configs:
+ if 'reduce' in configs:
+ tool_config = configs.pop('reduce')
+ img_x_bounds = tool_config.img_x_bounds
+ else:
+ img_x_bounds = None
+ if nxroot is None:
+ raise RuntimeError(
+ 'Unable to reduce the data without providing a '
+ + 'reduced_data config file')
  if nxroot is None:
  map_config = configs.pop('map')
  nxroot = self.get_nxroot(map_config, tool_config)
- nxroot = tomo.gen_reduced_data(nxroot, img_x_bounds=tool_config.img_x_bounds)
+ nxroot = tomo.gen_reduced_data(nxroot, img_x_bounds=img_x_bounds)

  # Find rotation axis centers for the tomography stacks
- # Pass tool_config directly to tomo.find_centers?
- if 'find_center' in configs:
- tool_config = configs.pop('find_center')
- center_rows = [tool_config.lower_row, tool_config.upper_row]
- if (None in center_rows or tool_config.lower_center_offset is None or
- tool_config.upper_center_offset is None):
- center_config = tomo.find_centers(nxroot, center_rows=center_rows,
- center_stack_index=tool_config.center_stack_index)
+ # RV pass tool_config directly to tomo.find_centers?
+ if find_center or 'find_center' in configs:
+ if 'find_center' in configs:
+ tool_config = configs.pop('find_center')
+ center_rows = (tool_config.lower_row, tool_config.upper_row)
+ lower_center_offset = tool_config.lower_center_offset
+ upper_center_offset = tool_config.upper_center_offset
+ center_stack_index = tool_config.center_stack_index
+ else:
+ center_rows = (None, None)
+ lower_center_offset = None
+ upper_center_offset = None
+ center_stack_index = None
+ if (None in center_rows or lower_center_offset is None
+ or upper_center_offset is None):
+ center_config = tomo.find_centers(
+ nxroot, center_rows=center_rows,
+ center_stack_index=center_stack_index)
  else:
- #RV make a convert to dict in basemodel?
- center_config = {'lower_row': tool_config.lower_row,
- 'lower_center_offset': tool_config.lower_center_offset,
- 'upper_row': tool_config.upper_row,
- 'upper_center_offset': tool_config.upper_center_offset}
+ # RV make a convert to dict in basemodel?
+ center_config = {
+ 'lower_row': tool_config.lower_row,
+ 'lower_center_offset': tool_config.lower_center_offset,
+ 'upper_row': tool_config.upper_row,
+ 'upper_center_offset': tool_config.upper_center_offset,
+ 'center_stack_index': tool_config.center_stack_index,
+ }

  # Reconstruct tomography stacks
- # Pass tool_config and center_config directly to tomo.reconstruct_data
- if 'reconstruct' in configs:
- tool_config = configs.pop('reconstruct')
- nxroot = tomo.reconstruct_data(nxroot, center_config, x_bounds=tool_config.x_bounds,
- y_bounds=tool_config.y_bounds, z_bounds=tool_config.z_bounds)
+ # RV pass tool_config and center_config directly to
+ # tomo.reconstruct_data?
+ if reconstruct_data or 'reconstruct' in configs:
+ if 'reconstruct' in configs:
+ tool_config = configs.pop('reconstruct')
+ x_bounds = tool_config.x_bounds
+ y_bounds = tool_config.y_bounds
+ z_bounds = tool_config.z_bounds
+ else:
+ x_bounds = None
+ y_bounds = None
+ z_bounds = None
+ nxroot = tomo.reconstruct_data(
+ nxroot, center_config, x_bounds=x_bounds, y_bounds=y_bounds,
+ z_bounds=z_bounds)
  center_config = None

  # Combine reconstructed tomography stacks
- if 'combine' in configs:
- tool_config = configs.pop('combine')
- nxroot = tomo.combine_data(nxroot, x_bounds=tool_config.x_bounds,
- y_bounds=tool_config.y_bounds, z_bounds=tool_config.z_bounds)
+ if combine_data or 'combine' in configs:
+ if 'combine' in configs:
+ tool_config = configs.pop('combine')
+ x_bounds = tool_config.x_bounds
+ y_bounds = tool_config.y_bounds
+ z_bounds = tool_config.z_bounds
+ else:
+ x_bounds = None
+ y_bounds = None
+ z_bounds = None
+ nxroot = tomo.combine_data(
+ nxroot, x_bounds=x_bounds, y_bounds=y_bounds,
+ z_bounds=z_bounds)

  if center_config is not None:
  return center_config
- else:
- return nxroot
+ return nxroot

  def get_configs(self, data):
- '''Get instances of the configuration objects needed by this
+ """
+ Get instances of the configuration objects needed by this
  `Processor` from a returned value of `Reader.read`

  :param data: Result of `Reader.read` where at least one item
@@ -115,55 +201,85 @@ class TomoDataProcessor(Processor):
  :return: valid instances of the configuration objects with field
  values taken from `data`.
  :rtype: dict
- '''
- #:rtype: dict{'map': MapConfig, 'reduce': TomoReduceConfig} RV: Is there a way to denote optional items?
- from CHAP.common.models.map import MapConfig
- from CHAP.tomo.models import TomoSetupConfig, TomoReduceConfig, TomoFindCenterConfig, \
- TomoReconstructConfig, TomoCombineConfig
+ """
+ # :rtype: dict{'map': MapConfig, 'reduce': TomoReduceConfig}
+ # RV is there a way to denote optional items?
+ # Third party modules
  from nexusformat.nexus import NXroot

+ # Local modules
+ from CHAP.common.models.map import MapConfig
+ from CHAP.tomo.models import (
+ TomoSetupConfig,
+ TomoReduceConfig,
+ TomoFindCenterConfig,
+ TomoReconstructConfig,
+ TomoCombineConfig,
+ )
+
  configs = {}
  if isinstance(data, list):
  for item in data:
- if isinstance(item, dict):
+ if isinstance(item, dict) and item.get('data') is not None:
  schema = item.get('schema')
  if isinstance(item.get('data'), NXroot):
  configs['nxroot'] = item.get('data')
  if schema == 'MapConfig':
  configs['map'] = MapConfig(**(item.get('data')))
  if schema == 'TomoSetupConfig':
- configs['setup'] = TomoSetupConfig(**(item.get('data')))
+ configs['setup'] = TomoSetupConfig(
+ **(item.get('data')))
  if schema == 'TomoReduceConfig':
- configs['reduce'] = TomoReduceConfig(**(item.get('data')))
+ configs['reduce'] = TomoReduceConfig(
+ **(item.get('data')))
  elif schema == 'TomoFindCenterConfig':
- configs['find_center'] = TomoFindCenterConfig(**(item.get('data')))
+ configs['find_center'] = TomoFindCenterConfig(
+ **(item.get('data')))
  elif schema == 'TomoReconstructConfig':
- configs['reconstruct'] = TomoReconstructConfig(**(item.get('data')))
+ configs['reconstruct'] = TomoReconstructConfig(
+ **(item.get('data')))
  elif schema == 'TomoCombineConfig':
- configs['combine'] = TomoCombineConfig(**(item.get('data')))
+ configs['combine'] = TomoCombineConfig(
+ **(item.get('data')))

  return configs

  def get_nxroot(self, map_config, tool_config):
- '''Get a map of the collected tomography data from the scans in `map_config`. The
- data will be reduced based on additional parameters included in `tool_config`.
- The data will be returned along with relevant metadata in the form of a NeXus structure.
+ """
+ Get a map of the collected tomography data from the scans in
+ `map_config`. The data will be reduced based on additional
+ parameters included in `tool_config`. The data will be returned
+ along with relevant metadata in the form of a NeXus structure.

  :param map_config: the map configuration
  :type map_config: MapConfig
  :param tool_config: the tomography image reduction configuration
  :type tool_config: TomoReduceConfig
- :return: a map of the collected tomography data along with the data reduction configuration
+ :return: a map of the collected tomography data along with the
+ data reduction configuration
  :rtype: nexusformat.nexus.NXroot
- '''
+ """
+ # System modules
+ from copy import deepcopy
+
+ # Third party modules
+ from nexusformat.nexus import (
+ NXcollection,
+ NXdata,
+ NXdetector,
+ NXinstrument,
+ NXroot,
+ NXsample,
+ NXsource,
+ NXsubentry,
+ )
+
+ # Local modules
  from CHAP.common import MapProcessor
  from CHAP.common.models.map import import_scanparser
  from CHAP.common.utils.general import index_nearest
- from copy import deepcopy
- from nexusformat.nexus import NXcollection, NXdata, NXdetector, NXinstrument, NXsample, \
- NXsource, NXsubentry, NXroot

- include_raw_data = getattr(tool_config, "include_raw_data", False)
+ include_raw_data = getattr(tool_config, 'include_raw_data', False)

  # Construct NXroot
  nxroot = NXroot()
@@ -193,29 +309,37 @@ class TomoDataProcessor(Processor):
  nxsource.attrs['station'] = map_config.station
  nxsource.attrs['experiment_type'] = map_config.experiment_type

- # Add an NXdetector to the NXinstrument (don't fill in data fields yet)
+ # Add an NXdetector to the NXinstrument
+ # (do not fill in data fields yet)
  nxdetector = NXdetector()
  nxinstrument.detector = nxdetector
  nxdetector.local_name = tool_config.detector.prefix
  pixel_size = tool_config.detector.pixel_size
  if len(pixel_size) == 1:
- nxdetector.x_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
- nxdetector.y_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
+ nxdetector.x_pixel_size = \
+ pixel_size[0]/tool_config.detector.lens_magnification
+ nxdetector.y_pixel_size = \
+ pixel_size[0]/tool_config.detector.lens_magnification
  else:
- nxdetector.x_pixel_size = pixel_size[0]/tool_config.detector.lens_magnification
- nxdetector.y_pixel_size = pixel_size[1]/tool_config.detector.lens_magnification
+ nxdetector.x_pixel_size = \
+ pixel_size[0]/tool_config.detector.lens_magnification
+ nxdetector.y_pixel_size = \
+ pixel_size[1]/tool_config.detector.lens_magnification
  nxdetector.x_pixel_size.attrs['units'] = 'mm'
  nxdetector.y_pixel_size.attrs['units'] = 'mm'

  if include_raw_data:
- # Add an NXsample to NXentry (don't fill in data fields yet)
+ # Add an NXsample to NXentry
+ # (do not fill in data fields yet)
  nxsample = NXsample()
  nxentry.sample = nxsample
  nxsample.name = map_config.sample.name
  nxsample.description = map_config.sample.description

- # Add NXcollection's to NXentry to hold metadata about the spec scans in the map
- # Also obtain the data fields in NXsample and NXdetector if requested
+ # Add NXcollection's to NXentry to hold metadata about the spec
+ # scans in the map
+ # Also obtain the data fields in NXsample and NXdetector if
+ # requested
  import_scanparser(map_config.station, map_config.experiment_type)
  image_keys = []
  sequence_numbers = []
@@ -244,22 +368,24 @@ class TomoDataProcessor(Processor):
  image_key = 2
  field_name = 'dark_field'
  elif scans.spec_file.endswith('_flat'):
- #RV not yet tested with an actual fmb run
+ # RV not yet tested with an actual fmb run
  image_key = 1
  field_name = 'bright_field'
  else:
  image_key = 0
  field_name = 'tomo_fields'
  else:
- raise RuntimeError(f'Invalid station: {station}')
+ raise RuntimeError(
+ f'Invalid station in map_config: {map_config.station}')

  # Create an NXcollection for each field type
  if field_name in nxentry.spec_scans:
  nxcollection = nxentry.spec_scans[field_name]
  if nxcollection.attrs['spec_file'] != str(scans.spec_file):
- raise RuntimeError(f'Multiple SPEC files for a single field type not yet '+
- f'implemented; field name: {field_name}, '+
- f'SPEC file: {str(scans.spec_file)}')
+ raise RuntimeError(
+ 'Multiple SPEC files for a single field type not '
+ + f'yet implemented; field name: {field_name}, '
+ + f'SPEC file: {str(scans.spec_file)}')
  else:
  nxcollection = NXcollection()
  nxentry.spec_scans[field_name] = nxcollection
@@ -270,26 +396,32 @@ class TomoDataProcessor(Processor):
  image_offset = scanparser.starting_image_offset
  if map_config.station in ('id1a3', 'id3a'):
  theta_vals = scanparser.theta_vals
- thetas = np.linspace(theta_vals.get('start'), theta_vals.get('end'),
- theta_vals.get('num'))
+ thetas = np.linspace(
+ theta_vals.get('start'), theta_vals.get('end'),
+ theta_vals.get('num'))
  else:
  if len(scans.scan_numbers) != 1:
- raise RuntimeError('Multiple scans not yet implemented for '+
- f'{map_config.station}')
+ raise RuntimeError(
+ 'Multiple scans not yet implemented for '
+ + f'{map_config.station}')
  scan_number = scans.scan_numbers[0]
  thetas = []
  for dim in map_config.independent_dimensions:
  if dim.label != 'theta':
  continue
  for index in range(scanparser.spec_scan_npts):
- thetas.append(dim.get_value(scans, scan_number, index))
- if not len(thetas):
- raise RuntimeError(f'Unable to obtain thetas for {field_name}')
+ thetas.append(
+ dim.get_value(scans, scan_number, index))
+ if not thetas:
+ raise RuntimeError(
+ f'Unable to obtain thetas for {field_name}')
  if thetas[image_offset] <= 0.0 and thetas[-1] >= 180.0:
  image_offset = index_nearest(thetas, 0.0)
- thetas = thetas[image_offset:index_nearest(thetas, 180.0)]
+ image_end = index_nearest(thetas, 180.0)
+ thetas = thetas[image_offset:image_end]
  elif thetas[-1]-thetas[image_offset] >= 180:
- thetas = thetas[image_offset:index_nearest(thetas, thetas[0]+180.0)]
+ image_end = index_nearest(thetas, thetas[0]+180.0)
+ thetas = thetas[image_offset:image_end]
  else:
  thetas = thetas[image_offset:]

@@ -305,9 +437,11 @@ class TomoDataProcessor(Processor):
  nxsubentry.spec_command = scanparser.spec_command
  # Add an NXinstrument to the scan's NXsubentry
  nxsubentry.instrument = NXinstrument()
- # Add an NXdetector to the NXinstrument to the scan's NXsubentry
+ # Add an NXdetector to the NXinstrument to the scan's
+ # NXsubentry
  nxsubentry.instrument.detector = deepcopy(nxdetector)
- nxsubentry.instrument.detector.frame_start_number = image_offset
+ nxsubentry.instrument.detector.frame_start_number = \
+ image_offset
  nxsubentry.instrument.detector.image_key = image_key
  # Add an NXsample to the scan's NXsubentry
  nxsubentry.sample = NXsample()
@@ -322,8 +456,11 @@ class TomoDataProcessor(Processor):
  num_image = len(thetas)
  image_keys += num_image*[image_key]
  sequence_numbers += list(range(num_image))
- image_stacks.append(scanparser.get_detector_data(tool_config.detector.prefix,
- scan_step_index=(image_offset, image_offset+num_image)))
+ image_stacks.append(
+ scanparser.get_detector_data(
+ tool_config.detector.prefix,
+ scan_step_index=(image_offset,
+ image_offset+num_image)))
  rotation_angles += list(thetas)
  x_translations += num_image*[x_translation]
  z_translations += num_image*[z_translation]
@@ -332,7 +469,7 @@ class TomoDataProcessor(Processor):
  # Add image data to NXdetector
  nxinstrument.detector.image_key = image_keys
  nxinstrument.detector.sequence_number = sequence_numbers
- nxinstrument.detector.data = np.concatenate([image for image in image_stacks])
+ nxinstrument.detector.data = np.concatenate(image_stacks)

  # Add image data to NXsample
  nxsample.rotation_angle = rotation_angles
@@ -355,11 +492,13 @@ class TomoDataProcessor(Processor):
  # nxdata.attrs['row_indices'] = 1
  # nxdata.attrs['column_indices'] = 2

- return(nxroot)
+ return nxroot


- def nxcopy(nxobject:NXobject, exclude_nxpaths:list[str]=[], nxpath_prefix:str='') -> NXobject:
- '''Function that returns a copy of a nexus object, optionally exluding certain child items.
+ def nxcopy(nxobject, exclude_nxpaths=None, nxpath_prefix=''):
+ """
+ Function that returns a copy of a nexus object, optionally exluding
+ certain child items.

  :param nxobject: the original nexus object to return a "copy" of
  :type nxobject: nexusformat.nexus.NXobject
@@ -371,17 +510,20 @@ def nxcopy(nxobject:NXobject, exclude_nxpaths:list[str]=[], nxpath_prefix:str=''
  :type nxpath_prefix: str
  :return: a copy of `nxobject` with some children optionally exluded.
  :rtype: NXobject
- '''
+ """
+ # Third party modules
  from nexusformat.nexus import NXgroup

  nxobject_copy = nxobject.__class__()
- if not len(nxpath_prefix):
+ if not nxpath_prefix:
  if 'default' in nxobject.attrs:
  nxobject_copy.attrs['default'] = nxobject.attrs['default']
  else:
  for k, v in nxobject.attrs.items():
  nxobject_copy.attrs[k] = v

+ if exclude_nxpaths is None:
+ exclude_nxpaths = []
  for k, v in nxobject.items():
  nxpath = os_path.join(nxpath_prefix, k)

@@ -389,164 +531,158 @@ def nxcopy(nxobject:NXobject, exclude_nxpaths:list[str]=[], nxpath_prefix:str=''
  continue

  if isinstance(v, NXgroup):
- nxobject_copy[k] = nxcopy(v, exclude_nxpaths=exclude_nxpaths,
- nxpath_prefix=os_path.join(nxpath_prefix, k))
+ nxobject_copy[k] = nxcopy(
+ v, exclude_nxpaths=exclude_nxpaths,
+ nxpath_prefix=os_path.join(nxpath_prefix, k))
  else:
  nxobject_copy[k] = v

- return(nxobject_copy)
+ return nxobject_copy


- class set_numexpr_threads:
+ class SetNumexprThreads:
+ """
+ Class that sets and keeps track of the number of processors used by
+ the code in general and by the num_expr package specifically.
+
+ :ivar num_core: Number of processors used by the num_expr package
+ :type num_core: int
+ """
+
  def __init__(self, num_core):
+ """Initialize SetNumexprThreads."""
+ # System modules
  from multiprocessing import cpu_count

  if num_core is None or num_core < 1 or num_core > cpu_count():
- self.num_core = cpu_count()
+ self._num_core = cpu_count()
  else:
- self.num_core = num_core
+ self._num_core = num_core
+ self._num_core_org = self._num_core

  def __enter__(self):
- import numexpr as ne
+ # Third party modules
+ from numexpr import (
+ MAX_THREADS,
+ set_num_threads,
+ )

- self.num_core_org = ne.set_num_threads(min(self.num_core, ne.MAX_THREADS))
+ self._num_core_org = set_num_threads(
+ min(self._num_core, MAX_THREADS))

  def __exit__(self, exc_type, exc_value, traceback):
- import numexpr as ne
+ # Third party modules
+ from numexpr import set_num_threads

- ne.set_num_threads(self.num_core_org)
+ set_num_threads(self._num_core_org)


  class Tomo:
- """Processing tomography data with misalignment.
- """
- def __init__(self, galaxy_flag=False, num_core=-1, output_folder='.', save_figs=None,
- test_mode=False):
- """Initialize with optional config input file or dictionary
- """
- from logging import getLogger
+ """Reconstruct a set of Tomographic images."""

+ def __init__(
+ self, interactive=False, num_core=-1, output_folder='.',
+ save_figs=None, test_mode=False):
+ """Initialize Tomo."""
+ # System modules
+ from logging import getLogger
  from multiprocessing import cpu_count

  self.__name__ = self.__class__.__name__
- self.logger = getLogger(self.__name__)
- self.logger.propagate = False
-
- if not isinstance(galaxy_flag, bool):
- raise ValueError(f'Invalid parameter galaxy_flag ({galaxy_flag})')
- self.galaxy_flag = galaxy_flag
- self.num_core = num_core
- if self.galaxy_flag:
- if output_folder != '.':
- self.logger.warning('Ignoring output_folder in galaxy mode')
- self.output_folder = '.'
- if test_mode != False:
- self.logger.warning('Ignoring test_mode in galaxy mode')
- self.test_mode = False
- if save_figs is not None:
- self.logger.warning('Ignoring save_figs in galaxy mode')
- save_figs = 'only'
+ self._logger = getLogger(self.__name__)
+ self._logger.propagate = False
+
+ if not isinstance(interactive, bool):
+ raise ValueError(f'Invalid parameter interactive ({interactive})')
+ self._interactive = interactive
+ self._num_core = num_core
+ self._output_folder = os_path.abspath(output_folder)
+ if not os_path.isdir(output_folder):
+ mkdir(os_path.abspath(output_folder))
+ if self._interactive:
+ self._test_mode = False
  else:
- self.output_folder = os_path.abspath(output_folder)
- if not os_path.isdir(output_folder):
- mkdir(os_path.abspath(output_folder))
  if not isinstance(test_mode, bool):
  raise ValueError(f'Invalid parameter test_mode ({test_mode})')
- self.test_mode = test_mode
+ self._test_mode = test_mode
  if save_figs is None:
  save_figs = 'no'
- self.test_config = {}
- if self.test_mode:
+ self._test_config = {}
+ if self._test_mode:
  if save_figs != 'only':
- self.logger.warning('Ignoring save_figs in test mode')
+ self._logger.warning('Ignoring save_figs in test mode')
  save_figs = 'only'
  if save_figs == 'only':
- self.save_only = True
- self.save_figs = True
+ self._save_only = True
+ self._save_figs = True
  elif save_figs == 'yes':
- self.save_only = False
- self.save_figs = True
+ self._save_only = False
+ self._save_figs = True
  elif save_figs == 'no':
- self.save_only = False
- self.save_figs = False
+ self._save_only = False
+ self._save_figs = False
  else:
  raise ValueError(f'Invalid parameter save_figs ({save_figs})')
- if self.save_only:
- self.block = False
+ if self._save_only:
+ self._block = False
  else:
- self.block = True
- if self.num_core == -1:
- self.num_core = cpu_count()
- if not isinstance(self.num_core, int) or self.num_core < 0:
+ self._block = True
+ if self._num_core == -1:
+ self._num_core = cpu_count()
+ if not isinstance(self._num_core, int) or self._num_core < 0:
  raise ValueError(f'Invalid parameter num_core ({num_core})')
- if self.num_core > cpu_count():
- self.logger.warning(f'num_core = {self.num_core} is larger than the number of '
- f'available processors and reduced to {cpu_count()}')
- self.num_core= cpu_count()
-
- def read(self, filename):
- extension = os_path.splitext(filename)[1]
- if extension == '.yml' or extension == '.yaml':
- with open(filename, 'r') as f:
- config = safe_load(f)
- # if len(config) > 1:
- # raise ValueError(f'Multiple root entries in {filename} not yet implemented')
- # if len(list(config.values())[0]) > 1:
- # raise ValueError(f'Multiple sample maps in {filename} not yet implemented')
- return(config)
- elif extension == '.nxs':
- with NXFile(filename, mode='r') as nxfile:
- nxroot = nxfile.readfile()
- return(nxroot)
- else:
- raise ValueError(f'Invalid filename extension ({extension})')
-
- def write(self, data, filename):
- extension = os_path.splitext(filename)[1]
- if extension == '.yml' or extension == '.yaml':
- with open(filename, 'w') as f:
- safe_dump(data, f)
- elif extension == '.nxs':
- data.save(filename, mode='w')
- elif extension == '.nc':
- data.to_netcdf(os_path=filename)
- else:
- raise ValueError(f'Invalid filename extension ({extension})')
+ if self._num_core > cpu_count():
+ self._logger.warning(
+ f'num_core = {self._num_core} is larger than the number '
+ + f'of available processors and reduced to {cpu_count()}')
+ self._num_core = cpu_count()

  def gen_reduced_data(self, data, img_x_bounds=None):
- """Generate the reduced tomography images.
  """
- from nexusformat.nexus import NXdata, NXprocess, NXroot
-
- from CHAP.common.models.map import import_scanparser
-
- self.logger.info('Generate the reduced tomography images')
+ Generate the reduced tomography images.
+
+ :param data: Data object containing the raw data info and
+ metadata required for a tomography data reduction
+ :type data: nexusformat.nexus.NXroot
+ :param img_x_bounds: Detector image bounds in the x-direction
+ :type img_x_bounds: tuple(int, int), list[int], optional
+ :return: Reduced tomography data
+ :rtype: nexusformat.nexus.NXroot
+ """
+ # Third party modules
+ from nexusformat.nexus import (
+ NXdata,
+ NXprocess,
+ NXroot,
+ )
+
+ self._logger.info('Generate the reduced tomography images')
  if img_x_bounds is not None:
  if not isinstance(img_x_bounds, (tuple, list)):
- raise ValueError(f'Invalid parameter img_x_bounds ({img_x_bounds})')
+ raise ValueError(
+ f'Invalid parameter img_x_bounds ({img_x_bounds})')
  img_x_bounds = tuple(img_x_bounds)

- # Create plot galaxy path directory if needed
- if self.galaxy_flag and not os_path.exists('tomo_reduce_plots'):
- mkdir('tomo_reduce_plots')
-
- if isinstance(data, dict):
- # Create Nexus format object from input dictionary
- wf = TomoWorkflow(**data)
- if len(wf.sample_maps) > 1:
- raise ValueError(f'Multiple sample maps not yet implemented')
- nxroot = NXroot()
- t0 = time()
- for sample_map in wf.sample_maps:
- self.logger.info(f'Start constructing the {sample_map.title} map.')
- import_scanparser(sample_map.station)
- sample_map.construct_nxentry(nxroot, include_raw_data=False)
- self.logger.info(f'Constructed all sample maps in {time()-t0:.2f} seconds.')
- nxentry = nxroot[nxroot.attrs['default']]
- # Get test mode configuration info
- if self.test_mode:
- self.test_config = data['sample_maps'][0]['test_mode']
- elif isinstance(data, NXroot):
+ # if isinstance(data, dict):
+ # # Create Nexus format object from input dictionary
+ # wf = TomoWorkflow(**data)
+ # if len(wf.sample_maps) > 1:
+ # raise ValueError('Multiple sample maps not yet implemented')
+ # nxroot = NXroot()
+ # t0 = time()
+ # for sample_map in wf.sample_maps:
+ # self._logger.info(
+ # f'Start constructing the {sample_map.title} map')
+ # import_scanparser(sample_map.station)
+ # sample_map.construct_nxentry(nxroot, include_raw_data=False)
+ # self._logger.info(
+ # f'Constructed all sample maps in {time()-t0:.2f} seconds')
+ # nxentry = nxroot[nxroot.attrs['default']]
+ # # Get test mode configuration info
+ # if self._test_mode:
+ # self._test_config = data['sample_maps'][0]['test_mode']
+ # elif isinstance(data, NXroot):
+ if isinstance(data, NXroot):
  nxentry = data[data.attrs['default']]
  else:
  raise ValueError(f'Invalid parameter data ({data})')
@@ -562,8 +698,9 @@ class Tomo:
  reduced_data = self._gen_bright(nxentry, reduced_data)

  # Set vertical detector bounds for image stack
- img_x_bounds = self._set_detector_bounds(nxentry, reduced_data, img_x_bounds=img_x_bounds)
- self.logger.info(f'img_x_bounds = {img_x_bounds}')
+ img_x_bounds = self._set_detector_bounds(
+ nxentry, reduced_data, img_x_bounds=img_x_bounds)
+ self._logger.info(f'img_x_bounds = {img_x_bounds}')
  reduced_data['img_x_bounds'] = img_x_bounds

  # Set zoom and/or theta skip to reduce memory the requirement
@@ -576,20 +713,23 @@ class Tomo:
  # Generate reduced tomography fields
  reduced_data = self._gen_tomo(nxentry, reduced_data)

- # Create a copy of the input Nexus object and remove raw and any existing reduced data
+ # Create a copy of the input Nexus object and remove raw and
+ # any existing reduced data
  if isinstance(data, NXroot):
- exclude_items = [f'{nxentry._name}/reduced_data/data',
- f'{nxentry._name}/instrument/detector/data',
- f'{nxentry._name}/instrument/detector/image_key',
- f'{nxentry._name}/instrument/detector/sequence_number',
- f'{nxentry._name}/sample/rotation_angle',
- f'{nxentry._name}/sample/x_translation',
- f'{nxentry._name}/sample/z_translation',
- f'{nxentry._name}/data/data',
- f'{nxentry._name}/data/image_key',
- f'{nxentry._name}/data/rotation_angle',
- f'{nxentry._name}/data/x_translation',
- f'{nxentry._name}/data/z_translation']
+ exclude_items = [
+ f'{nxentry.nxname}/reduced_data/data',
+ f'{nxentry.nxname}/instrument/detector/data',
+ f'{nxentry.nxname}/instrument/detector/image_key',
+ f'{nxentry.nxname}/instrument/detector/sequence_number',
+ f'{nxentry.nxname}/sample/rotation_angle',
+ f'{nxentry.nxname}/sample/x_translation',
+ f'{nxentry.nxname}/sample/z_translation',
+ f'{nxentry.nxname}/data/data',
+ f'{nxentry.nxname}/data/image_key',
+ f'{nxentry.nxname}/data/rotation_angle',
+ f'{nxentry.nxname}/data/x_translation',
+ f'{nxentry.nxname}/data/z_translation',
+ ]
  nxroot = nxcopy(data, exclude_nxpaths=exclude_items)
  nxentry = nxroot[nxroot.attrs['default']]

@@ -599,83 +739,93 @@ class Tomo:
  if 'data' not in nxentry:
  nxentry.data = NXdata()
  nxentry.attrs['default'] = 'data'
- nxentry.data.makelink(nxentry.reduced_data.data.tomo_fields, name='reduced_data')
- nxentry.data.makelink(nxentry.reduced_data.rotation_angle, name='rotation_angle')
+ nxentry.data.makelink(
+ nxentry.reduced_data.data.tomo_fields, name='reduced_data')
+ nxentry.data.makelink(
+ nxentry.reduced_data.rotation_angle, name='rotation_angle')
  nxentry.data.attrs['signal'] = 'reduced_data'
-
- return(nxroot)
+
+ return nxroot

  def find_centers(self, nxroot, center_rows=None, center_stack_index=None):
- """Find the calibrated center axis info
  """
- from nexusformat.nexus import NXentry, NXroot
-
- from CHAP.common.utils.general import is_int_pair
+ Find the calibrated center axis info
+
+ :param nxroot: Data object containing the reduced data and
+ metadata required to find the calibrated center axis info
+ :type data: nexusformat.nexus.NXroot
+ :param center_rows: Lower and upper row indices for center
+ finding
+ :type center_rows: tuple(int, int), list[int], optional
+ :return: Calibrated center axis info
+ :rtype: dict
+ """
+ # Third party modules
+ from nexusformat.nexus import (
+ NXentry,
+ NXroot,
+ )
+ from yaml import safe_dump

- self.logger.info('Find the calibrated center axis info')
+ self._logger.info('Find the calibrated center axis info')

  if not isinstance(nxroot, NXroot):
  raise ValueError(f'Invalid parameter nxroot ({nxroot})')
  nxentry = nxroot[nxroot.attrs['default']]
  if not isinstance(nxentry, NXentry):
  raise ValueError(f'Invalid nxentry ({nxentry})')
- if self.galaxy_flag:
- if center_rows is not None:
- center_rows = tuple(center_rows)
- if not is_int_pair(center_rows):
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
- elif center_rows is not None:
- # self.logger.warning(f'Ignoring parameter center_rows ({center_rows})')
- # center_rows = None
- if not isinstance(center_rows, (tuple, list)) or len(center_rows) != 2:
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
- if self.galaxy_flag:
- if center_stack_index is not None and (not isinstance(center_stack_index, int) or
- center_stack_index < 0):
- raise ValueError(f'Invalid parameter center_stack_index ({center_stack_index})')
-
- # Create plot galaxy path directory and path if needed
- if self.galaxy_flag:
- if not os_path.exists('tomo_find_centers_plots'):
- mkdir('tomo_find_centers_plots')
- path = 'tomo_find_centers_plots'
- else:
- path = self.output_folder
+ if (center_rows is not None
+ and (not isinstance(center_rows, (tuple, list))
+ or len(center_rows) != 2)):
+ raise ValueError(f'Invalid parameter center_rows ({center_rows})')
+ if (not self._interactive
+ and (center_rows is None
+ or (center_rows[0] is None and center_rows[1] is None))):
+ self._logger.warning(
+ 'center_rows unspecified, find centers at reduced data bounds')
+ if (center_stack_index is not None
+ and (not isinstance(center_stack_index, int)
+ or center_stack_index < 0)):
+ raise ValueError(
+ 'Invalid parameter center_stack_index '
+ + f'({center_stack_index})')

  # Check if reduced data is available
- if ('reduced_data' not in nxentry or 'reduced_data' not in nxentry.data):
+ if ('reduced_data' not in nxentry
+ or 'reduced_data' not in nxentry.data):
  raise KeyError(f'Unable to find valid reduced data in {nxentry}.')

  # Select the image stack to calibrate the center axis
- # reduced data axes order: stack,theta,row,column
- # Note: Nexus cannot follow a link if the data it points to is too big,
- # so get the data from the actual place, not from nxentry.data
+ # reduced data axes order: stack,theta,row,column
+ # Note: Nexus can't follow a link if the data it points to is
+ # too big get the data from the actual place, not from
+ # nxentry.data
  tomo_fields_shape = nxentry.reduced_data.data.tomo_fields.shape
- if len(tomo_fields_shape) != 4 or any(True for dim in tomo_fields_shape if not dim):
- raise KeyError('Unable to load the required reduced tomography stack')
+ if (len(tomo_fields_shape) != 4
+ or any(True for dim in tomo_fields_shape if not dim)):
+ raise KeyError(
+ 'Unable to load the required reduced tomography stack')
  num_tomo_stacks = tomo_fields_shape[0]
  if num_tomo_stacks == 1:
  center_stack_index = 0
  default = 'n'
  else:
- if self.test_mode:
- center_stack_index = self.test_config['center_stack_index']-1 # make offset 0
- elif self.galaxy_flag:
+ if self._test_mode:
+ # Convert input value to offset 0
+ center_stack_index = self._test_config['center_stack_index']-1
+ elif self._interactive:
  if center_stack_index is None:
- center_stack_index = int(num_tomo_stacks/2)
- if center_stack_index >= num_tomo_stacks:
- raise ValueError(f'Invalid parameter center_stack_index ({center_stack_index})')
+ center_stack_index = input_int(
+ '\nEnter tomography stack index to calibrate the '
+ + 'center axis', ge=1, le=num_tomo_stacks,
+ default=int(1 + num_tomo_stacks/2))
+ center_stack_index -= 1
  else:
  if center_stack_index is None:
- center_stack_index = input_int('\nEnter tomography stack index to calibrate '
- 'the center axis', ge=1, le=num_tomo_stacks,
- default=int(1+num_tomo_stacks/2))
- else:
- if (not isinstance(center_stack_index, int) or
- not 0 < center_stack_index <= num_tomo_stacks):
- raise ValueError('Invalid parameter center_stack_index '+
- f'({center_stack_index})')
- center_stack_index -= 1
+ center_stack_index = int(num_tomo_stacks/2)
+ self._logger.warning(
+ 'center_stack_index unspecified, use stack '
+ + f'{center_stack_index+1} to find centers')
  default = 'y'

  # Get thetas (in degrees)
@@ -683,106 +833,145 @@ class Tomo:
  # Get effective pixel_size
  if 'zoom_perc' in nxentry.reduced_data:
- eff_pixel_size = 100.*(nxentry.instrument.detector.x_pixel_size/
- nxentry.reduced_data.attrs['zoom_perc'])
+ eff_pixel_size = \
+ 100.0 * (nxentry.instrument.detector.x_pixel_size
+ / nxentry.reduced_data.attrs['zoom_perc'])
  else:
  eff_pixel_size = nxentry.instrument.detector.x_pixel_size

  # Get cross sectional diameter
  cross_sectional_dim = tomo_fields_shape[3]*eff_pixel_size
- self.logger.debug(f'cross_sectional_dim = {cross_sectional_dim}')
+ self._logger.debug(f'cross_sectional_dim = {cross_sectional_dim}')

  # Determine center offset at sample row boundaries
- self.logger.info('Determine center offset at sample row boundaries')
+ self._logger.info('Determine center offset at sample row boundaries')

  # Lower row center
- if self.test_mode:
- lower_row = self.test_config['lower_row']
- elif self.galaxy_flag:
- if center_rows is None:
- lower_row = 0
- else:
- lower_row = min(center_rows)
- if not 0 <= lower_row < tomo_fields_shape[2]-1:
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
- else:
+ if self._test_mode:
+ lower_row = self._test_config['lower_row']
+ elif self._interactive:
  if center_rows is not None and center_rows[0] is not None:
  lower_row = center_rows[0]
  if lower_row == -1:
  lower_row = 0
  if not 0 <= lower_row < tomo_fields_shape[2]-1:
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
+ raise ValueError(
+ f'Invalid parameter center_rows ({center_rows})')
  else:
  lower_row = select_one_image_bound(
- nxentry.reduced_data.data.tomo_fields[center_stack_index,0,:,:],
- 0, bound=0, title=f'theta={round(thetas[0], 2)+0}',
- bound_name='row index to find lower center', default=default,
- raise_error=True)
- self.logger.debug('Finding center...')
+ nxentry.reduced_data.data.tomo_fields[
+ center_stack_index,0,:,:],
+ 0, bound=0, title=f'theta={round(thetas[0], 2)+0}',
+ bound_name='row index to find lower center',
+ default=default, raise_error=True)
+ else:
+ if center_rows is None or center_rows[0] is None:
+ lower_row = 0
+ else:
+ lower_row = center_rows[0]
+ if lower_row == -1:
+ lower_row = 0
+ if not 0 <= lower_row < tomo_fields_shape[2]-1:
+ raise ValueError(
+ f'Invalid parameter center_rows ({center_rows})')
  t0 = time()
  lower_center_offset = self._find_center_one_plane(
- #np.asarray(nxentry.reduced_data.data.tomo_fields[center_stack_index,:,lower_row,:]),
- nxentry.reduced_data.data.tomo_fields[center_stack_index,:,lower_row,:],
- lower_row, thetas, eff_pixel_size, cross_sectional_dim, path=path,
- num_core=self.num_core)
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
- self.logger.debug(f'lower_row = {lower_row:.2f}')
- self.logger.debug(f'lower_center_offset = {lower_center_offset:.2f}')
+ nxentry.reduced_data.data.tomo_fields[
+ center_stack_index,:,lower_row,:],
+ lower_row, thetas, eff_pixel_size, cross_sectional_dim,
+ path=self._output_folder, num_core=self._num_core)
+ self._logger.info(f'Finding center took {time()-t0:.2f} seconds')
+ self._logger.debug(f'lower_row = {lower_row:.2f}')
+ self._logger.debug(f'lower_center_offset = {lower_center_offset:.2f}')

  # Upper row center
- if self.test_mode:
- upper_row = self.test_config['upper_row']
- elif self.galaxy_flag:
- if center_rows is None:
- upper_row = tomo_fields_shape[2]-1
- else:
- upper_row = max(center_rows)
- if not lower_row < upper_row < tomo_fields_shape[2]:
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
- else:
+ if self._test_mode:
+ upper_row = self._test_config['upper_row']
+ elif self._interactive:
  if center_rows is not None and center_rows[1] is not None:
  upper_row = center_rows[1]
  if upper_row == -1:
  upper_row = tomo_fields_shape[2]-1
  if not lower_row < upper_row < tomo_fields_shape[2]:
- raise ValueError(f'Invalid parameter center_rows ({center_rows})')
+ raise ValueError(
+ f'Invalid parameter center_rows ({center_rows})')
  else:
  upper_row = select_one_image_bound(
- nxentry.reduced_data.data.tomo_fields[center_stack_index,0,:,:],
- 0, bound=tomo_fields_shape[2]-1, title=f'theta={round(thetas[0], 2)+0}',
- bound_name='row index to find upper center', default=default,
- raise_error=True)
- self.logger.debug('Finding center...')
+ nxentry.reduced_data.data.tomo_fields[
+ center_stack_index,0,:,:],
+ 0, bound=tomo_fields_shape[2]-1,
+ title=f'theta = {round(thetas[0], 2)+0}',
+ bound_name='row index to find upper center',
+ default=default, raise_error=True)
+ else:
+ if center_rows is None or center_rows[1] is None:
+ upper_row = tomo_fields_shape[2]-1
+ else:
+ upper_row = center_rows[1]
+ if upper_row == -1:
+ upper_row = tomo_fields_shape[2]-1
+ if not lower_row < upper_row < tomo_fields_shape[2]:
+ raise ValueError(
+ f'Invalid parameter center_rows ({center_rows})')
  t0 = time()
  upper_center_offset = self._find_center_one_plane(
- #np.asarray(nxentry.reduced_data.data.tomo_fields[center_stack_index,:,upper_row,:]),
- nxentry.reduced_data.data.tomo_fields[center_stack_index,:,upper_row,:],
- upper_row, thetas, eff_pixel_size, cross_sectional_dim, path=path,
- num_core=self.num_core)
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
- self.logger.debug(f'upper_row = {upper_row:.2f}')
- self.logger.debug(f'upper_center_offset = {upper_center_offset:.2f}')
-
- center_config = {'lower_row': lower_row, 'lower_center_offset': lower_center_offset,
- 'upper_row': upper_row, 'upper_center_offset': upper_center_offset}
+ nxentry.reduced_data.data.tomo_fields[
+ center_stack_index,:,upper_row,:],
+ upper_row, thetas, eff_pixel_size, cross_sectional_dim,
+ path=self._output_folder, num_core=self._num_core)
+ self._logger.info(f'Finding center took {time()-t0:.2f} seconds')
+ self._logger.debug(f'upper_row = {upper_row:.2f}')
+ self._logger.debug(f'upper_center_offset = {upper_center_offset:.2f}')
+
+ center_config = {
+ 'lower_row': lower_row,
+ 'lower_center_offset': lower_center_offset,
+ 'upper_row': upper_row,
+ 'upper_center_offset': upper_center_offset,
+ }
  if num_tomo_stacks > 1:
- center_config['center_stack_index'] = center_stack_index+1 # save as offset 1
+ # Save as offset 1
+ center_config['center_stack_index'] = center_stack_index+1

  # Save test data to file
- if self.test_mode:
- with open(f'{self.output_folder}/center_config.yaml', 'w') as f:
+ if self._test_mode:
+ with open(f'{self._output_folder}/center_config.yaml', 'w',
+ encoding='utf8') as f:
  safe_dump(center_config, f)

- return(center_config)
+ return center_config

- def reconstruct_data(self, nxroot, center_info, x_bounds=None, y_bounds=None, z_bounds=None):
- """Reconstruct the tomography data.
+ def reconstruct_data(
+ self, nxroot, center_info, x_bounds=None, y_bounds=None,
+ z_bounds=None):
  """
- from nexusformat.nexus import NXdata, NXentry, NXprocess, NXroot
-
+ Reconstruct the tomography data.
+
+ :param nxroot: Reduced data
+ :type data: nexusformat.nexus.NXroot
+ :param center_info: Calibrated center axis info
+ :type center_info: dict
+ :param x_bounds: Reconstructed image bounds in the x-direction
+ :type x_bounds: tuple(int, int), list[int], optional
+ :param y_bounds: Reconstructed image bounds in the y-direction
+ :type y_bounds: tuple(int, int), list[int], optional
+ :param z_bounds: Reconstructed image bounds in the z-direction
+ :type z_bounds: tuple(int, int), list[int], optional
+ :return: Reconstructed tomography data
+ :rtype: dict
+ """
+ # Third party modules
+ from nexusformat.nexus import (
+ NXdata,
+ NXentry,
+ NXprocess,
+ NXroot,
+ )
+
+ # Local modules
  from CHAP.common.utils.general import is_int_pair

- self.logger.info('Reconstruct the tomography data')
+ self._logger.info('Reconstruct the tomography data')

  if not isinstance(nxroot, NXroot):
  raise ValueError(f'Invalid parameter nxroot ({nxroot})')
@@ -804,16 +993,9 @@ class Tomo:
  raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
  z_bounds = tuple(z_bounds)

- # Create plot galaxy path directory and path if needed
- if self.galaxy_flag:
- if not os_path.exists('tomo_reconstruct_plots'):
- mkdir('tomo_reconstruct_plots')
- path = 'tomo_reconstruct_plots'
- else:
- path = self.output_folder
-
  # Check if reduced data is available
- if ('reduced_data' not in nxentry or 'reduced_data' not in nxentry.data):
+ if ('reduced_data' not in nxentry
+ or 'reduced_data' not in nxentry.data):
  raise KeyError(f'Unable to find valid reduced data in {nxentry}.')

  # Create an NXprocess to store image reconstruction (meta)data
@@ -824,111 +1006,137 @@ class Tomo:
824
1006
  lower_center_offset = center_info.get('lower_center_offset')
825
1007
  upper_row = center_info.get('upper_row')
826
1008
  upper_center_offset = center_info.get('upper_center_offset')
827
- if (lower_row is None or lower_center_offset is None or upper_row is None or
828
- upper_center_offset is None):
829
- raise KeyError(f'Unable to find valid calibrated center axis info in {center_info}.')
830
- center_slope = (upper_center_offset-lower_center_offset)/(upper_row-lower_row)
1009
+ if (lower_row is None or lower_center_offset is None
1010
+ or upper_row is None or upper_center_offset is None):
1011
+ raise KeyError(
1012
+ 'Unable to find valid calibrated center axis info in '
1013
+ + f'{center_info}.')
1014
+ center_slope = (upper_center_offset-lower_center_offset) \
1015
+ / (upper_row-lower_row)
831
1016
 
832
1017
  # Get thetas (in degrees)
833
1018
  thetas = np.asarray(nxentry.reduced_data.rotation_angle)
834
1019
 
835
1020
  # Reconstruct tomography data
836
- # reduced data axes order: stack,theta,row,column
837
- # reconstructed data order in each stack: row/z,x,y
838
- # Note: Nexus cannot follow a link if the data it points to is too big,
839
- # so get the data from the actual place, not from nxentry.data
1021
+ # reduced data axes order: stack,theta,row,column
1022
+ # reconstructed data order in each stack: row/z,x,y
1023
+ # Note: Nexus can't follow a link if the data it points to is
1024
+ # too big get the data from the actual place, not from
1025
+ # nxentry.data
840
1026
  if 'zoom_perc' in nxentry.reduced_data:
841
1027
  res_title = f'{nxentry.reduced_data.attrs["zoom_perc"]}p'
842
1028
  else:
843
1029
  res_title = 'fullres'
844
- load_error = False
845
1030
  num_tomo_stacks = nxentry.reduced_data.data.tomo_fields.shape[0]
846
1031
  tomo_recon_stacks = num_tomo_stacks*[np.array([])]
847
1032
  for i in range(num_tomo_stacks):
848
- # Convert reduced data stack from theta,row,column to row,theta,column
849
- self.logger.debug(f'Reading reduced data stack {i+1}...')
1033
+ # Convert reduced data stack from theta,row,column to
1034
+ # row,theta,column
850
1035
  t0 = time()
851
1036
  tomo_stack = np.asarray(nxentry.reduced_data.data.tomo_fields[i])
852
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
853
- if len(tomo_stack.shape) != 3 or any(True for dim in tomo_stack.shape if not dim):
854
- raise ValueError(f'Unable to load tomography stack {i+1} for reconstruction')
1037
+ self._logger.info(
1038
+ f'Reading reduced data stack {i+1} took {time()-t0:.2f} '
1039
+ + 'seconds')
1040
+ if (len(tomo_stack.shape) != 3
1041
+ or any(True for dim in tomo_stack.shape if not dim)):
1042
+ raise RuntimeError(
1043
+ f'Unable to load tomography stack {i+1} for '
1044
+ + 'reconstruction')
855
1045
  tomo_stack = np.swapaxes(tomo_stack, 0, 1)
856
- assert(len(thetas) == tomo_stack.shape[1])
857
- assert(0 <= lower_row < upper_row < tomo_stack.shape[0])
858
- center_offsets = [lower_center_offset-lower_row*center_slope,
859
- upper_center_offset+(tomo_stack.shape[0]-1-upper_row)*center_slope]
1046
+ assert len(thetas) == tomo_stack.shape[1]
1047
+ assert 0 <= lower_row < upper_row < tomo_stack.shape[0]
1048
+ center_offsets = [
1049
+ lower_center_offset-lower_row*center_slope,
1050
+ upper_center_offset + center_slope * (
1051
+ tomo_stack.shape[0]-1-upper_row),
1052
+ ]
860
1053
  t0 = time()
- self.logger.debug(f'Running _reconstruct_one_tomo_stack on {self.num_core} cores ...')
- tomo_recon_stack = self._reconstruct_one_tomo_stack(tomo_stack, thetas,
- center_offsets=center_offsets, num_core=self.num_core, algorithm='gridrec')
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
- self.logger.info(f'Reconstruction of stack {i+1} took {time()-t0:.2f} seconds')
+ tomo_recon_stack = self._reconstruct_one_tomo_stack(
+ tomo_stack, thetas, center_offsets=center_offsets,
+ num_core=self._num_core, algorithm='gridrec')
+ self._logger.info(
+ f'Reconstruction of stack {i+1} took {time()-t0:.2f} seconds')
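
`_reconstruct_one_tomo_stack` is defined elsewhere in this module and is not shown in this hunk. Assuming it ultimately wraps tomopy's gridrec reconstruction, a rough sketch of reconstructing one stack with a single scalar center offset could look like the following (the real helper also takes a pair of per-row center offsets and may apply extra preprocessing, which is omitted here):

```python
# Rough, hypothetical sketch of a gridrec reconstruction of one stack.
import numpy as np
import tomopy

def reconstruct_stack_sketch(tomo_stack, thetas_deg, center_offset, num_core=1):
    # tomo_stack axes after the swapaxes above: row, theta, column
    projections = np.swapaxes(tomo_stack, 0, 1)      # theta, row, column
    center = tomo_stack.shape[2]/2 + center_offset   # rotation axis in pixels
    # Output axes are row/z, x, y, matching the order noted in the comments.
    return tomopy.recon(
        projections, np.radians(thetas_deg), center=center,
        sinogram_order=False, algorithm='gridrec', ncore=num_core)
```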
 
867
1060
  # Combine stacks
868
1061
  tomo_recon_stacks[i] = tomo_recon_stack
869
1062
 
870
1063
  # Resize the reconstructed tomography data
871
- # reconstructed data order in each stack: row/z,x,y
872
- if self.test_mode:
873
- x_bounds = tuple(self.test_config.get('x_bounds'))
874
- y_bounds = tuple(self.test_config.get('y_bounds'))
1064
+ # reconstructed data order in each stack: row/z,x,y
1065
+ if self._test_mode:
1066
+ x_bounds = tuple(self._test_config.get('x_bounds'))
1067
+ y_bounds = tuple(self._test_config.get('y_bounds'))
875
1068
  z_bounds = None
876
- elif self.galaxy_flag:
877
- if x_bounds is not None and not is_int_pair(x_bounds, ge=0,
878
- lt=tomo_recon_stacks[0].shape[1]):
1069
+ elif self._interactive:
1070
+ x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(
1071
+ tomo_recon_stacks, x_bounds=x_bounds, y_bounds=y_bounds,
1072
+ z_bounds=z_bounds)
1073
+ else:
1074
+ if x_bounds is None:
1075
+ self._logger.warning(
1076
+ 'x_bounds unspecified, reconstruct data for full x-range')
1077
+ elif not is_int_pair(x_bounds, ge=0,
1078
+ lt=tomo_recon_stacks[0].shape[1]):
879
1079
  raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
880
- if y_bounds is not None and not is_int_pair(y_bounds, ge=0,
881
- lt=tomo_recon_stacks[0].shape[1]):
1080
+ if y_bounds is None:
1081
+ self._logger.warning(
1082
+ 'y_bounds unspecified, reconstruct data for full y-range')
1083
+ elif not is_int_pair(
1084
+ y_bounds, ge=0, lt=tomo_recon_stacks[0].shape[2]):
882
1085
  raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
883
1086
  z_bounds = None
884
- else:
885
- x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(tomo_recon_stacks,
886
- x_bounds=x_bounds, y_bounds=y_bounds, z_bounds=z_bounds)
887
1087
  if x_bounds is None:
888
1088
  x_range = (0, tomo_recon_stacks[0].shape[1])
889
1089
  x_slice = int(x_range[1]/2)
890
1090
  else:
891
1091
  x_range = (min(x_bounds), max(x_bounds))
892
- x_slice = int((x_bounds[0]+x_bounds[1])/2)
1092
+ x_slice = int((x_bounds[0]+x_bounds[1]) / 2)
893
1093
  if y_bounds is None:
894
1094
  y_range = (0, tomo_recon_stacks[0].shape[2])
895
- y_slice = int(y_range[1]/2)
1095
+ y_slice = int(y_range[1] / 2)
896
1096
  else:
897
1097
  y_range = (min(y_bounds), max(y_bounds))
898
- y_slice = int((y_bounds[0]+y_bounds[1])/2)
1098
+ y_slice = int((y_bounds[0]+y_bounds[1]) / 2)
899
1099
  if z_bounds is None:
900
1100
  z_range = (0, tomo_recon_stacks[0].shape[0])
901
- z_slice = int(z_range[1]/2)
1101
+ z_slice = int(z_range[1] / 2)
902
1102
  else:
903
1103
  z_range = (min(z_bounds), max(z_bounds))
904
- z_slice = int((z_bounds[0]+z_bounds[1])/2)
1104
+ z_slice = int((z_bounds[0]+z_bounds[1]) / 2)
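
The same convention is applied in each direction: when bounds are unspecified the range spans the full axis and the preview slice sits at its midpoint, otherwise the range is the ordered bounds and the slice is their midpoint. A minimal sketch of that convention (hypothetical helper, not in the package):

```python
# Minimal sketch of the bounds-to-(range, mid-slice) convention used above.
def range_and_midslice(bounds, full_size):
    if bounds is None:
        return (0, full_size), full_size//2
    return (min(bounds), max(bounds)), (bounds[0]+bounds[1])//2

# e.g. range_and_midslice(None, 400)      -> ((0, 400), 200)
#      range_and_midslice((50, 350), 400) -> ((50, 350), 200)
```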
905
1105
 
906
1106
  # Plot a few reconstructed image slices
907
- if self.save_figs:
1107
+ if self._save_figs:
908
1108
  for i, stack in enumerate(tomo_recon_stacks):
909
1109
  if num_tomo_stacks == 1:
910
1110
  basetitle = 'recon'
911
1111
  else:
912
1112
  basetitle = f'recon stack {i+1}'
913
1113
  title = f'{basetitle} {res_title} xslice{x_slice}'
914
- quick_imshow(stack[z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
915
- title=title, path=path, save_fig=True, save_only=True)
1114
+ quick_imshow(
1115
+ stack[z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
1116
+ title=title, path=self._output_folder, save_fig=True,
1117
+ save_only=True)
916
1118
  title = f'{basetitle} {res_title} yslice{y_slice}'
917
- quick_imshow(stack[z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
918
- title=title, path=path, save_fig=True, save_only=True)
1119
+ quick_imshow(
1120
+ stack[z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
1121
+ title=title, path=self._output_folder, save_fig=True,
1122
+ save_only=True)
919
1123
  title = f'{basetitle} {res_title} zslice{z_slice}'
920
- quick_imshow(stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
921
- title=title, path=path, save_fig=True, save_only=True)
1124
+ quick_imshow(
1125
+ stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1126
+ title=title, path=self._output_folder, save_fig=True,
1127
+ save_only=True)
922
1128
 
923
1129
  # Save test data to file
924
- # reconstructed data order in each stack: row/z,x,y
925
- if self.test_mode:
1130
+ # reconstructed data order in each stack: row/z,x,y
1131
+ if self._test_mode:
926
1132
  for i, stack in enumerate(tomo_recon_stacks):
927
- np.savetxt(f'{self.output_folder}/recon_stack_{i+1}.txt',
928
- stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]], fmt='%.6e')
1133
+ np.savetxt(
1134
+ f'{self._output_folder}/recon_stack_{i+1}.txt',
1135
+ stack[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1136
+ fmt='%.6e')
929
1137
 
930
1138
  # Add image reconstruction to reconstructed data NXprocess
931
- # reconstructed data order in each stack: row/z,x,y
1139
+ # reconstructed data order in each stack: row/z,x,y
932
1140
  nxprocess.data = NXdata()
933
1141
  nxprocess.attrs['default'] = 'data'
934
1142
  for k, v in center_info.items():
@@ -939,12 +1147,17 @@ class Tomo:
939
1147
  nxprocess.y_bounds = y_bounds
940
1148
  if z_bounds is not None:
941
1149
  nxprocess.z_bounds = z_bounds
942
- nxprocess.data['reconstructed_data'] = np.asarray([stack[z_range[0]:z_range[1],
943
- x_range[0]:x_range[1],y_range[0]:y_range[1]] for stack in tomo_recon_stacks])
1150
+ nxprocess.data['reconstructed_data'] = np.asarray(
1151
+ [stack[z_range[0]:z_range[1],x_range[0]:x_range[1],
1152
+ y_range[0]:y_range[1]] for stack in tomo_recon_stacks])
944
1153
  nxprocess.data.attrs['signal'] = 'reconstructed_data'
945
1154
 
946
- # Create a copy of the input Nexus object and remove reduced data
947
- exclude_items = [f'{nxentry._name}/reduced_data/data', f'{nxentry._name}/data/reduced_data']
1155
+ # Create a copy of the input Nexus object and remove reduced
1156
+ # data
1157
+ exclude_items = [
1158
+ f'{nxentry.nxname}/reduced_data/data',
1159
+ f'{nxentry.nxname}/data/reduced_data',
1160
+ ]
948
1161
  nxroot_copy = nxcopy(nxroot, exclude_nxpaths=exclude_items)
949
1162
 
950
1163
  # Add the reconstructed data NXprocess to the new Nexus object
@@ -953,19 +1166,39 @@ class Tomo:
953
1166
  if 'data' not in nxentry_copy:
954
1167
  nxentry_copy.data = NXdata()
955
1168
  nxentry_copy.attrs['default'] = 'data'
956
- nxentry_copy.data.makelink(nxprocess.data.reconstructed_data, name='reconstructed_data')
1169
+ nxentry_copy.data.makelink(
1170
+ nxprocess.data.reconstructed_data, name='reconstructed_data')
957
1171
  nxentry_copy.data.attrs['signal'] = 'reconstructed_data'
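
The copy-and-link pattern used here keeps the large reconstructed array inside the NXprocess and exposes it in the entry's NXdata through a NeXus link rather than a second copy. A small, self-contained sketch of that pattern with nexusformat, using hypothetical group names (a sketch only; the package builds the tree via its own nxcopy utility):

```python
# Hypothetical sketch of the NXprocess + NXdata link pattern.
import numpy as np
from nexusformat.nexus import NXdata, NXentry, NXprocess, NXroot

nxroot = NXroot(NXentry())
nxprocess = NXprocess()
nxprocess.data = NXdata()
nxprocess.data['reconstructed_data'] = np.zeros((2, 8, 8, 8))
nxprocess.data.attrs['signal'] = 'reconstructed_data'
nxroot.entry.reconstructed_data = nxprocess

# Expose the same array in the entry's default NXdata via a link.
nxroot.entry.data = NXdata()
nxroot.entry.data.makelink(
    nxprocess.data.reconstructed_data, name='reconstructed_data')
nxroot.entry.data.attrs['signal'] = 'reconstructed_data'
```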
958
1172
 
959
- return(nxroot_copy)
1173
+ return nxroot_copy
960
1174
 
961
- def combine_data(self, nxroot, x_bounds=None, y_bounds=None, z_bounds=None):
1175
+ def combine_data(
1176
+ self, nxroot, x_bounds=None, y_bounds=None, z_bounds=None):
962
1177
  """Combine the reconstructed tomography stacks.
963
- """
964
- from nexusformat.nexus import NXdata, NXentry, NXprocess, NXroot
965
1178
 
1179
+ :param nxroot: A stack of reconstructed tomography datasets
+ :type data: nexusformat.nexus.NXroot
+ :param x_bounds: Combined image bounds in the x-direction
+ :type x_bounds: tuple(int, int), list[int], optional
+ :param y_bounds: Combined image bounds in the y-direction
+ :type y_bounds: tuple(int, int), list[int], optional
+ :param z_bounds: Combined image bounds in the z-direction
+ :type z_bounds: tuple(int, int), list[int], optional
+ :return: Combined reconstructed tomography data
+ :rtype: dict
+ """
1190
+ # Third party modules
1191
+ from nexusformat.nexus import (
1192
+ NXdata,
1193
+ NXentry,
1194
+ NXprocess,
1195
+ NXroot,
1196
+ )
1197
+
1198
+ # Local modules
966
1199
  from CHAP.common.utils.general import is_int_pair
967
1200
 
968
- self.logger.info('Combine the reconstructed tomography stacks')
1201
+ self._logger.info('Combine the reconstructed tomography stacks')
969
1202
 
970
1203
  if not isinstance(nxroot, NXroot):
971
1204
  raise ValueError(f'Invalid parameter nxroot ({nxroot})')
@@ -985,105 +1218,121 @@ class Tomo:
985
1218
  raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
986
1219
  z_bounds = tuple(z_bounds)
987
1220
 
988
- # Create plot galaxy path directory and path if needed
989
- if self.galaxy_flag:
990
- if not os_path.exists('tomo_combine_plots'):
991
- mkdir('tomo_combine_plots')
992
- path = 'tomo_combine_plots'
993
- else:
994
- path = self.output_folder
995
-
996
1221
  # Check if reconstructed image data is available
997
- if ('reconstructed_data' not in nxentry or 'reconstructed_data' not in nxentry.data):
998
- raise KeyError(f'Unable to find valid reconstructed image data in {nxentry}.')
1222
+ if ('reconstructed_data' not in nxentry
1223
+ or 'reconstructed_data' not in nxentry.data):
1224
+ raise KeyError(
1225
+ f'Unable to find valid reconstructed image data in {nxentry}')
999
1226
 
1000
- # Create an NXprocess to store combined image reconstruction (meta)data
1227
+ # Create an NXprocess to store combined image reconstruction
1228
+ # (meta)data
1001
1229
  nxprocess = NXprocess()
1002
1230
 
1003
1231
  # Get the reconstructed data
1004
- # reconstructed data order: stack,row(z),x,y
1005
- # Note: Nexus cannot follow a link if the data it points to is too big,
1006
- # so get the data from the actual place, not from nxentry.data
1007
- num_tomo_stacks = nxentry.reconstructed_data.data.reconstructed_data.shape[0]
1232
+ # reconstructed data order: stack,row(z),x,y
1233
+ # Note: Nexus can't follow a link if the data it points to is
1234
+ # too big get the data from the actual place, not from
1235
+ # nxentry.data
1236
+ num_tomo_stacks = \
1237
+ nxentry.reconstructed_data.data.reconstructed_data.shape[0]
1008
1238
  if num_tomo_stacks == 1:
1009
- self.logger.info('Only one stack available: leaving combine_data')
1010
- return(None)
1239
+ self._logger.info('Only one stack available: leaving combine_data')
1240
+ return None
1011
1241
 
1012
1242
  # Combine the reconstructed stacks
1013
- # (load one stack at a time to reduce risk of hitting Nexus data access limit)
1243
+ # (load one stack at a time to reduce risk of hitting Nexus
1244
+ # data access limit)
1014
1245
  t0 = time()
1015
- self.logger.debug(f'Combining the reconstructed stacks ...')
1016
- tomo_recon_combined = np.asarray(nxentry.reconstructed_data.data.reconstructed_data[0])
1246
+ tomo_recon_combined = np.asarray(
1247
+ nxentry.reconstructed_data.data.reconstructed_data[0])
1017
1248
  if num_tomo_stacks > 2:
1018
- tomo_recon_combined = np.concatenate([tomo_recon_combined]+
1019
- [nxentry.reconstructed_data.data.reconstructed_data[i]
1020
- for i in range(1, num_tomo_stacks-1)])
1249
+ tomo_recon_combined = np.concatenate(
1250
+ [tomo_recon_combined]
1251
+ + [nxentry.reconstructed_data.data.reconstructed_data[i]
1252
+ for i in range(1, num_tomo_stacks-1)])
1021
1253
  if num_tomo_stacks > 1:
1022
- tomo_recon_combined = np.concatenate([tomo_recon_combined]+
1023
- [nxentry.reconstructed_data.data.reconstructed_data[num_tomo_stacks-1]])
1024
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1025
- self.logger.info(f'Combining the reconstructed stacks took {time()-t0:.2f} seconds')
1254
+ tomo_recon_combined = np.concatenate(
1255
+ [tomo_recon_combined]
1256
+ + [nxentry.reconstructed_data.data.reconstructed_data[
1257
+ num_tomo_stacks-1]])
1258
+ self._logger.info(
1259
+ f'Combining the reconstructed stacks took {time()-t0:.2f} seconds')
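
The incremental concatenation reads one reconstructed stack at a time from the NeXus field and grows the combined array along the vertical (row/z) axis, so the full linked dataset never has to be loaded in one call. A condensed sketch of the same loop (hypothetical helper, not in the package):

```python
# Hypothetical sketch of stacking per-stack reconstructions along row/z,
# reading one stack at a time.
import numpy as np

def combine_stacks_sketch(recon_field, num_stacks):
    combined = np.asarray(recon_field[0])
    for i in range(1, num_stacks):
        combined = np.concatenate([combined, np.asarray(recon_field[i])])
    return combined  # axes: row/z, x, y
```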
1026
1260
 
1027
1261
  # Resize the combined tomography data stacks
1028
- # combined data order: row/z,x,y
1029
- if self.test_mode:
1262
+ # combined data order: row/z,x,y
1263
+ if self._test_mode:
1030
1264
  x_bounds = None
1031
1265
  y_bounds = None
1032
- z_bounds = tuple(self.test_config.get('z_bounds'))
1033
- elif self.galaxy_flag:
1034
- if x_bounds is not None and not is_int_pair(x_bounds, ge=0,
1035
- lt=tomo_recon_stacks[0].shape[1]):
1036
- raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
1037
- if y_bounds is not None and not is_int_pair(y_bounds, ge=0,
1038
- lt=tomo_recon_stacks[0].shape[1]):
1039
- raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
1040
- z_bounds = None
1041
- else:
1266
+ z_bounds = tuple(self._test_config.get('z_bounds'))
1267
+ elif self._interactive:
1042
1268
  if x_bounds is None and x_bounds in nxentry.reconstructed_data:
1043
1269
  x_bounds = (-1, -1)
1044
1270
  if y_bounds is None and y_bounds in nxentry.reconstructed_data:
1045
1271
  y_bounds = (-1, -1)
1046
- x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(tomo_recon_combined,
1047
- z_only=True)
1272
+ x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(
1273
+ tomo_recon_combined, z_only=True)
1274
+ else:
1275
+ if x_bounds is None:
1276
+ self._logger.warning(
1277
+ 'x_bounds unspecified, reconstruct data for full x-range')
1278
+ elif not is_int_pair(
1279
+ x_bounds, ge=0, lt=tomo_recon_combined.shape[1]):
1280
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
1281
+ if y_bounds is None:
1282
+ self._logger.warning(
1283
+ 'y_bounds unspecified, reconstruct data for full y-range')
1284
+ elif not is_int_pair(
1285
+ y_bounds, ge=0, lt=tomo_recon_combined.shape[2]):
1286
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
1287
+ z_bounds = None
1048
1288
  if x_bounds is None:
1049
1289
  x_range = (0, tomo_recon_combined.shape[1])
1050
1290
  x_slice = int(x_range[1]/2)
1051
1291
  else:
1052
1292
  x_range = x_bounds
1053
- x_slice = int((x_bounds[0]+x_bounds[1])/2)
1293
+ x_slice = int((x_bounds[0]+x_bounds[1]) / 2)
1054
1294
  if y_bounds is None:
1055
1295
  y_range = (0, tomo_recon_combined.shape[2])
1056
1296
  y_slice = int(y_range[1]/2)
1057
1297
  else:
1058
1298
  y_range = y_bounds
1059
- y_slice = int((y_bounds[0]+y_bounds[1])/2)
1299
+ y_slice = int((y_bounds[0]+y_bounds[1]) / 2)
1060
1300
  if z_bounds is None:
1061
1301
  z_range = (0, tomo_recon_combined.shape[0])
1062
1302
  z_slice = int(z_range[1]/2)
1063
1303
  else:
1064
1304
  z_range = z_bounds
1065
- z_slice = int((z_bounds[0]+z_bounds[1])/2)
1305
+ z_slice = int((z_bounds[0]+z_bounds[1]) / 2)
1066
1306
 
1067
1307
  # Plot a few combined image slices
1068
- if self.save_figs:
1069
- quick_imshow(tomo_recon_combined[z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
1070
- title=f'recon combined xslice{x_slice}', path=path, save_fig=True,
1071
- save_only=True)
1072
- quick_imshow(tomo_recon_combined[z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
1073
- title=f'recon combined yslice{y_slice}', path=path, save_fig=True,
1074
- save_only=True)
1075
- quick_imshow(tomo_recon_combined[z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1076
- title=f'recon combined zslice{z_slice}', path=path, save_fig=True,
1077
- save_only=True)
1308
+ if self._save_figs:
1309
+ quick_imshow(
1310
+ tomo_recon_combined[
1311
+ z_range[0]:z_range[1],x_slice,y_range[0]:y_range[1]],
1312
+ title=f'recon combined xslice{x_slice}',
1313
+ path=self._output_folder, save_fig=True, save_only=True)
1314
+ quick_imshow(
1315
+ tomo_recon_combined[
1316
+ z_range[0]:z_range[1],x_range[0]:x_range[1],y_slice],
1317
+ title=f'recon combined yslice{y_slice}',
1318
+ path=self._output_folder, save_fig=True, save_only=True)
1319
+ quick_imshow(
1320
+ tomo_recon_combined[
1321
+ z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1322
+ title=f'recon combined zslice{z_slice}',
1323
+ path=self._output_folder, save_fig=True, save_only=True)
1078
1324
 
1079
1325
  # Save test data to file
1080
- # combined data order: row/z,x,y
1081
- if self.test_mode:
1082
- np.savetxt(f'{self.output_folder}/recon_combined.txt', tomo_recon_combined[
1083
- z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]], fmt='%.6e')
1326
+ # combined data order: row/z,x,y
1327
+ if self._test_mode:
1328
+ np.savetxt(
1329
+ f'{self._output_folder}/recon_combined.txt',
1330
+ tomo_recon_combined[
1331
+ z_slice,x_range[0]:x_range[1],y_range[0]:y_range[1]],
1332
+ fmt='%.6e')
1084
1333
 
1085
1334
  # Add image reconstruction to reconstructed data NXprocess
1086
- # combined data order: row/z,x,y
1335
+ # combined data order: row/z,x,y
1087
1336
  nxprocess.data = NXdata()
1088
1337
  nxprocess.attrs['default'] = 'data'
1089
1338
  if x_bounds is not None:
@@ -1093,12 +1342,15 @@ class Tomo:
1093
1342
  if z_bounds is not None:
1094
1343
  nxprocess.z_bounds = z_bounds
1095
1344
  nxprocess.data['combined_data'] = tomo_recon_combined[
1096
- z_range[0]:z_range[1],x_range[0]:x_range[1],y_range[0]:y_range[1]]
1345
+ z_range[0]:z_range[1],x_range[0]:x_range[1],y_range[0]:y_range[1]]
1097
1346
  nxprocess.data.attrs['signal'] = 'combined_data'
1098
1347
 
1099
- # Create a copy of the input Nexus object and remove reconstructed data
1100
- exclude_items = [f'{nxentry._name}/reconstructed_data/data',
1101
- f'{nxentry._name}/data/reconstructed_data']
1348
+ # Create a copy of the input Nexus object and remove
1349
+ # reconstructed data
1350
+ exclude_items = [
1351
+ f'{nxentry.nxname}/reconstructed_data/data',
1352
+ f'{nxentry.nxname}/data/reconstructed_data',
1353
+ ]
1102
1354
  nxroot_copy = nxcopy(nxroot, exclude_nxpaths=exclude_items)
1103
1355
 
1104
1356
  # Add the combined data NXprocess to the new Nexus object
@@ -1107,39 +1359,51 @@ class Tomo:
1107
1359
  if 'data' not in nxentry_copy:
1108
1360
  nxentry_copy.data = NXdata()
1109
1361
  nxentry_copy.attrs['default'] = 'data'
1110
- nxentry_copy.data.makelink(nxprocess.data.combined_data, name='combined_data')
1362
+ nxentry_copy.data.makelink(
1363
+ nxprocess.data.combined_data, name='combined_data')
1111
1364
  nxentry_copy.data.attrs['signal'] = 'combined_data'
1112
1365
 
1113
- return(nxroot_copy)
1366
+ return nxroot_copy
1114
1367
 
1115
1368
  def _gen_dark(self, nxentry, reduced_data):
1116
- """Generate dark field.
1117
- """
1369
+ """Generate dark field."""
1370
+ # Third party modules
1118
1371
  from nexusformat.nexus import NXdata
1119
1372
 
1120
- from CHAP.common.models.map import get_scanparser, import_scanparser
1373
+ # Local modules
1374
+ from CHAP.common.models.map import (
1375
+ get_scanparser,
1376
+ import_scanparser,
1377
+ )
1121
1378
 
1122
1379
  # Get the dark field images
1123
1380
  image_key = nxentry.instrument.detector.get('image_key', None)
1124
1381
  if image_key and 'data' in nxentry.instrument.detector:
1125
- field_indices = [index for index, key in enumerate(image_key) if key == 2]
1382
+ field_indices = [
1383
+ index for index, key in enumerate(image_key) if key == 2]
1126
1384
  tdf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1127
- # RV the default NXtomo form does not accomodate bright or dark field stacks
1385
+ # RV the default NXtomo form does not accomodate dark field
1386
+ # stacks
1128
1387
  else:
1129
- import_scanparser(nxentry.instrument.source.attrs['station'],
1130
- nxentry.instrument.source.attrs['experiment_type'])
1388
+ import_scanparser(
1389
+ nxentry.instrument.source.attrs['station'],
1390
+ nxentry.instrument.source.attrs['experiment_type'])
1131
1391
  dark_field_scans = nxentry.spec_scans.dark_field
1132
1392
  detector_prefix = str(nxentry.instrument.detector.local_name)
1133
1393
  tdf_stack = []
1134
1394
  for nxsubentry_name, nxsubentry in dark_field_scans.items():
1135
1395
  scan_number = int(nxsubentry_name.split('_')[-1])
1136
- scanparser = get_scanparser(dark_field_scans.attrs['spec_file'], scan_number)
1137
- image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1396
+ scanparser = get_scanparser(
1397
+ dark_field_scans.attrs['spec_file'], scan_number)
1398
+ image_offset = int(
1399
+ nxsubentry.instrument.detector.frame_start_number)
1138
1400
  num_image = len(nxsubentry.sample.rotation_angle)
1139
- tdf_stack.append(scanparser.get_detector_data(detector_prefix,
1401
+ tdf_stack.append(
1402
+ scanparser.get_detector_data(
1403
+ detector_prefix,
1140
1404
  (image_offset, image_offset+num_image)))
1141
1405
  if isinstance(tdf_stack, list):
1142
- assert(len(tdf_stack) == 1) # TODO
1406
+ assert len(tdf_stack) == 1 # RV
1143
1407
  tdf_stack = tdf_stack[0]
1144
1408
 
1145
1409
  # Take median
@@ -1149,137 +1413,153 @@ class Tomo:
1149
1413
  tdf = np.median(tdf_stack, axis=0)
1150
1414
  del tdf_stack
1151
1415
  else:
1152
- raise ValueError(f'Invalid tdf_stack shape ({tdf_stack.shape})')
1416
+ raise RuntimeError(f'Invalid tdf_stack shape ({tdf_stack.shape})')
1153
1417
 
1154
1418
  # Remove dark field intensities above the cutoff
1155
- #RV tdf_cutoff = None
1156
- tdf_cutoff = tdf.min()+2*(np.median(tdf)-tdf.min())
1157
- self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1419
+ # tdf_cutoff = None
1420
+ tdf_cutoff = tdf.min() + 2 * (np.median(tdf)-tdf.min())
1421
+ self._logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1158
1422
  if tdf_cutoff is not None:
1159
1423
  if not isinstance(tdf_cutoff, (int, float)) or tdf_cutoff < 0:
1160
- self.logger.warning(f'Ignoring illegal value of tdf_cutoff {tdf_cutoff}')
1424
+ self._logger.warning(
1425
+ f'Ignoring illegal value of tdf_cutoff {tdf_cutoff}')
1161
1426
  else:
1162
1427
  tdf[tdf > tdf_cutoff] = np.nan
1163
- self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1428
+ self._logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1164
1429
 
1165
1430
  # Remove nans
1166
1431
  tdf_mean = np.nanmean(tdf)
1167
- self.logger.debug(f'tdf_mean = {tdf_mean}')
1168
- np.nan_to_num(tdf, copy=False, nan=tdf_mean, posinf=tdf_mean, neginf=0.)
1432
+ self._logger.debug(f'tdf_mean = {tdf_mean}')
1433
+ np.nan_to_num(
1434
+ tdf, copy=False, nan=tdf_mean, posinf=tdf_mean, neginf=0.0)
1169
1435
 
1170
1436
  # Plot dark field
1171
- if self.save_figs:
1172
- if self.galaxy_flag:
1173
- quick_imshow(tdf, title='dark field', path='tomo_reduce_plots', save_fig=True,
1174
- save_only=True)
1175
- else:
1176
- quick_imshow(tdf, title='dark field', path=self.output_folder, save_fig=True,
1177
- save_only=True)
1437
+ if self._save_figs:
1438
+ quick_imshow(
1439
+ tdf, title='dark field', path=self._output_folder,
1440
+ save_fig=True, save_only=True)
1178
1441
 
1179
1442
  # Add dark field to reduced data NXprocess
1180
1443
  reduced_data.data = NXdata()
1181
1444
  reduced_data.data['dark_field'] = tdf
1182
1445
 
1183
- return(reduced_data)
1446
+ return reduced_data
1184
1447
 
1185
1448
  def _gen_bright(self, nxentry, reduced_data):
1186
- """Generate bright field.
1187
- """
1449
+ """Generate bright field."""
1450
+ # Third party modules
1188
1451
  from nexusformat.nexus import NXdata
1189
1452
 
1190
- from CHAP.common.models.map import get_scanparser, import_scanparser
1453
+ # Local modules
1454
+ from CHAP.common.models.map import (
1455
+ get_scanparser,
1456
+ import_scanparser,
1457
+ )
1191
1458
 
1192
1459
  # Get the bright field images
1193
1460
  image_key = nxentry.instrument.detector.get('image_key', None)
1194
1461
  if image_key and 'data' in nxentry.instrument.detector:
1195
- field_indices = [index for index, key in enumerate(image_key) if key == 1]
1462
+ field_indices = [
1463
+ index for index, key in enumerate(image_key) if key == 1]
1196
1464
  tbf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1197
- # RV the default NXtomo form does not accomodate bright or dark field stacks
1465
+ # RV the default NXtomo form does not accomodate bright
1466
+ # field stacks
1198
1467
  else:
1199
- import_scanparser(nxentry.instrument.source.attrs['station'],
1200
- nxentry.instrument.source.attrs['experiment_type'])
1468
+ import_scanparser(
1469
+ nxentry.instrument.source.attrs['station'],
1470
+ nxentry.instrument.source.attrs['experiment_type'])
1201
1471
  bright_field_scans = nxentry.spec_scans.bright_field
1202
1472
  detector_prefix = str(nxentry.instrument.detector.local_name)
1203
1473
  tbf_stack = []
1204
1474
  for nxsubentry_name, nxsubentry in bright_field_scans.items():
1205
1475
  scan_number = int(nxsubentry_name.split('_')[-1])
1206
- scanparser = get_scanparser(bright_field_scans.attrs['spec_file'], scan_number)
1207
- image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1476
+ scanparser = get_scanparser(
1477
+ bright_field_scans.attrs['spec_file'], scan_number)
1478
+ image_offset = int(
1479
+ nxsubentry.instrument.detector.frame_start_number)
1208
1480
  num_image = len(nxsubentry.sample.rotation_angle)
1209
- tbf_stack.append(scanparser.get_detector_data(detector_prefix,
1481
+ tbf_stack.append(
1482
+ scanparser.get_detector_data(
1483
+ detector_prefix,
1210
1484
  (image_offset, image_offset+num_image)))
1211
1485
  if isinstance(tbf_stack, list):
1212
- assert(len(tbf_stack) == 1) # TODO
1486
+ assert len(tbf_stack) == 1 # RV
1213
1487
  tbf_stack = tbf_stack[0]
1214
1488
 
1215
1489
  # Take median if more than one image
1216
- """Median or mean: It may be best to try the median because of some image
1217
- artifacts that arise due to crinkles in the upstream kapton tape windows
1218
- causing some phase contrast images to appear on the detector.
1219
- One thing that also may be useful in a future implementation is to do a
1220
- brightfield adjustment on EACH frame of the tomo based on a ROI in the
1221
- corner of the frame where there is no sample but there is the direct X-ray
1222
- beam because there is frame to frame fluctuations from the incoming beam.
1223
- We don’t typically account for them but potentially could.
1224
- """
1225
- from nexusformat.nexus import NXdata
1226
-
1490
+ #
1491
+ # Median or mean: It may be best to try the median because of
1492
+ # some image artifacts that arise due to crinkles in the
1493
+ # upstream kapton tape windows causing some phase contrast
1494
+ # images to appear on the detector.
1495
+ #
1496
+ # One thing that also may be useful in a future implementation
1497
+ # is to do a brightfield adjustment on EACH frame of the tomo
1498
+ # based on a ROI in the corner of the frame where there is no
1499
+ # sample but there is the direct X-ray beam because there is
1500
+ # frame to frame fluctuations from the incoming beam. We don’t
1501
+ # typically account for them but potentially could.
1227
1502
  if tbf_stack.ndim == 2:
1228
1503
  tbf = tbf_stack
1229
1504
  elif tbf_stack.ndim == 3:
1230
1505
  tbf = np.median(tbf_stack, axis=0)
1231
1506
  del tbf_stack
1232
1507
  else:
1233
- raise ValueError(f'Invalid tbf_stack shape ({tbf_stacks.shape})')
1508
+ raise RuntimeError(f'Invalid tbf_stack shape ({tbf_stack.shape})')
1234
1509
 
1235
1510
  # Subtract dark field
1236
1511
  if 'data' in reduced_data and 'dark_field' in reduced_data.data:
1237
1512
  tbf -= reduced_data.data.dark_field
1238
1513
  else:
1239
- self.logger.warning('Dark field unavailable')
1514
+ self._logger.warning('Dark field unavailable')
1240
1515
 
1241
1516
  # Set any non-positive values to one
1242
1517
  # (avoid negative bright field values for spikes in dark field)
1243
1518
  tbf[tbf < 1] = 1
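
The bright-field reduction mirrors the dark-field one: take the median over the bright frames, subtract the dark field when it is available, and floor the result at 1 so the later division and -log linearization stay well defined. A condensed sketch, assuming a 3-D stack of bright frames (hypothetical helper, not in the package):

```python
# Hypothetical sketch of the bright-field reduction.
import numpy as np

def reduce_bright_field(tbf_stack, tdf=None):
    tbf = np.median(tbf_stack, axis=0).astype(float)
    if tdf is not None:
        tbf -= tdf          # dark-field subtraction
    tbf[tbf < 1] = 1        # avoid non-positive values from dark-field spikes
    return tbf
```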
1244
1519
 
1245
1520
  # Plot bright field
1246
- if self.save_figs:
1247
- if self.galaxy_flag:
1248
- quick_imshow(tbf, title='bright field', path='tomo_reduce_plots', save_fig=True,
1249
- save_only=True)
1250
- else:
1251
- quick_imshow(tbf, title='bright field', path=self.output_folder, save_fig=True,
1252
- save_only=True)
1521
+ if self._save_figs:
1522
+ quick_imshow(
1523
+ tbf, title='bright field', path=self._output_folder,
1524
+ save_fig=True, save_only=True)
1253
1525
 
1254
1526
  # Add bright field to reduced data NXprocess
1255
- if 'data' not in reduced_data:
1527
+ if 'data' not in reduced_data:
1256
1528
  reduced_data.data = NXdata()
1257
1529
  reduced_data.data['bright_field'] = tbf
1258
1530
 
1259
- return(reduced_data)
1531
+ return reduced_data
1260
1532
 
1261
1533
  def _set_detector_bounds(self, nxentry, reduced_data, img_x_bounds=None):
1262
- """Set vertical detector bounds for each image stack.
1263
- Right now the range is the same for each set in the image stack.
1264
1534
  """
1265
- from CHAP.common.models.map import get_scanparser, import_scanparser
1535
+ Set vertical detector bounds for each image stack.Right now the
1536
+ range is the same for each set in the image stack.
1537
+ """
1538
+ # Local modules
1539
+ from CHAP.common.models.map import (
1540
+ get_scanparser,
1541
+ import_scanparser,
1542
+ )
1266
1543
  from CHAP.common.utils.general import is_index_range
1267
1544
 
1268
- if self.test_mode:
1269
- return(tuple(self.test_config['img_x_bounds']))
1545
+ if self._test_mode:
1546
+ return tuple(self._test_config['img_x_bounds'])
1270
1547
 
1271
1548
  # Get the first tomography image and the reference heights
1272
1549
  image_key = nxentry.instrument.detector.get('image_key', None)
1273
1550
  if image_key and 'data' in nxentry.instrument.detector:
1274
- field_indices = [index for index, key in enumerate(image_key) if key == 0]
1275
- first_image = np.asarray(nxentry.instrument.detector.data[field_indices[0],:,:])
1551
+ field_indices = [
1552
+ index for index, key in enumerate(image_key) if key == 0]
1553
+ first_image = np.asarray(
1554
+ nxentry.instrument.detector.data[field_indices[0],:,:])
1276
1555
  theta = float(nxentry.sample.rotation_angle[field_indices[0]])
1277
1556
  z_translation_all = nxentry.sample.z_translation[field_indices]
1278
1557
  vertical_shifts = sorted(list(set(z_translation_all)))
1279
1558
  num_tomo_stacks = len(vertical_shifts)
1280
1559
  else:
1281
- import_scanparser(nxentry.instrument.source.attrs['station'],
1282
- nxentry.instrument.source.attrs['experiment_type'])
1560
+ import_scanparser(
1561
+ nxentry.instrument.source.attrs['station'],
1562
+ nxentry.instrument.source.attrs['experiment_type'])
1283
1563
  tomo_field_scans = nxentry.spec_scans.tomo_fields
1284
1564
  num_tomo_stacks = len(tomo_field_scans.keys())
1285
1565
  center_stack_index = int(num_tomo_stacks/2)
@@ -1287,20 +1567,29 @@ class Tomo:
1287
1567
  vertical_shifts = []
1288
1568
  for i, nxsubentry in enumerate(tomo_field_scans.items()):
1289
1569
  scan_number = int(nxsubentry[0].split('_')[-1])
1290
- scanparser = get_scanparser(tomo_field_scans.attrs['spec_file'], scan_number)
1291
- image_offset = int(nxsubentry[1].instrument.detector.frame_start_number)
1570
+ scanparser = get_scanparser(
1571
+ tomo_field_scans.attrs['spec_file'], scan_number)
1572
+ image_offset = int(
1573
+ nxsubentry[1].instrument.detector.frame_start_number)
1292
1574
  vertical_shifts.append(nxsubentry[1].sample.z_translation)
1293
1575
  if i == center_stack_index:
1294
- first_image = scanparser.get_detector_data(detector_prefix, image_offset)
1576
+ first_image = scanparser.get_detector_data(
1577
+ detector_prefix, image_offset)
1295
1578
  theta = float(nxsubentry[1].sample.rotation_angle[0])
1296
1579
 
1297
1580
  # Select image bounds
1298
- title = f'tomography image at theta={round(theta, 2)+0}'
1581
+ title = f'tomography image at theta = {round(theta, 2)+0}'
1299
1582
  if img_x_bounds is not None:
1300
- if not is_index_range(img_x_bounds, ge=0, le=first_image.shape[0]):
1301
- raise ValueError(f'Invalid parameter img_x_bounds ({img_x_bounds})')
1302
- #RV TODO make interactive upon request?
1303
- return(img_x_bounds)
1583
+ if is_index_range(img_x_bounds, ge=0, le=first_image.shape[0]):
1584
+ return img_x_bounds
1585
+ if self._interactive:
1586
+ self._logger.warning(
1587
+ f'Invalid parameter img_x_bounds ({img_x_bounds}), '
1588
+ + 'ignoring img_x_bounds')
1589
+ img_x_bounds = None
1590
+ else:
1591
+ raise ValueError(
1592
+ f'Invalid parameter img_x_bounds ({img_x_bounds})')
1304
1593
  if nxentry.instrument.source.attrs['station'] in ('id1a3', 'id3a'):
1305
1594
  pixel_size = nxentry.instrument.detector.x_pixel_size
1306
1595
  # Try to get a fit from the bright field
@@ -1309,51 +1598,59 @@ class Tomo:
1309
1598
  x_sum = np.sum(tbf, 1)
1310
1599
  x_sum_min = x_sum.min()
1311
1600
  x_sum_max = x_sum.max()
1312
- fit = Fit.fit_data(x_sum, 'rectangle', x=np.array(range(len(x_sum))), form='atan',
1313
- guess=True)
1601
+ fit = Fit.fit_data(
1602
+ x_sum, 'rectangle', x=np.array(range(len(x_sum))),
1603
+ form='atan', guess=True)
1314
1604
  parameters = fit.best_values
1315
1605
  x_low_fit = parameters.get('center1', None)
1316
1606
  x_upp_fit = parameters.get('center2', None)
1317
1607
  sig_low = parameters.get('sigma1', None)
1318
1608
  sig_upp = parameters.get('sigma2', None)
1319
- have_fit = fit.success and x_low_fit is not None and x_upp_fit is not None and \
1320
- sig_low is not None and sig_upp is not None and \
1321
- 0 <= x_low_fit < x_upp_fit <= x_sum.size and \
1322
- (sig_low+sig_upp)/(x_upp_fit-x_low_fit) < 0.1
1609
+ have_fit = (fit.success and x_low_fit is not None
1610
+ and x_upp_fit is not None and sig_low is not None
1611
+ and sig_upp is not None
1612
+ and 0 <= x_low_fit < x_upp_fit <= x_sum.size
1613
+ and (sig_low+sig_upp) / (x_upp_fit-x_low_fit) < 0.1)
1323
1614
  if have_fit:
1324
1615
  # Set a 5% margin on each side
1325
- margin = 0.05*(x_upp_fit-x_low_fit)
1616
+ margin = 0.05 * (x_upp_fit-x_low_fit)
1326
1617
  x_low_fit = max(0, x_low_fit-margin)
1327
1618
  x_upp_fit = min(tbf_shape[0], x_upp_fit+margin)
1328
1619
  if num_tomo_stacks == 1:
1329
1620
  if have_fit:
1330
- # Set the default range to enclose the full fitted window
1621
+ # Set the default range to enclose the full fitted
1622
+ # window
1331
1623
  x_low = int(x_low_fit)
1332
1624
  x_upp = int(x_upp_fit)
1333
1625
  else:
1334
- # Center a default range of 1 mm (RV: can we get this from the slits?)
1335
- num_x_min = int((1.0-0.5*pixel_size)/pixel_size)
1336
- x_low = int(0.5*(tbf_shape[0]-num_x_min))
1626
+ # Center a default range of 1 mm
1627
+ # RV can we get this from the slits?
1628
+ num_x_min = int((1.0 - 0.5*pixel_size) / pixel_size)
1629
+ x_low = int((tbf_shape[0]-num_x_min) / 2)
1337
1630
  x_upp = x_low+num_x_min
1338
1631
  else:
1339
1632
  # Get the default range from the reference heights
1340
1633
  delta_z = vertical_shifts[1]-vertical_shifts[0]
1341
1634
  for i in range(2, num_tomo_stacks):
1342
- delta_z = min(delta_z, vertical_shifts[i]-vertical_shifts[i-1])
1343
- self.logger.debug(f'delta_z = {delta_z}')
1344
- num_x_min = int((delta_z-0.5*pixel_size)/pixel_size)
1345
- self.logger.debug(f'num_x_min = {num_x_min}')
1635
+ delta_z = min(
1636
+ delta_z, vertical_shifts[i]-vertical_shifts[i-1])
1637
+ self._logger.debug(f'delta_z = {delta_z}')
1638
+ num_x_min = int((delta_z - 0.5*pixel_size) / pixel_size)
1639
+ self._logger.debug(f'num_x_min = {num_x_min}')
1346
1640
  if num_x_min > tbf_shape[0]:
1347
- self.logger.warning('Image bounds and pixel size prevent seamless stacking')
1641
+ self._logger.warning(
1642
+ 'Image bounds and pixel size prevent seamless '
1643
+ + 'stacking')
1348
1644
  if have_fit:
1349
- # Center the default range relative to the fitted window
1350
- x_low = int(0.5*(x_low_fit+x_upp_fit-num_x_min))
1645
+ # Center the default range relative to the fitted
1646
+ # window
1647
+ x_low = int((x_low_fit+x_upp_fit-num_x_min) / 2)
1351
1648
  x_upp = x_low+num_x_min
1352
1649
  else:
1353
1650
  # Center the default range
1354
- x_low = int(0.5*(tbf_shape[0]-num_x_min))
1651
+ x_low = int((tbf_shape[0]-num_x_min) / 2)
1355
1652
  x_upp = x_low+num_x_min
1356
- if self.galaxy_flag:
1653
+ if not self._interactive:
1357
1654
  img_x_bounds = (x_low, x_upp)
1358
1655
  else:
1359
1656
  tmp = np.copy(tbf)
@@ -1367,13 +1664,14 @@ class Tomo:
1367
1664
  tmp[x_upp-1,:] = tmp_max
1368
1665
  quick_imshow(tmp, title=title)
1369
1666
  del tmp
1370
- quick_plot((range(x_sum.size), x_sum),
1371
- ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1372
- ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1373
- title='sum over theta and y')
1667
+ quick_plot(
1668
+ (range(x_sum.size), x_sum),
1669
+ ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1670
+ ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1671
+ title='sum over theta and y')
1374
1672
  print(f'lower bound = {x_low} (inclusive)')
1375
1673
  print(f'upper bound = {x_upp} (exclusive)]')
1376
- accept = input_yesno('Accept these bounds (y/n)?', 'y')
1674
+ accept = input_yesno('Accept these bounds (y/n)?', 'y')
1377
1675
  clear_imshow('bright field')
1378
1676
  clear_imshow(title)
1379
1677
  clear_plot('sum over theta and y')
@@ -1381,106 +1679,134 @@ class Tomo:
1381
1679
  img_x_bounds = (x_low, x_upp)
1382
1680
  else:
1383
1681
  while True:
1384
- mask, img_x_bounds = draw_mask_1d(x_sum, title='select x data range',
1385
- legend='sum over theta and y')
1682
+ _, img_x_bounds = draw_mask_1d(
1683
+ x_sum, title='select x data range',
1684
+ legend='sum over theta and y')
1386
1685
  if len(img_x_bounds) == 1:
1387
1686
  break
1388
- else:
1389
- print(f'Choose a single connected data range')
1687
+ print('Choose a single connected data range')
1390
1688
  img_x_bounds = tuple(img_x_bounds[0])
1391
- if (num_tomo_stacks > 1 and img_x_bounds[1]-img_x_bounds[0]+1 <
1392
- int((delta_z-0.5*pixel_size)/pixel_size)):
1393
- self.logger.warning('Image bounds and pixel size prevent seamless stacking')
1689
+ if (num_tomo_stacks > 1
1690
+ and (img_x_bounds[1]-img_x_bounds[0]+1)
1691
+ < int((delta_z - 0.5*pixel_size) / pixel_size)):
1692
+ self._logger.warning(
1693
+ 'Image bounds and pixel size prevent seamless stacking')
1394
1694
  else:
1395
1695
  if num_tomo_stacks > 1:
1396
- raise NotImplementedError('Selecting image bounds for multiple stacks on FMB')
1696
+ raise NotImplementedError(
1697
+ 'Selecting image bounds for multiple stacks on FMB')
1397
1698
  # For FMB: use the first tomography image to select range
1398
- # RV: revisit if they do tomography with multiple stacks
1699
+ # RV revisit if they do tomography with multiple stacks
1399
1700
  x_sum = np.sum(first_image, 1)
1400
1701
  x_sum_min = x_sum.min()
1401
1702
  x_sum_max = x_sum.max()
1402
- if self.galaxy_flag:
1703
+ if self._interactive:
1704
+ print(
1705
+ 'Select vertical data reduction range from first '
1706
+ + 'tomography image')
1707
+ img_x_bounds = select_image_bounds(first_image, 0, title=title)
1403
1708
  if img_x_bounds is None:
1404
- img_x_bounds = (0, first_image.shape[0])
1709
+ raise RuntimeError('Unable to select image bounds')
1405
1710
  else:
1406
- print('Select vertical data reduction range from first tomography image')
1407
- img_x_bounds = select_image_bounds(first_image, 0, title=title)
1408
1711
  if img_x_bounds is None:
1409
- raise ValueError('Unable to select image bounds')
1712
+ self._logger.warning(
1713
+ 'img_x_bounds unspecified, reduce data for entire '
1714
+ + 'detector range')
1715
+ img_x_bounds = (0, first_image.shape[0])
1410
1716
 
1411
1717
  # Plot results
1412
- if self.save_figs:
1413
- if self.galaxy_flag:
1414
- path = 'tomo_reduce_plots'
1415
- else:
1416
- path = self.output_folder
1718
+ if self._save_figs:
1417
1719
  x_low = img_x_bounds[0]
1418
1720
  x_upp = img_x_bounds[1]
1419
1721
  tmp = np.copy(first_image)
1420
1722
  tmp_max = tmp.max()
1421
1723
  tmp[x_low,:] = tmp_max
1422
1724
  tmp[x_upp-1,:] = tmp_max
1423
- quick_imshow(tmp, title=title, path=path, save_fig=True, save_only=True)
1424
- quick_plot((range(x_sum.size), x_sum),
1425
- ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1426
- ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1427
- title='sum over theta and y', path=path, save_fig=True, save_only=True)
1725
+ quick_imshow(
1726
+ tmp, title=title, path=self._output_folder, save_fig=True,
1727
+ save_only=True)
1728
+ quick_plot(
1729
+ (range(x_sum.size), x_sum),
1730
+ ([x_low, x_low], [x_sum_min, x_sum_max], 'r-'),
1731
+ ([x_upp, x_upp], [x_sum_min, x_sum_max], 'r-'),
1732
+ title='sum over theta and y', path=self._output_folder,
1733
+ save_fig=True, save_only=True)
1428
1734
  del tmp
1429
1735
 
1430
- return(img_x_bounds)
1736
+ return img_x_bounds
1431
1737
 
1432
1738
  def _set_zoom_or_skip(self):
1433
- """Set zoom and/or theta skip to reduce memory the requirement for the analysis.
1434
1739
  """
1435
- # if input_yesno('\nDo you want to zoom in to reduce memory requirement (y/n)?', 'n'):
1436
- # zoom_perc = input_int(' Enter zoom percentage', ge=1, le=100)
1740
+ Set zoom and/or theta skip to reduce memory the requirement
1741
+ for the analysis.
1742
+ """
1743
+ # if input_yesno(
1744
+ # '\nDo you want to zoom in to reduce memory '
1745
+ # + 'requirement (y/n)?', 'n'):
1746
+ # zoom_perc = input_int(
1747
+ # ' Enter zoom percentage', ge=1, le=100)
1437
1748
  # else:
1438
1749
  # zoom_perc = None
1439
1750
  zoom_perc = None
1440
- # if input_yesno('Do you want to skip thetas to reduce memory requirement (y/n)?', 'n'):
1441
- # num_theta_skip = input_int(' Enter the number skip theta interval', ge=0,
1442
- # lt=num_theta)
1751
+ # if input_yesno(
1752
+ # 'Do you want to skip thetas to reduce memory '
1753
+ # + 'requirement (y/n)?', 'n'):
1754
+ # num_theta_skip = input_int(
1755
+ # ' Enter the number skip theta interval',
1756
+ # ge=0, lt=num_theta)
1443
1757
  # else:
1444
1758
  # num_theta_skip = None
1445
1759
  num_theta_skip = None
1446
- self.logger.debug(f'zoom_perc = {zoom_perc}')
1447
- self.logger.debug(f'num_theta_skip = {num_theta_skip}')
1760
+ self._logger.debug(f'zoom_perc = {zoom_perc}')
1761
+ self._logger.debug(f'num_theta_skip = {num_theta_skip}')
1448
1762
 
1449
- return(zoom_perc, num_theta_skip)
1763
+ return zoom_perc, num_theta_skip
1450
1764
 
1451
1765
  def _gen_tomo(self, nxentry, reduced_data):
1452
- """Generate tomography fields.
1453
- """
1454
- import numexpr as ne
1455
- import scipy.ndimage as spi
1766
+ """Generate tomography fields."""
1767
+ # Third party modules
1768
+ from numexpr import evaluate
1769
+ from scipy.ndimage import zoom
1456
1770
 
1457
- from CHAP.common.models.map import get_scanparser, import_scanparser
1771
+ # Local modules
1772
+ from CHAP.common.models.map import (
1773
+ get_scanparser,
1774
+ import_scanparser,
1775
+ )
1458
1776
 
1459
1777
  # Get full bright field
1460
1778
  tbf = np.asarray(reduced_data.data.bright_field)
1461
1779
  tbf_shape = tbf.shape
1462
1780
 
1463
1781
  # Get image bounds
1464
- img_x_bounds = tuple(reduced_data.get('img_x_bounds', (0, tbf_shape[0])))
1465
- img_y_bounds = tuple(reduced_data.get('img_y_bounds', (0, tbf_shape[1])))
1782
+ img_x_bounds = tuple(
1783
+ reduced_data.get('img_x_bounds', (0, tbf_shape[0])))
1784
+ img_y_bounds = tuple(
1785
+ reduced_data.get('img_y_bounds', (0, tbf_shape[1])))
1466
1786
 
1467
1787
  # Get resized dark field
1468
1788
  # if 'dark_field' in data:
1469
- # tbf = np.asarray(reduced_data.data.dark_field[
1470
- # img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]])
1789
+ # tbf = np.asarray(
1790
+ # reduced_data.data.dark_field[
1791
+ # img_x_bounds[0]:img_x_bounds[1],
1792
+ # img_y_bounds[0]:img_y_bounds[1]])
1471
1793
  # else:
1472
- # self.logger.warning('Dark field unavailable')
1794
+ # self._logger.warning('Dark field unavailable')
1473
1795
  # tdf = None
1474
1796
  tdf = None
1475
1797
 
1476
1798
  # Resize bright field
1477
- if img_x_bounds != (0, tbf.shape[0]) or img_y_bounds != (0, tbf.shape[1]):
1478
- tbf = tbf[img_x_bounds[0]:img_x_bounds[1],img_y_bounds[0]:img_y_bounds[1]]
1799
+ if (img_x_bounds != (0, tbf.shape[0])
1800
+ or img_y_bounds != (0, tbf.shape[1])):
1801
+ tbf = tbf[
1802
+ img_x_bounds[0]:img_x_bounds[1],
1803
+ img_y_bounds[0]:img_y_bounds[1]]
1479
1804
 
1480
1805
  # Get the tomography images
1481
1806
  image_key = nxentry.instrument.detector.get('image_key', None)
1482
1807
  if image_key and 'data' in nxentry.instrument.detector:
1483
- field_indices_all = [index for index, key in enumerate(image_key) if key == 0]
1808
+ field_indices_all = [
1809
+ index for index, key in enumerate(image_key) if key == 0]
1484
1810
  z_translation_all = nxentry.sample.z_translation[field_indices_all]
1485
1811
  z_translation_levels = sorted(list(set(z_translation_all)))
1486
1812
  num_tomo_stacks = len(z_translation_levels)
@@ -1490,33 +1816,44 @@ class Tomo:
1490
1816
  thetas = None
1491
1817
  tomo_stacks = []
1492
1818
  for i, z_translation in enumerate(z_translation_levels):
1493
- field_indices = [field_indices_all[index]
1494
- for index, z in enumerate(z_translation_all) if z == z_translation]
1495
- horizontal_shift = list(set(nxentry.sample.x_translation[field_indices]))
1496
- assert(len(horizontal_shift) == 1)
1819
+ field_indices = [
1820
+ field_indices_all[index]
1821
+ for index, z in enumerate(z_translation_all)
1822
+ if z == z_translation]
1823
+ horizontal_shift = list(
1824
+ set(nxentry.sample.x_translation[field_indices]))
1825
+ assert len(horizontal_shift) == 1
1497
1826
  horizontal_shifts += horizontal_shift
1498
- vertical_shift = list(set(nxentry.sample.z_translation[field_indices]))
1499
- assert(len(vertical_shift) == 1)
1827
+ vertical_shift = list(
1828
+ set(nxentry.sample.z_translation[field_indices]))
1829
+ assert len(vertical_shift) == 1
1500
1830
  vertical_shifts += vertical_shift
1501
- sequence_numbers = nxentry.instrument.detector.sequence_number[field_indices]
1831
+ sequence_numbers = nxentry.instrument.detector.sequence_number[
1832
+ field_indices]
1502
1833
  if thetas is None:
1503
- thetas = np.asarray(nxentry.sample.rotation_angle[field_indices]) \
1504
- [sequence_numbers]
1834
+ thetas = np.asarray(
1835
+ nxentry.sample.rotation_angle[
1836
+ field_indices])[sequence_numbers]
1505
1837
  else:
1506
- assert(all(thetas[i] == nxentry.sample.rotation_angle[field_indices[index]]
1507
- for i, index in enumerate(sequence_numbers)))
1508
- assert(list(set(sequence_numbers)) == [i for i in range(len(sequence_numbers))])
1509
- if list(sequence_numbers) == [i for i in range(len(sequence_numbers))]:
1510
- tomo_stack = np.asarray(nxentry.instrument.detector.data[field_indices])
1838
+ assert all(
1839
+ thetas[i] == nxentry.sample.rotation_angle[
1840
+ field_indices[index]]
1841
+ for i, index in enumerate(sequence_numbers))
1842
+ assert (list(set(sequence_numbers))
1843
+ == list(np.arange(0, (len(sequence_numbers)))))
1844
+ if (list(sequence_numbers)
1845
+ == list(np.arange(0, (len(sequence_numbers))))):
1846
+ tomo_stack = np.asarray(
1847
+ nxentry.instrument.detector.data[field_indices])
1511
1848
  else:
1512
- raise ValueError('Unable to load the tomography images')
1849
+ raise RuntimeError('Unable to load the tomography images')
1513
1850
  tomo_stacks.append(tomo_stack)
1514
1851
  else:
1515
- import_scanparser(nxentry.instrument.source.attrs['station'],
1516
- nxentry.instrument.source.attrs['experiment_type'])
1852
+ import_scanparser(
1853
+ nxentry.instrument.source.attrs['station'],
1854
+ nxentry.instrument.source.attrs['experiment_type'])
1517
1855
  tomo_field_scans = nxentry.spec_scans.tomo_fields
1518
1856
  num_tomo_stacks = len(tomo_field_scans.keys())
1519
- center_stack_index = int(num_tomo_stacks/2)
1520
1857
  detector_prefix = str(nxentry.instrument.detector.local_name)
1521
1858
  thetas = None
1522
1859
  tomo_stacks = []
@@ -1524,96 +1861,109 @@ class Tomo:
1524
1861
  vertical_shifts = []
1525
1862
  for nxsubentry_name, nxsubentry in tomo_field_scans.items():
1526
1863
  scan_number = int(nxsubentry_name.split('_')[-1])
1527
- scanparser = get_scanparser(tomo_field_scans.attrs['spec_file'], scan_number)
1528
- image_offset = int(nxsubentry.instrument.detector.frame_start_number)
1864
+ scanparser = get_scanparser(
1865
+ tomo_field_scans.attrs['spec_file'], scan_number)
1866
+ image_offset = int(
1867
+ nxsubentry.instrument.detector.frame_start_number)
1529
1868
  if thetas is None:
1530
1869
  thetas = np.asarray(nxsubentry.sample.rotation_angle)
1531
1870
  num_image = len(thetas)
1532
- tomo_stacks.append(scanparser.get_detector_data(detector_prefix,
1871
+ tomo_stacks.append(
1872
+ scanparser.get_detector_data(
1873
+ detector_prefix,
1533
1874
  (image_offset, image_offset+num_image)))
1534
1875
  horizontal_shifts.append(nxsubentry.sample.x_translation)
1535
1876
  vertical_shifts.append(nxsubentry.sample.z_translation)
1536
1877
 
1537
1878
  reduced_tomo_stacks = []
1538
- if self.galaxy_flag:
1539
- path = 'tomo_reduce_plots'
1540
- else:
1541
- path = self.output_folder
1542
1879
  for i, tomo_stack in enumerate(tomo_stacks):
1543
1880
  # Resize the tomography images
1544
- # Right now the range is the same for each set in the image stack.
1545
- if img_x_bounds != (0, tbf.shape[0]) or img_y_bounds != (0, tbf.shape[1]):
1546
- t0 = time()
1547
- tomo_stack = tomo_stack[:,img_x_bounds[0]:img_x_bounds[1],
1548
- img_y_bounds[0]:img_y_bounds[1]].astype('float64')
1549
- self.logger.debug(f'Resizing tomography images took {time()-t0:.2f} seconds')
1881
+ # Right now the range is the same for each set in the stack
1882
+ if (img_x_bounds != (0, tbf.shape[0])
1883
+ or img_y_bounds != (0, tbf.shape[1])):
1884
+ tomo_stack = tomo_stack[
1885
+ :,img_x_bounds[0]:img_x_bounds[1],
1886
+ img_y_bounds[0]:img_y_bounds[1]].astype('float64')
1550
1887
 
1551
1888
  # Subtract dark field
1552
1889
  if tdf is not None:
1553
- t0 = time()
1554
- with set_numexpr_threads(self.num_core):
1555
- ne.evaluate('tomo_stack-tdf', out=tomo_stack)
1556
- self.logger.debug(f'Subtracting dark field took {time()-t0:.2f} seconds')
1890
+ try:
1891
+ with SetNumexprThreads(self._num_core):
1892
+ evaluate('tomo_stack-tdf', out=tomo_stack)
1893
+ except TypeError as e:
1894
+ sys_exit(
1895
+ f'\nA {type(e).__name__} occured while subtracting '
1896
+ + 'the dark field with num_expr.evaluate()'
1897
+ + '\nTry reducing the detector range'
1898
+ + f'\n(currently img_x_bounds = {img_x_bounds}, and '
1899
+ + f'img_y_bounds = {img_y_bounds})\n')
1557
1900
 
1558
1901
  # Normalize
1559
- t0 = time()
1560
- with set_numexpr_threads(self.num_core):
1561
- ne.evaluate('tomo_stack/tbf', out=tomo_stack, truediv=True)
1562
- self.logger.debug(f'Normalizing took {time()-t0:.2f} seconds')
1902
+ try:
1903
+ with SetNumexprThreads(self._num_core):
1904
+ evaluate('tomo_stack/tbf', out=tomo_stack, truediv=True)
1905
+ except TypeError as e:
1906
+ sys_exit(
1907
+ f'\nA {type(e).__name__} occured while normalizing the '
1908
+ + 'tomography data with num_expr.evaluate()'
1909
+ + '\nTry reducing the detector range'
1910
+ + f'\n(currently img_x_bounds = {img_x_bounds}, and '
1911
+ + f'img_y_bounds = {img_y_bounds})\n')
1563
1912
 
1564
1913
  # Remove non-positive values and linearize data
1565
- t0 = time()
1566
- cutoff = 1.e-6
1567
- with set_numexpr_threads(self.num_core):
1568
- ne.evaluate('where(tomo_stack<cutoff, cutoff, tomo_stack)', out=tomo_stack)
1569
- with set_numexpr_threads(self.num_core):
1570
- ne.evaluate('-log(tomo_stack)', out=tomo_stack)
1571
- self.logger.debug('Removing non-positive values and linearizing data took '+
1572
- f'{time()-t0:.2f} seconds')
1914
+ # RV make input argument? cutoff = 1.e-6
1915
+ with SetNumexprThreads(self._num_core):
1916
+ evaluate(
1917
+ 'where(tomo_stack < 1.e-6, 1.e-6, tomo_stack)',
1918
+ out=tomo_stack)
1919
+ with SetNumexprThreads(self._num_core):
1920
+ evaluate('-log(tomo_stack)', out=tomo_stack)
1573
1921
 
1574
1922
  # Get rid of nans/infs that may be introduced by normalization
1575
- t0 = time()
1576
1923
  np.where(np.isfinite(tomo_stack), tomo_stack, 0.)
1577
- self.logger.debug(f'Remove nans/infs took {time()-t0:.2f} seconds')
1578
1924
 
1579
1925
  # Downsize tomography stack to smaller size
1580
- # TODO use theta_skip as well
1926
+ # RV use theta_skip as well
1581
1927
  tomo_stack = tomo_stack.astype('float32')
1582
- if not self.test_mode:
1928
+ if not self._test_mode:
1583
1929
  if len(tomo_stacks) == 1:
1584
1930
  title = f'red fullres theta {round(thetas[0], 2)+0}'
1585
1931
  else:
1586
- title = f'red stack {i+1} fullres theta {round(thetas[0], 2)+0}'
1587
- quick_imshow(tomo_stack[0,:,:], title=title, path=path, save_fig=self.save_figs,
1588
- save_only=self.save_only, block=self.block)
1589
- # if not self.block:
1932
+ title = f'red stack {i+1} fullres theta ' \
1933
+ + f'{round(thetas[0], 2)+0}'
1934
+ quick_imshow(
1935
+ tomo_stack[0,:,:], title=title, path=self._output_folder,
1936
+ save_fig=self._save_figs, save_only=self._save_only,
1937
+ block=self._block)
1938
+ # if not self._block:
1590
1939
  # clear_imshow(title)
1591
- if False and zoom_perc != 100:
1940
+ zoom_perc = 100
1941
+ if zoom_perc != 100:
1592
1942
  t0 = time()
1593
- self.logger.debug(f'Zooming in ...')
1943
+ self._logger.debug('Zooming in ...')
1594
1944
  tomo_zoom_list = []
1595
1945
  for j in range(tomo_stack.shape[0]):
1596
- tomo_zoom = spi.zoom(tomo_stack[j,:,:], 0.01*zoom_perc)
1946
+ tomo_zoom = zoom(tomo_stack[j,:,:], 0.01*zoom_perc)
1597
1947
  tomo_zoom_list.append(tomo_zoom)
1598
- tomo_stack = np.stack([tomo_zoom for tomo_zoom in tomo_zoom_list])
1599
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1600
- self.logger.info(f'Zooming in took {time()-t0:.2f} seconds')
1948
+ tomo_stack = np.stack(tomo_zoom_list)
1949
+ self._logger.info(f'Zooming in took {time()-t0:.2f} seconds')
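
When a zoom percentage below 100 is requested, each projection is downsampled individually with scipy's spline-based zoom and the results are restacked. A short sketch of that loop in isolation (hypothetical helper, not in the package):

```python
# Hypothetical sketch of per-projection downsampling with scipy.ndimage.zoom.
import numpy as np
from scipy.ndimage import zoom

def downsample_stack(tomo_stack, zoom_perc):
    if zoom_perc == 100:
        return tomo_stack
    return np.stack(
        [zoom(projection, 0.01*zoom_perc) for projection in tomo_stack])
```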
1601
1950
  del tomo_zoom_list
1602
- if not self.test_mode:
1603
- title = f'red stack {zoom_perc}p theta {round(thetas[0], 2)+0}'
1604
- quick_imshow(tomo_stack[0,:,:], title=title, path=path, save_fig=self.save_figs,
1605
- save_only=self.save_only, block=self.block)
1606
- # if not self.block:
1951
+ if not self._test_mode:
1952
+ title = f'red stack {zoom_perc}p theta ' \
1953
+ + f'{round(thetas[0], 2)+0}'
1954
+ quick_imshow(
1955
+ tomo_stack[0,:,:], title=title,
1956
+ path=self._output_folder, save_fig=self._save_figs,
1957
+ save_only=self._save_only, block=self._block)
1958
+ # if not self._block:
1607
1959
  # clear_imshow(title)
1608
1960
 
1609
1961
  # Save test data to file
1610
- if self.test_mode:
1611
- # row_index = int(tomo_stack.shape[0]/2)
1612
- # np.savetxt(f'{self.output_folder}/red_stack_{i+1}.txt', tomo_stack[row_index,:,:],
1613
- # fmt='%.6e')
1962
+ if self._test_mode:
1614
1963
  row_index = int(tomo_stack.shape[1]/2)
1615
- np.savetxt(f'{self.output_folder}/red_stack_{i+1}.txt', tomo_stack[:,row_index,:],
1616
- fmt='%.6e')
1964
+ np.savetxt(
1965
+ f'{self._output_folder}/red_stack_{i+1}.txt',
1966
+ tomo_stack[:,row_index,:], fmt='%.6e')
1617
1967
 
1618
1968
  # Combine resized stacks
1619
1969
  reduced_tomo_stacks.append(tomo_stack)
@@ -1628,220 +1978,265 @@ class Tomo:
1628
1978
  del tdf
1629
1979
  del tbf
1630
1980
 
1631
- return(reduced_data)
1981
+ return reduced_data
1632
1982
 
1633
- def _find_center_one_plane(self, sinogram, row, thetas, eff_pixel_size, cross_sectional_dim,
1634
- path=None, tol=0.1, num_core=1):
1635
- """Find center for a single tomography plane.
1636
- """
1637
- import tomopy
1983
+ def _find_center_one_plane(
1984
+ self, sinogram, row, thetas, eff_pixel_size, cross_sectional_dim,
1985
+ path=None, num_core=1): # , tol=0.1):
1986
+ """Find center for a single tomography plane."""
1987
+ from tomopy import find_center_vo
1638
1988
 
1639
1989
  # Try automatic center finding routines for initial value
1640
1990
  # sinogram index order: theta,column
1641
1991
  # need column,theta for iradon, so take transpose
1642
1992
  sinogram = np.asarray(sinogram)
1643
- sinogram_T = sinogram.T
1993
+ sinogram_t = sinogram.T
1644
1994
  center = sinogram.shape[1]/2
1645
1995
 
1646
1996
  # Try using Nghia Vo’s method
1647
1997
  t0 = time()
1648
- if num_core > num_core_tomopy_limit:
1649
- self.logger.debug(f'Running find_center_vo on {num_core_tomopy_limit} cores ...')
1650
- tomo_center = tomopy.find_center_vo(sinogram, ncore=num_core_tomopy_limit)
1998
+ if num_core > NUM_CORE_TOMOPY_LIMIT:
1999
+ self._logger.debug(
2000
+ f'Running find_center_vo on {NUM_CORE_TOMOPY_LIMIT} cores ...')
2001
+ tomo_center = find_center_vo(
2002
+ sinogram, ncore=NUM_CORE_TOMOPY_LIMIT)
1651
2003
  else:
1652
- self.logger.debug(f'Running find_center_vo on {num_core} cores ...')
1653
- tomo_center = tomopy.find_center_vo(sinogram, ncore=num_core)
1654
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1655
- self.logger.info(f'Finding the center using Nghia Vo’s method took {time()-t0:.2f} seconds')
2004
+ tomo_center = find_center_vo(sinogram, ncore=num_core)
2005
+ self._logger.info(
2006
+ f'Finding center using Nghia Vo’s method took {time()-t0:.2f} '
2007
+ + 'seconds')
1656
2008
  center_offset_vo = tomo_center-center
1657
- self.logger.info(f'Center at row {row} using Nghia Vo’s method = {center_offset_vo:.2f}')
2009
+ self._logger.info(
2010
+ f'Center at row {row} using Nghia Vo’s method = '
2011
+ + f'{center_offset_vo:.2f}')
1658
2012
  t0 = time()
1659
- self.logger.debug(f'Running _reconstruct_one_plane on {self.num_core} cores ...')
1660
- recon_plane = self._reconstruct_one_plane(sinogram_T, tomo_center, thetas,
1661
- eff_pixel_size, cross_sectional_dim, False, num_core)
1662
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1663
- self.logger.info(f'Reconstructing row {row} took {time()-t0:.2f} seconds')
2013
+ recon_plane = self._reconstruct_one_plane(
2014
+ sinogram_t, tomo_center, thetas, eff_pixel_size,
2015
+ cross_sectional_dim, False, num_core)
2016
+ self._logger.info(
2017
+ f'Reconstructing row {row} took {time()-t0:.2f} seconds')
1664
2018
 
1665
2019
  title = f'edges row{row} center offset{center_offset_vo:.2f} Vo'
1666
2020
  self._plot_edges_one_plane(recon_plane, title, path=path)
1667
2021
 
1668
2022
  # Try using phase correlation method
1669
- # if input_yesno('Try finding center using phase correlation (y/n)?', 'n'):
2023
+ # if input_yesno('
2024
+ # Try finding center using phase correlation (y/n)?',
2025
+ # 'n'):
1670
2026
  # t0 = time()
1671
- # self.logger.debug(f'Running find_center_pc ...')
1672
- # tomo_center = tomopy.find_center_pc(sinogram, sinogram, tol=0.1, rotc_guess=tomo_center)
2027
+ # tomo_center = find_center_pc(
2028
+ # sinogram, sinogram, tol=0.1, rotc_guess=tomo_center)
1673
2029
  # error = 1.
1674
2030
  # while error > tol:
1675
2031
  # prev = tomo_center
1676
- # tomo_center = tomopy.find_center_pc(sinogram, sinogram, tol=tol,
1677
- # rotc_guess=tomo_center)
2032
+ # tomo_center = find_center_pc(
2033
+ # sinogram, sinogram, tol=tol, rotc_guess=tomo_center)
1678
2034
  # error = np.abs(tomo_center-prev)
1679
- # self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1680
- # self.logger.info('Finding the center using the phase correlation method took '+
1681
- # f'{time()-t0:.2f} seconds')
2035
+ # self._logger.info(
2036
+ # 'Finding center using the phase correlation method '
2037
+ # + f'took {time()-t0:.2f} seconds')
1682
2038
  # center_offset = tomo_center-center
1683
- # print(f'Center at row {row} using phase correlation = {center_offset:.2f}')
2039
+ # print(
2040
+ # f'Center at row {row} using phase correlation = '
2041
+ # + f'{center_offset:.2f}')
1684
2042
  # t0 = time()
1685
- # self.logger.debug(f'Running _reconstruct_one_plane on {self.num_core} cores ...')
1686
- # recon_plane = self._reconstruct_one_plane(sinogram_T, tomo_center, thetas,
1687
- # eff_pixel_size, cross_sectional_dim, False, num_core)
1688
- # self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1689
- # self.logger.info(f'Reconstructing row {row} took {time()-t0:.2f} seconds')
2043
+ # recon_plane = self._reconstruct_one_plane(
2044
+ # sinogram_t, tomo_center, thetas, eff_pixel_size,
2045
+ # cross_sectional_dim, False, num_core)
2046
+ # self._logger.info(
2047
+ # f'Reconstructing row {row} took {time()-t0:.2f} seconds')
1690
2048
  #
1691
- # title = f'edges row{row} center_offset{center_offset:.2f} PC'
2049
+ # title = \
2050
+ # f'edges row{row} center_offset{center_offset:.2f} PC'
1692
2051
  # self._plot_edges_one_plane(recon_plane, title, path=path)
1693
2052
 
1694
2053
  # Select center location
1695
- # if input_yesno('Accept a center location (y) or continue search (n)?', 'y'):
1696
- if True:
1697
- # center_offset = input_num(' Enter chosen center offset', ge=-center, le=center,
1698
- # default=center_offset_vo)
1699
- center_offset = center_offset_vo
1700
- del sinogram_T
1701
- del recon_plane
1702
- return float(center_offset)
1703
-
1704
- # perform center finding search
1705
- while True:
1706
- center_offset_low = input_int('\nEnter lower bound for center offset', ge=-center,
1707
- le=center)
1708
- center_offset_upp = input_int('Enter upper bound for center offset',
1709
- ge=center_offset_low, le=center)
1710
- if center_offset_upp == center_offset_low:
1711
- center_offset_step = 1
1712
- else:
1713
- center_offset_step = input_int('Enter step size for center offset search', ge=1,
1714
- le=center_offset_upp-center_offset_low)
1715
- num_center_offset = 1+int((center_offset_upp-center_offset_low)/center_offset_step)
1716
- center_offsets = np.linspace(center_offset_low, center_offset_upp, num_center_offset)
1717
- for center_offset in center_offsets:
1718
- if center_offset == center_offset_vo:
1719
- continue
1720
- t0 = time()
1721
- self.logger.debug(f'Running _reconstruct_one_plane on {num_core} cores ...')
1722
- recon_plane = self._reconstruct_one_plane(sinogram_T, center_offset+center, thetas,
1723
- eff_pixel_size, cross_sectional_dim, False, num_core)
1724
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1725
- self.logger.info(f'Reconstructing center_offset {center_offset} took '+
1726
- f'{time()-t0:.2f} seconds')
1727
- title = f'edges row{row} center_offset{center_offset:.2f}'
1728
- self._plot_edges_one_plane(recon_plane, title, path=path)
1729
- if input_int('\nContinue (0) or end the search (1)', ge=0, le=1):
1730
- break
1731
-
1732
- del sinogram_T
2054
+ # if input_yesno(
2055
+ # 'Accept a center location (y) or continue search (n)?',
2056
+ # 'y'):
2057
+ # center_offset = input_num(' Enter chosen center offset',
2058
+ # ge=-center, le=center, default=center_offset_vo)
2059
+ # return float(center_offset)
2060
+
2061
+ # Perform center finding search
2062
+ # while True:
2063
+ # center_offset_low = input_int(
2064
+ # '\nEnter lower bound for center offset', ge=-center,le=center)
2065
+ # center_offset_upp = input_int(
2066
+ # 'Enter upper bound for center offset', ge=center_offset_low,
2067
+ # le=center)
2068
+ # if center_offset_upp == center_offset_low:
2069
+ # center_offset_step = 1
2070
+ # else:
2071
+ # center_offset_step = input_int(
2072
+ # 'Enter step size for center offset search', ge=1,
2073
+ # le=center_offset_upp-center_offset_low)
2074
+ # num_center_offset = 1 + int(
2075
+ # (center_offset_upp-center_offset_low) / center_offset_step)
2076
+ # center_offsets = np.linspace(
2077
+ # center_offset_low, center_offset_upp, num_center_offset)
2078
+ # for center_offset in center_offsets:
2079
+ # if center_offset == center_offset_vo:
2080
+ # continue
2081
+ # t0 = time()
2082
+ # recon_plane = self._reconstruct_one_plane(
2083
+ # sinogram_t, center_offset+center, thetas, eff_pixel_size,
2084
+ # cross_sectional_dim, False, num_core)
2085
+ # self._logger.info(
2086
+ # f'Reconstructing center_offset {center_offset} took '
2087
+ # + 'f{time()-t0:.2f} seconds')
2088
+ # title = f'edges row{row} center_offset{center_offset:.2f}'
2089
+ # self._plot_edges_one_plane(recon_plane, title, path=path)
2090
+ # if input_int('\nContinue (0) or end the search (1)', ge=0, le=1):
2091
+ # break
2092
+
2093
+ del sinogram_t
1733
2094
  del recon_plane
1734
- center_offset = input_num(' Enter chosen center offset', ge=-center, le=center)
2095
+ # center_offset = input_num(
2096
+ # ' Enter chosen center offset', ge=-center, le=center)
2097
+ center_offset = center_offset_vo
2098
+
1735
2099
  return float(center_offset)
1736
2100
 
1737
- def _reconstruct_one_plane(self, tomo_plane_T, center, thetas, eff_pixel_size,
2101
+ def _reconstruct_one_plane(
2102
+ self, tomo_plane_t, center, thetas, eff_pixel_size,
1738
2103
  cross_sectional_dim, plot_sinogram=True, num_core=1):
1739
- """Invert the sinogram for a single tomography plane.
1740
- """
1741
- import scipy.ndimage as spi
2104
+ """Invert the sinogram for a single tomography plane."""
2105
+ from scipy.ndimage import gaussian_filter
1742
2106
  from skimage.transform import iradon
1743
- import tomopy
2107
+ from tomopy import misc
1744
2108
 
1745
- # tomo_plane_T index order: column,theta
1746
- assert(0 <= center < tomo_plane_T.shape[0])
1747
- center_offset = center-tomo_plane_T.shape[0]/2
1748
- two_offset = 2*int(np.round(center_offset))
2109
+ # tomo_plane_t index order: column,theta
2110
+ assert 0 <= center < tomo_plane_t.shape[0]
2111
+ center_offset = center-tomo_plane_t.shape[0]/2
2112
+ two_offset = 2 * int(np.round(center_offset))
1749
2113
  two_offset_abs = np.abs(two_offset)
1750
- max_rad = int(0.55*(cross_sectional_dim/eff_pixel_size)) # 10% slack to avoid edge effects
1751
- if max_rad > 0.5*tomo_plane_T.shape[0]:
1752
- max_rad = 0.5*tomo_plane_T.shape[0]
1753
- dist_from_edge = max(1, int(np.floor((tomo_plane_T.shape[0]-two_offset_abs)/2.)-max_rad))
2114
+ # Add 10% slack to max_rad to avoid edge effects
2115
+ max_rad = int(0.55 * (cross_sectional_dim/eff_pixel_size))
2116
+ if max_rad > 0.5*tomo_plane_t.shape[0]:
2117
+ max_rad = 0.5*tomo_plane_t.shape[0]
2118
+ dist_from_edge = max(1, int(np.floor(
2119
+ (tomo_plane_t.shape[0] - two_offset_abs) / 2.0) - max_rad))
1754
2120
  if two_offset >= 0:
1755
- self.logger.debug(f'sinogram range = [{two_offset+dist_from_edge}, {-dist_from_edge}]')
1756
- sinogram = tomo_plane_T[two_offset+dist_from_edge:-dist_from_edge,:]
2121
+ self._logger.debug(
2122
+ f'sinogram range = [{two_offset+dist_from_edge}, '
2123
+ + f'{-dist_from_edge}]')
2124
+ sinogram = tomo_plane_t[
2125
+ two_offset+dist_from_edge:-dist_from_edge,:]
1757
2126
  else:
1758
- self.logger.debug(f'sinogram range = [{dist_from_edge}, {two_offset-dist_from_edge}]')
1759
- sinogram = tomo_plane_T[dist_from_edge:two_offset-dist_from_edge,:]
1760
- if not self.galaxy_flag and plot_sinogram:
1761
- quick_imshow(sinogram.T, f'sinogram center offset{center_offset:.2f}', aspect='auto',
1762
- path=self.output_folder, save_fig=self.save_figs, save_only=self.save_only,
1763
- block=self.block)
2127
+ self._logger.debug(
2128
+ f'sinogram range = [{dist_from_edge}, '
2129
+ + f'{two_offset-dist_from_edge}]')
2130
+ sinogram = tomo_plane_t[dist_from_edge:two_offset-dist_from_edge,:]
2131
+ if plot_sinogram:
2132
+ quick_imshow(
2133
+ sinogram.T, f'sinogram center offset{center_offset:.2f}',
2134
+ aspect='auto', path=self._output_folder,
2135
+ save_fig=self._save_figs, save_only=self._save_only,
2136
+ block=self._block)
1764
2137
 
1765
2138
  # Inverting sinogram
1766
2139
  t0 = time()
1767
2140
  recon_sinogram = iradon(sinogram, theta=thetas, circle=True)
1768
- self.logger.debug(f'Inverting sinogram took {time()-t0:.2f} seconds')
2141
+ self._logger.info(f'Inverting sinogram took {time()-t0:.2f} seconds')
1769
2142
  del sinogram
1770
2143
 
1771
2144
  # Performing Gaussian filtering and removing ring artifacts
1772
- recon_parameters = None#self.config.get('recon_parameters')
2145
+ recon_parameters = None # self._config.get('recon_parameters')
1773
2146
  if recon_parameters is None:
1774
2147
  sigma = 1.0
1775
2148
  ring_width = 15
1776
2149
  else:
1777
2150
  sigma = recon_parameters.get('gaussian_sigma', 1.0)
1778
2151
  if not is_num(sigma, ge=0.0):
1779
- self.logger.warning(f'Invalid gaussian_sigma ({sigma}) in _reconstruct_one_plane, '+
1780
- 'set to a default value of 1.0')
2152
+ self._logger.warning(
2153
+ f'Invalid gaussian_sigma ({sigma}) in '
2154
+ + '_reconstruct_one_plane, set to a default of 1.0')
1781
2155
  sigma = 1.0
1782
2156
  ring_width = recon_parameters.get('ring_width', 15)
1783
2157
  if not isinstance(ring_width, int) or ring_width < 0:
1784
- self.logger.warning(f'Invalid ring_width ({ring_width}) in '+
1785
- '_reconstruct_one_plane, set to a default value of 15')
2158
+ self._logger.warning(
2159
+ f'Invalid ring_width ({ring_width}) in '
2160
+ + '_reconstruct_one_plane, set to a default of 15')
1786
2161
  ring_width = 15
1787
- t0 = time()
1788
- recon_sinogram = spi.gaussian_filter(recon_sinogram, sigma, mode='nearest')
2162
+ recon_sinogram = gaussian_filter(
2163
+ recon_sinogram, sigma, mode='nearest')
1789
2164
  recon_clean = np.expand_dims(recon_sinogram, axis=0)
1790
2165
  del recon_sinogram
1791
- recon_clean = tomopy.misc.corr.remove_ring(recon_clean, rwidth=ring_width, ncore=num_core)
1792
- self.logger.debug(f'Filtering and removing ring artifacts took {time()-t0:.2f} seconds')
2166
+ recon_clean = misc.corr.remove_ring(
2167
+ recon_clean, rwidth=ring_width, ncore=num_core)
1793
2168
 
1794
2169
  return recon_clean
1795
2170
 
1796
2171
  def _plot_edges_one_plane(self, recon_plane, title, path=None):
2172
+ """
2173
+ Create an "edges plot" for a singled reconstructed tomography
2174
+ data plane.
2175
+ """
1797
2176
  from skimage.restoration import denoise_tv_chambolle
1798
2177
 
1799
- vis_parameters = None#self.config.get('vis_parameters')
2178
+ vis_parameters = None # self._config.get('vis_parameters')
1800
2179
  if vis_parameters is None:
1801
2180
  weight = 0.1
1802
2181
  else:
1803
2182
  weight = vis_parameters.get('denoise_weight', 0.1)
1804
2183
  if not is_num(weight, ge=0.0):
1805
- self.logger.warning(f'Invalid weight ({weight}) in _plot_edges_one_plane, '+
1806
- 'set to a default value of 0.1')
2184
+ self._logger.warning(
2185
+ f'Invalid weight ({weight}) in _plot_edges_one_plane, '
2186
+ + 'set to a default of 0.1')
1807
2187
  weight = 0.1
1808
2188
  edges = denoise_tv_chambolle(recon_plane, weight=weight)
1809
2189
  vmax = np.max(edges[0,:,:])
1810
2190
  vmin = -vmax
1811
2191
  if path is None:
1812
- path = self.output_folder
1813
- quick_imshow(edges[0,:,:], f'{title} coolwarm', path=path, cmap='coolwarm',
1814
- save_fig=self.save_figs, save_only=self.save_only, block=self.block)
1815
- quick_imshow(edges[0,:,:], f'{title} gray', path=path, cmap='gray', vmin=vmin, vmax=vmax,
1816
- save_fig=self.save_figs, save_only=self.save_only, block=self.block)
2192
+ path = self._output_folder
2193
+ quick_imshow(
2194
+ edges[0,:,:], f'{title} coolwarm', path=path, cmap='coolwarm',
2195
+ save_fig=self._save_figs, save_only=self._save_only,
2196
+ block=self._block)
2197
+ quick_imshow(
2198
+ edges[0,:,:], f'{title} gray', path=path, cmap='gray', vmin=vmin,
2199
+ vmax=vmax, save_fig=self._save_figs, save_only=self._save_only,
2200
+ block=self._block)
1817
2201
  del edges
1818
2202
 
1819
- def _reconstruct_one_tomo_stack(self, tomo_stack, thetas, center_offsets=[], num_core=1,
2203
+ def _reconstruct_one_tomo_stack(
2204
+ self, tomo_stack, thetas, center_offsets=None, num_core=1,
1820
2205
  algorithm='gridrec'):
1821
- """Reconstruct a single tomography stack.
1822
- """
1823
- import tomopy
2206
+ """Reconstruct a single tomography stack."""
2207
+ # Third party modules
2208
+ from tomopy import (
2209
+ astra,
2210
+ misc,
2211
+ prep,
2212
+ recon,
2213
+ )
1824
2214
 
1825
2215
  # tomo_stack order: row,theta,column
1826
- # input thetas must be in degrees
1827
- # centers_offset: tomography axis shift in pixels relative to column center
2216
+ # input thetas must be in degrees
2217
+ # centers_offset: tomography axis shift in pixels relative
2218
+ # to column center
1828
2219
  # RV should we remove stripes?
1829
2220
  # https://tomopy.readthedocs.io/en/latest/api/tomopy.prep.stripe.html
1830
2221
  # RV should we remove rings?
1831
2222
  # https://tomopy.readthedocs.io/en/latest/api/tomopy.misc.corr.html
1832
- # RV: Add an option to do (extra) secondary iterations later or to do some sort of convergence test?
1833
- if not len(center_offsets):
2223
+ # RV add an option to do (extra) secondary iterations later or
2224
+ # to do some sort of convergence test?
2225
+ if center_offsets is None:
1834
2226
  centers = np.zeros((tomo_stack.shape[0]))
1835
2227
  elif len(center_offsets) == 2:
1836
- centers = np.linspace(center_offsets[0], center_offsets[1], tomo_stack.shape[0])
2228
+ centers = np.linspace(
2229
+ center_offsets[0], center_offsets[1], tomo_stack.shape[0])
1837
2230
  else:
1838
2231
  if center_offsets.size != tomo_stack.shape[0]:
1839
- raise ValueError('center_offsets dimension mismatch in reconstruct_one_tomo_stack')
2232
+ raise RuntimeError(
2233
+ 'center_offsets dimension mismatch in '
2234
+ + 'reconstruct_one_tomo_stack')
1840
2235
  centers = center_offsets
1841
2236
  centers += tomo_stack.shape[2]/2
1842
2237
 
1843
2238
  # Get reconstruction parameters
1844
- recon_parameters = None#self.config.get('recon_parameters')
2239
+ recon_parameters = None # self._config.get('recon_parameters')
1845
2240
  if recon_parameters is None:
1846
2241
  sigma = 2.0
1847
2242
  secondary_iters = 0
@@ -1849,80 +2244,100 @@ class Tomo:
1849
2244
  else:
1850
2245
  sigma = recon_parameters.get('stripe_fw_sigma', 2.0)
1851
2246
  if not is_num(sigma, ge=0):
1852
- self.logger.warning(f'Invalid stripe_fw_sigma ({sigma}) in '+
1853
- '_reconstruct_one_tomo_stack, set to a default value of 2.0')
2247
+ self._logger.warning(
2248
+ f'Invalid stripe_fw_sigma ({sigma}) in '
2249
+ + '_reconstruct_one_tomo_stack, set to a default of 2.0')
1854
2250
  ring_width = 15
1855
2251
  secondary_iters = recon_parameters.get('secondary_iters', 0)
1856
2252
  if not isinstance(secondary_iters, int) or secondary_iters < 0:
1857
- self.logger.warning(f'Invalid secondary_iters ({secondary_iters}) in '+
1858
- '_reconstruct_one_tomo_stack, set to a default value of 0 (skip them)')
2253
+ self._logger.warning(
2254
+ f'Invalid secondary_iters ({secondary_iters}) in '
2255
+ + '_reconstruct_one_tomo_stack, set to a default of 0 '
2256
+ + '(i.e., skip them)')
1859
2257
  ring_width = 0
1860
2258
  ring_width = recon_parameters.get('ring_width', 15)
1861
2259
  if not isinstance(ring_width, int) or ring_width < 0:
1862
- self.logger.warning(f'Invalid ring_width ({ring_width}) in '+
1863
- '_reconstruct_one_plane, set to a default value of 15')
2260
+ self._logger.warning(
2261
+ f'Invalid ring_width ({ring_width}) in '
2262
+ + '_reconstruct_one_plane, set to a default of 15')
1864
2263
  ring_width = 15
1865
2264
 
1866
2265
  # Remove horizontal stripe
1867
- t0 = time()
1868
- if num_core > num_core_tomopy_limit:
1869
- self.logger.debug('Running remove_stripe_fw on {num_core_tomopy_limit} cores ...')
1870
- tomo_stack = tomopy.prep.stripe.remove_stripe_fw(tomo_stack, sigma=sigma,
1871
- ncore=num_core_tomopy_limit)
2266
+ if num_core > NUM_CORE_TOMOPY_LIMIT:
2267
+ tomo_stack = prep.stripe.remove_stripe_fw(
2268
+ tomo_stack, sigma=sigma, ncore=NUM_CORE_TOMOPY_LIMIT)
1872
2269
  else:
1873
- self.logger.debug(f'Running remove_stripe_fw on {num_core} cores ...')
1874
- tomo_stack = tomopy.prep.stripe.remove_stripe_fw(tomo_stack, sigma=sigma,
1875
- ncore=num_core)
1876
- self.logger.debug(f'... tomopy.prep.stripe.remove_stripe_fw took {time()-t0:.2f} seconds')
2270
+ tomo_stack = prep.stripe.remove_stripe_fw(
2271
+ tomo_stack, sigma=sigma, ncore=num_core)
1877
2272
 
1878
2273
  # Perform initial image reconstruction
1879
- self.logger.debug('Performing initial image reconstruction')
2274
+ self._logger.debug('Performing initial image reconstruction')
1880
2275
  t0 = time()
1881
- self.logger.debug(f'Running recon on {num_core} cores ...')
1882
- tomo_recon_stack = tomopy.recon(tomo_stack, np.radians(thetas), centers,
1883
- sinogram_order=True, algorithm=algorithm, ncore=num_core)
1884
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1885
- self.logger.info(f'Performing initial image reconstruction took {time()-t0:.2f} seconds')
2276
+ tomo_recon_stack = recon(
2277
+ tomo_stack, np.radians(thetas), centers, sinogram_order=True,
2278
+ algorithm=algorithm, ncore=num_core)
2279
+ self._logger.info(
2280
+ f'Performing initial image reconstruction took {time()-t0:.2f} '
2281
+ + 'seconds')
1886
2282
 
1887
2283
  # Run optional secondary iterations
1888
2284
  if secondary_iters > 0:
1889
- self.logger.debug(f'Running {secondary_iters} secondary iterations')
1890
- #options = {'method':'SIRT_CUDA', 'proj_type':'cuda', 'num_iter':secondary_iters}
1891
- #RV: doesn't work for me:
1892
- #"Error: CUDA error 803: system has unsupported display driver/cuda driver combination."
1893
- #options = {'method':'SIRT', 'proj_type':'linear', 'MinConstraint': 0, 'num_iter':secondary_iters}
1894
- #SIRT did not finish while running overnight
1895
- #options = {'method':'SART', 'proj_type':'linear', 'num_iter':secondary_iters}
1896
- options = {'method':'SART', 'proj_type':'linear', 'MinConstraint': 0,
1897
- 'num_iter':secondary_iters}
2285
+ self._logger.debug(
2286
+ f'Running {secondary_iters} secondary iterations')
2287
+ # options = {
2288
+ # 'method': 'SIRT_CUDA',
2289
+ # 'proj_type': 'cuda',
2290
+ # 'num_iter': secondary_iters
2291
+ # }
2292
+ # RV doesn't work for me:
2293
+ # "Error: CUDA error 803: system has unsupported display driver/cuda driver
2294
+ # combination."
2295
+ # options = {
2296
+ # 'method': 'SIRT',
2297
+ # 'proj_type': 'linear',
2298
+ # 'MinConstraint': 0,
2299
+ # 'num_iter':secondary_iters
2300
+ # }
2301
+ # SIRT did not finish while running overnight
2302
+ # options = {
2303
+ # 'method': 'SART',
2304
+ # 'proj_type': 'linear',
2305
+ # 'num_iter':secondary_iters
2306
+ # }
2307
+ options = {
2308
+ 'method': 'SART',
2309
+ 'proj_type': 'linear',
2310
+ 'MinConstraint': 0,
2311
+ 'num_iter': secondary_iters,
2312
+ }
1898
2313
  t0 = time()
1899
- self.logger.debug(f'Running recon on {num_core} cores ...')
1900
- tomo_recon_stack = tomopy.recon(tomo_stack, np.radians(thetas), centers,
1901
- init_recon=tomo_recon_stack, options=options, sinogram_order=True,
1902
- algorithm=tomopy.astra, ncore=num_core)
1903
- self.logger.debug(f'... done in {time()-t0:.2f} seconds')
1904
- self.logger.info(f'Performing secondary iterations took {time()-t0:.2f} seconds')
2314
+ tomo_recon_stack = recon(
2315
+ tomo_stack, np.radians(thetas), centers,
2316
+ init_recon=tomo_recon_stack, options=options,
2317
+ sinogram_order=True, algorithm=astra, ncore=num_core)
2318
+ self._logger.info(
2319
+ f'Performing secondary iterations took {time()-t0:.2f} '
2320
+ + 'seconds')
1905
2321
 
1906
2322
  # Remove ring artifacts
1907
- t0 = time()
1908
- tomopy.misc.corr.remove_ring(tomo_recon_stack, rwidth=ring_width, out=tomo_recon_stack,
1909
- ncore=num_core)
1910
- self.logger.debug(f'Removing ring artifacts took {time()-t0:.2f} seconds')
2323
+ misc.corr.remove_ring(
2324
+ tomo_recon_stack, rwidth=ring_width, out=tomo_recon_stack,
2325
+ ncore=num_core)
1911
2326
 
1912
2327
  return tomo_recon_stack
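
The stack reconstruction above chains Fourier-wavelet stripe removal, a gridrec reconstruction, optional secondary SART iterations through tomopy's astra wrapper (seeded with the gridrec result via init_recon), and a final ring-artifact removal. A minimal sketch of the non-optional part of that chain, assuming tomo_stack in sinogram order (row, theta, column) and thetas in degrees; the sizes, centers, and parameter values are illustrative, and the astra-based secondary pass is omitted so the sketch runs without the ASTRA toolkit:

import numpy as np
from tomopy import misc, prep, recon

thetas = np.linspace(0., 180., 180, endpoint=False)
tomo_stack = np.random.rand(8, 180, 128)   # hypothetical (row, theta, column)
centers = np.full(tomo_stack.shape[0], tomo_stack.shape[2]/2)

# Remove horizontal stripes (ring precursors) in the sinograms
tomo_stack = prep.stripe.remove_stripe_fw(tomo_stack, sigma=2.0, ncore=1)

# Initial reconstruction with the gridrec algorithm
tomo_recon_stack = recon(
    tomo_stack, np.radians(thetas), centers, sinogram_order=True,
    algorithm='gridrec', ncore=1)

# Remove ring artifacts in place
misc.corr.remove_ring(
    tomo_recon_stack, rwidth=15, out=tomo_recon_stack, ncore=1)
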
1913
2328
 
1914
- def _resize_reconstructed_data(self, data, x_bounds=None, y_bounds=None, z_bounds=None,
2329
+ def _resize_reconstructed_data(
2330
+ self, data, x_bounds=None, y_bounds=None, z_bounds=None,
1915
2331
  z_only=False):
1916
- """Resize the reconstructed tomography data.
1917
- """
2332
+ """Resize the reconstructed tomography data."""
1918
2333
  # Data order: row(z),x,y or stack,row(z),x,y
1919
2334
  if isinstance(data, list):
1920
2335
  for stack in data:
1921
- assert(stack.ndim == 3)
2336
+ assert stack.ndim == 3
1922
2337
  num_tomo_stacks = len(data)
1923
2338
  tomo_recon_stacks = data
1924
2339
  else:
1925
- assert(data.ndim == 3)
2340
+ assert data.ndim == 3
1926
2341
  num_tomo_stacks = 1
1927
2342
  tomo_recon_stacks = [data]
1928
2343
 
@@ -1931,77 +2346,88 @@ class Tomo:
1931
2346
  elif not z_only and x_bounds is None:
1932
2347
  # Selecting x bounds (in yz-plane)
1933
2348
  tomosum = 0
1934
- [tomosum := tomosum+np.sum(tomo_recon_stacks[i], axis=(0,2))
1935
- for i in range(num_tomo_stacks)]
1936
- select_x_bounds = input_yesno('\nDo you want to change the image x-bounds (y/n)?', 'y')
2349
+ for i in range(num_tomo_stacks):
2350
+ tomosum = tomosum + np.sum(tomo_recon_stacks[i], axis=(0,2))
2351
+ select_x_bounds = input_yesno(
2352
+ '\nDo you want to change the image x-bounds (y/n)?', 'y')
1937
2353
  if not select_x_bounds:
1938
2354
  x_bounds = None
1939
2355
  else:
1940
2356
  accept = False
1941
2357
  index_ranges = None
1942
2358
  while not accept:
1943
- mask, x_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1944
- title='select x data range', legend='recon stack sum yz')
2359
+ _, x_bounds = draw_mask_1d(
2360
+ tomosum, current_index_ranges=index_ranges,
2361
+ title='select x data range',
2362
+ legend='recon stack sum yz')
1945
2363
  while len(x_bounds) != 1:
1946
2364
  print('Please select exactly one continuous range')
1947
- mask, x_bounds = draw_mask_1d(tomosum, title='select x data range',
1948
- legend='recon stack sum yz')
2365
+ _, x_bounds = draw_mask_1d(
2366
+ tomosum, title='select x data range',
2367
+ legend='recon stack sum yz')
1949
2368
  x_bounds = x_bounds[0]
1950
2369
  accept = True
1951
- self.logger.debug(f'x_bounds = {x_bounds}')
2370
+ self._logger.debug(f'x_bounds = {x_bounds}')
1952
2371
 
1953
2372
  if y_bounds == (-1, -1):
1954
2373
  y_bounds = None
1955
2374
  elif not z_only and y_bounds is None:
1956
2375
  # Selecting y bounds (in xz-plane)
1957
2376
  tomosum = 0
1958
- [tomosum := tomosum+np.sum(tomo_recon_stacks[i], axis=(0,1))
1959
- for i in range(num_tomo_stacks)]
1960
- select_y_bounds = input_yesno('\nDo you want to change the image y-bounds (y/n)?', 'y')
2377
+ for i in range(num_tomo_stacks):
2378
+ tomosum = tomosum + np.sum(tomo_recon_stacks[i], axis=(0,1))
2379
+ select_y_bounds = input_yesno(
2380
+ '\nDo you want to change the image y-bounds (y/n)?', 'y')
1961
2381
  if not select_y_bounds:
1962
2382
  y_bounds = None
1963
2383
  else:
1964
2384
  accept = False
1965
2385
  index_ranges = None
1966
2386
  while not accept:
1967
- mask, y_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1968
- title='select x data range', legend='recon stack sum xz')
2387
+ _, y_bounds = draw_mask_1d(
2388
+ tomosum, current_index_ranges=index_ranges,
2389
+ title='select x data range',
2390
+ legend='recon stack sum xz')
1969
2391
  while len(y_bounds) != 1:
1970
2392
  print('Please select exactly one continuous range')
1971
- mask, y_bounds = draw_mask_1d(tomosum, title='select x data range',
1972
- legend='recon stack sum xz')
2393
+ _, y_bounds = draw_mask_1d(
2394
+ tomosum, title='select x data range',
2395
+ legend='recon stack sum xz')
1973
2396
  y_bounds = y_bounds[0]
1974
2397
  accept = True
1975
- self.logger.debug(f'y_bounds = {y_bounds}')
2398
+ self._logger.debug(f'y_bounds = {y_bounds}')
1976
2399
 
1977
- # Selecting z bounds (in xy-plane) (only valid for a single image stack)
2400
+ # Selecting z bounds (in xy-plane)
2401
+ # (only valid for a single image stack)
1978
2402
  if z_bounds == (-1, -1):
1979
2403
  z_bounds = None
1980
2404
  elif z_bounds is None and num_tomo_stacks != 1:
1981
2405
  tomosum = 0
1982
- [tomosum := tomosum+np.sum(tomo_recon_stacks[i], axis=(1,2))
1983
- for i in range(num_tomo_stacks)]
1984
- select_z_bounds = input_yesno('Do you want to change the image z-bounds (y/n)?', 'n')
2406
+ for i in range(num_tomo_stacks):
2407
+ tomosum = tomosum + np.sum(tomo_recon_stacks[i], axis=(1,2))
2408
+ select_z_bounds = input_yesno(
2409
+ 'Do you want to change the image z-bounds (y/n)?', 'n')
1985
2410
  if not select_z_bounds:
1986
2411
  z_bounds = None
1987
2412
  else:
1988
2413
  accept = False
1989
2414
  index_ranges = None
1990
2415
  while not accept:
1991
- mask, z_bounds = draw_mask_1d(tomosum, current_index_ranges=index_ranges,
1992
- title='select x data range', legend='recon stack sum xy')
2416
+ _, z_bounds = draw_mask_1d(
2417
+ tomosum, current_index_ranges=index_ranges,
2418
+ title='select x data range',
2419
+ legend='recon stack sum xy')
1993
2420
  while len(z_bounds) != 1:
1994
2421
  print('Please select exactly one continuous range')
1995
- mask, z_bounds = draw_mask_1d(tomosum, title='select x data range',
1996
- legend='recon stack sum xy')
2422
+ _, z_bounds = draw_mask_1d(
2423
+ tomosum, title='select x data range',
2424
+ legend='recon stack sum xy')
1997
2425
  z_bounds = z_bounds[0]
1998
2426
  accept = True
1999
- self.logger.debug(f'z_bounds = {z_bounds}')
2427
+ self._logger.debug(f'z_bounds = {z_bounds}')
2000
2428
 
2001
- return(x_bounds, y_bounds, z_bounds)
2429
+ return x_bounds, y_bounds, z_bounds
2002
2430
 
2003
2431
 
2004
2432
  if __name__ == '__main__':
2005
- from CHAP.processor import main
2006
2433
  main()
2007
-