ChessAnalysisPipeline 0.0.17.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. CHAP/TaskManager.py +216 -0
  2. CHAP/__init__.py +27 -0
  3. CHAP/common/__init__.py +57 -0
  4. CHAP/common/models/__init__.py +8 -0
  5. CHAP/common/models/common.py +124 -0
  6. CHAP/common/models/integration.py +659 -0
  7. CHAP/common/models/map.py +1291 -0
  8. CHAP/common/processor.py +2869 -0
  9. CHAP/common/reader.py +658 -0
  10. CHAP/common/utils.py +110 -0
  11. CHAP/common/writer.py +730 -0
  12. CHAP/edd/__init__.py +23 -0
  13. CHAP/edd/models.py +876 -0
  14. CHAP/edd/processor.py +3069 -0
  15. CHAP/edd/reader.py +1023 -0
  16. CHAP/edd/select_material_params_gui.py +348 -0
  17. CHAP/edd/utils.py +1572 -0
  18. CHAP/edd/writer.py +26 -0
  19. CHAP/foxden/__init__.py +19 -0
  20. CHAP/foxden/models.py +71 -0
  21. CHAP/foxden/processor.py +124 -0
  22. CHAP/foxden/reader.py +224 -0
  23. CHAP/foxden/utils.py +80 -0
  24. CHAP/foxden/writer.py +168 -0
  25. CHAP/giwaxs/__init__.py +11 -0
  26. CHAP/giwaxs/models.py +491 -0
  27. CHAP/giwaxs/processor.py +776 -0
  28. CHAP/giwaxs/reader.py +8 -0
  29. CHAP/giwaxs/writer.py +8 -0
  30. CHAP/inference/__init__.py +7 -0
  31. CHAP/inference/processor.py +69 -0
  32. CHAP/inference/reader.py +8 -0
  33. CHAP/inference/writer.py +8 -0
  34. CHAP/models.py +227 -0
  35. CHAP/pipeline.py +479 -0
  36. CHAP/processor.py +125 -0
  37. CHAP/reader.py +124 -0
  38. CHAP/runner.py +277 -0
  39. CHAP/saxswaxs/__init__.py +7 -0
  40. CHAP/saxswaxs/processor.py +8 -0
  41. CHAP/saxswaxs/reader.py +8 -0
  42. CHAP/saxswaxs/writer.py +8 -0
  43. CHAP/server.py +125 -0
  44. CHAP/sin2psi/__init__.py +7 -0
  45. CHAP/sin2psi/processor.py +8 -0
  46. CHAP/sin2psi/reader.py +8 -0
  47. CHAP/sin2psi/writer.py +8 -0
  48. CHAP/tomo/__init__.py +15 -0
  49. CHAP/tomo/models.py +210 -0
  50. CHAP/tomo/processor.py +3862 -0
  51. CHAP/tomo/reader.py +9 -0
  52. CHAP/tomo/writer.py +59 -0
  53. CHAP/utils/__init__.py +6 -0
  54. CHAP/utils/converters.py +188 -0
  55. CHAP/utils/fit.py +2947 -0
  56. CHAP/utils/general.py +2655 -0
  57. CHAP/utils/material.py +274 -0
  58. CHAP/utils/models.py +595 -0
  59. CHAP/utils/parfile.py +224 -0
  60. CHAP/writer.py +122 -0
  61. MLaaS/__init__.py +0 -0
  62. MLaaS/ktrain.py +205 -0
  63. MLaaS/mnist_img.py +83 -0
  64. MLaaS/tfaas_client.py +371 -0
  65. chessanalysispipeline-0.0.17.dev3.dist-info/LICENSE +60 -0
  66. chessanalysispipeline-0.0.17.dev3.dist-info/METADATA +29 -0
  67. chessanalysispipeline-0.0.17.dev3.dist-info/RECORD +70 -0
  68. chessanalysispipeline-0.0.17.dev3.dist-info/WHEEL +5 -0
  69. chessanalysispipeline-0.0.17.dev3.dist-info/entry_points.txt +2 -0
  70. chessanalysispipeline-0.0.17.dev3.dist-info/top_level.txt +2 -0
CHAP/tomo/processor.py ADDED
@@ -0,0 +1,3862 @@
1
+ #!/usr/bin/env python
2
+ #-*- coding: utf-8 -*-
3
+ """
4
+ File : processor.py
5
+ Author : Rolf Verberg <rolfverberg AT gmail dot com>
6
+ Description: Module for Processors used only by tomography experiments
7
+ """
8
+
9
+ # System modules
10
+ import os
11
+ import re
12
+ import sys
13
+ from time import time
14
+ from typing import Optional
15
+
16
+ # Third party modules
17
+ from json import loads
18
+ import numpy as np
19
+ from pydantic import (
20
+ conint,
21
+ conlist,
22
+ field_validator,
23
+ )
24
+
25
+ # Local modules
26
+ from CHAP.common.models.map import MapConfig
27
+ from CHAP.processor import Processor
28
+ from CHAP.utils.general import (
29
+ fig_to_iobuf,
30
+ input_int,
31
+ input_num,
32
+ input_num_list,
33
+ input_yesno,
34
+ is_int_pair,
35
+ is_num,
36
+ is_num_series,
37
+ nxcopy,
38
+ select_image_indices,
39
+ select_roi_1d,
40
+ select_roi_2d,
41
+ quick_imshow,
42
+ )
43
+
44
+
45
+ NUM_CORE_TOMOPY_LIMIT = 24
46
+
47
+
48
class TomoMetadataProcessor(Processor):
    """A processor that takes data from the FOXDEN Data Discovery or
    Metadata service and extracts what's available to create
    a `CHAP.common.models.map.MapConfig` object for a tomography
    experiment.
    """
    def process(self, data, config):
        """Process the meta data and return a dictionary with
        extracted data to create a `MapConfig` for the tomography
        experiment.

        :param data: Input data.
        :type data: list[PipelineData]
        :param config: Any additional input data required to create a
            `MapConfig` that is unavailable from the Metadata service.
        :type config: dict
        :return: Metadata from the tomography experiment, validated
            against `CHAP.common.models.map.MapConfig`.
        :rtype: dict
        """
        return self._process(data, config)

    #@profile
    def _process(self, data, config):
        """Extract and validate the `MapConfig` information from the
        unwrapped pipeline input.

        :raises ValueError: Invalid pipeline input or beamline
            parameter.
        :rtype: dict
        """
        # Unwrap the pipeline input: accept either a metadata
        # dictionary directly or a single-item list containing one.
        # (Previously the input was indexed unconditionally, so a bare
        # dict raised KeyError(0) inside a no-op try/except.)
        data = self.unwrap_pipelinedata(data)[0]
        if isinstance(data, list):
            if len(data) != 1:
                raise ValueError(f'Invalid PipelineData input data ({data})')
            data = data[0]
        if not isinstance(data, dict):
            raise ValueError(f'Invalid PipelineData input data ({data})')

        # Extract any available MapConfig info
        map_config = {}
        map_config['did'] = data.get('did')
        map_config['title'] = data.get('sample_name')
        # 'beamline' is expected to be a list with a single station
        # identifier; only '3A' (id3a) is currently supported
        station = data.get('beamline')[0]
        if station == '3A':
            station = 'id3a'
        else:
            raise ValueError(f'Invalid beamline parameter ({station})')
        map_config['station'] = station
        experiment_type = data.get('technique')
        assert 'tomography' in experiment_type
        map_config['experiment_type'] = 'TOMO'
        map_config['sample'] = {'name': map_config['title'],
                                'description': data.get('description')}
        if station == 'id3a':
            scan_numbers = config['scan_numbers']
            if isinstance(scan_numbers, list):
                # Allow a nested list of scan numbers and use only its
                # first entry
                if isinstance(scan_numbers[0], list):
                    scan_numbers = scan_numbers[0]
            map_config['spec_scans'] = [{
                'spec_file': os.path.join(
                    data.get('data_location_raw'), 'spec.log'),
                'scan_numbers': scan_numbers}]
        map_config['independent_dimensions'] = config['independent_dimensions']

        # Validate the MapConfig info (raises on invalid content)
        MapConfig(**map_config)

        return map_config
111
+
112
+
113
class TomoCHESSMapConverter(Processor):
    """A processor to convert a CHESS style tomography map with dark
    and bright field configurations to an NeXus style input format.
    """
    def process(self, data):
        """Convert the pipeline input to an `NXtomo` style
        `nexusformat.nexus.NXroot` object.
        """
        return self._process(data)

    #@profile
    def _process(self, data):
        """Process the input map and configuration and return a
        `nexusformat.nexus.NXroot` object based on the
        `nexusformat.nexus.NXtomo` style format.

        :param data: Input map and configuration for tomographic image
            reduction/reconstruction.
        :type data: list[PipelineData]
        :raises ValueError: Invalid input or configuration parameter.
        :return: NeXus style tomography input configuration.
        :rtype: nexusformat.nexus.NXroot
        """
        # System modules
        from copy import deepcopy

        # Third party modules
        # pylint: disable=no-name-in-module
        from nexusformat.nexus import (
            NXdata,
            NXdetector,
            NXentry,
            NXinstrument,
            NXlink,
            NXroot,
            NXsample,
            NXsource,
            nxsetconfig,
        )
        # pylint: enable=no-name-in-module

        # Local modules
        from CHAP.utils.general import index_nearest

        # FIX make a config input
        nxsetconfig(memory=100000)

        # Load and validate the tomography fields
        tomofields = self.get_data(data, schema='tomofields')
        if isinstance(tomofields, NXroot):
            tomofields = tomofields[tomofields.default]
        if not isinstance(tomofields, NXentry):
            raise ValueError(f'Invalid parameter tomofields ({tomofields})')
        detector_prefix = str(tomofields.detector_ids)
        tomo_stacks = tomofields.data[detector_prefix].nxdata
        tomo_stack_shape = tomo_stacks.shape
        assert len(tomo_stack_shape) == 3

        # Validate map
        map_config = MapConfig(**loads(str(tomofields.map_config)))
        if map_config.did is None:
            self.logger.warning(
                'Unable to extract did from map configuration')
            map_config.did = f'/sample={map_config.sample.name}'
        assert len(map_config.spec_scans) == 1
        spec_scan = map_config.spec_scans[0]
        scan_numbers = spec_scan.scan_numbers

        # Load and validate dark field (look upstream and downstream
        # in the SPEC log file)
        try:
            darkfield = self.get_data(data, schema='darkfield')
        except Exception:
            self.logger.warning('Unable to load dark field from pipeline')
            darkfield = None
        data_darkfield = None
        if darkfield is None:
            # Search upstream in the SPEC log file for a 'df1' scan
            try:
                for scan_number in range(min(scan_numbers), 0, -1):
                    scanparser = spec_scan.get_scanparser(scan_number)
                    scan_type = scanparser.get_scan_type()
                    if scan_type == 'df1':
                        darkfield = scanparser
                        data_darkfield = darkfield.get_detector_data(
                            detector_prefix)
                        break
            except Exception:
                pass
            if data_darkfield is None:
                # Search downstream in the SPEC log file for a 'df2' scan
                try:
                    for scan_number in range(
                            1 + max(scan_numbers), 3 + max(scan_numbers)):
                        scanparser = spec_scan.get_scanparser(scan_number)
                        scan_type = scanparser.get_scan_type()
                        if scan_type == 'df2':
                            darkfield = scanparser
                            data_darkfield = darkfield.get_detector_data(
                                detector_prefix)
                            break
                except Exception:
                    pass
            if data_darkfield is None:
                self.logger.warning('Unable to load dark field')
        else:
            if isinstance(darkfield, NXroot):
                darkfield = darkfield[darkfield.default]
            if not isinstance(darkfield, NXentry):
                raise ValueError(f'Invalid parameter darkfield ({darkfield})')

        # Load and validate bright field (FIX look upstream and
        # downstream in the SPEC log file)
        try:
            brightfield = self.get_data(data, schema='brightfield')
        except Exception:
            self.logger.warning('Unable to load bright field from pipeline')
            brightfield = None
        if brightfield is None:
            for scan_number in range(min(scan_numbers), 0, -1):
                scanparser = spec_scan.get_scanparser(scan_number)
                scan_type = scanparser.get_scan_type()
                if scan_type == 'bf1':
                    brightfield = scanparser
                    break
            else:
                raise ValueError('Unable to load bright field')
        else:
            if isinstance(brightfield, NXroot):
                brightfield = brightfield[brightfield.default]
            if not isinstance(brightfield, NXentry):
                raise ValueError(
                    f'Invalid parameter brightfield ({brightfield})')

        # Load and validate detector config if supplied
        try:
            detector_config = self.get_config(
                data=data, schema='tomo.models.Detector')
        except Exception:
            detector_config = None

        # Construct NXroot
        nxroot = NXroot()

        # Check available independent dimensions
        if 'axes' in tomofields.data.attrs:
            independent_dimensions = tomofields.data.attrs['axes']
        else:
            independent_dimensions = tomofields.data.attrs['unstructured_axes']
        if isinstance(independent_dimensions, str):
            independent_dimensions = [independent_dimensions]
        matched_dimensions = deepcopy(independent_dimensions)
        if 'rotation_angles' not in independent_dimensions:
            raise ValueError('Data for rotation angles is unavailable '
                             '(available independent dimensions: '
                             f'{independent_dimensions})')
        rotation_angle_data_type = \
            tomofields.data.rotation_angles.attrs['data_type']
        if rotation_angle_data_type != 'scan_column':
            raise ValueError('Invalid data type for rotation angles '
                             f'({rotation_angle_data_type})')
        matched_dimensions.pop(matched_dimensions.index('rotation_angles'))
        if 'x_translation' in independent_dimensions:
            x_translation_data_type = \
                tomofields.data.x_translation.attrs['data_type']
            x_translation_name = \
                tomofields.data.x_translation.attrs['local_name']
            if x_translation_data_type not in ('spec_motor', 'smb_par'):
                raise ValueError('Invalid data type for x translation '
                                 f'({x_translation_data_type})')
            matched_dimensions.pop(matched_dimensions.index('x_translation'))
        else:
            x_translation_data_type = None
            x_translation_name = None
        if 'z_translation' in independent_dimensions:
            z_translation_data_type = \
                tomofields.data.z_translation.attrs['data_type']
            z_translation_name = \
                tomofields.data.z_translation.attrs['local_name']
            if z_translation_data_type not in ('spec_motor', 'smb_par'):
                # Error message fixed: this branch checks the z
                # translation, not the x translation
                raise ValueError('Invalid data type for z translation '
                                 f'({z_translation_data_type})')
            matched_dimensions.pop(matched_dimensions.index('z_translation'))
        else:
            z_translation_data_type = None
            z_translation_name = None
        if matched_dimensions:
            raise ValueError('Unknown independent dimension '
                             f'({matched_dimensions}), independent dimensions '
                             'must be in {"z_translation", "x_translation", '
                             '"rotation_angles"}')

        # Construct base NXentry and add to NXroot
        nxentry = NXentry(name=map_config.title)
        nxroot[nxentry.nxname] = nxentry
        nxentry.set_default()

        # Add configuration fields
        nxentry.definition = 'NXtomo'
        nxentry.map_config = map_config.model_dump_json()

        # Add an NXinstrument to the NXentry
        nxinstrument = NXinstrument()
        nxentry.instrument = nxinstrument

        # Add an NXsource to the NXinstrument
        nxsource = NXsource()
        nxinstrument.source = nxsource
        nxsource.type = 'Synchrotron X-ray Source'
        nxsource.name = 'CHESS'
        nxsource.probe = 'x-ray'

        # Tag the NXsource with the runinfo (as an attribute)
        nxsource.attrs['station'] = tomofields.station
        nxsource.attrs['experiment_type'] = map_config.experiment_type

        # Add an NXdetector to the NXinstrument
        # (do not fill in data fields yet)
        nxdetector = NXdetector()
        nxinstrument.detector = nxdetector
        nxdetector.local_name = detector_prefix
        if detector_config is None:
            detector_attrs = tomofields.data[detector_prefix].attrs
        else:
            detector_attrs = {
                'pixel_size': detector_config.pixel_size,
                'lens_magnification': detector_config.lens_magnification}
        pixel_size = detector_attrs['pixel_size']
        if isinstance(pixel_size, (int, float)):
            pixel_size = [pixel_size]
        lens_magnification = detector_attrs['lens_magnification']
        # Effective pixel sizes are corrected for the lens
        # magnification; a single supplied value is used for both the
        # row and the column direction
        nxdetector.row_pixel_size = pixel_size[0]/lens_magnification
        if len(pixel_size) == 1:
            nxdetector.column_pixel_size = pixel_size[0]/lens_magnification
        else:
            nxdetector.column_pixel_size = pixel_size[1]/lens_magnification
        nxdetector.row_pixel_size.units = 'mm'
        nxdetector.column_pixel_size.units = 'mm'
        nxdetector.rows = tomo_stack_shape[1]
        nxdetector.columns = tomo_stack_shape[2]
        nxdetector.rows.units = 'pixels'
        nxdetector.columns.units = 'pixels'

        # Add an NXsample to NXentry
        # (do not fill in data fields yet)
        nxsample = NXsample()
        nxentry.sample = nxsample
        nxsample.name = map_config.sample.name
        if map_config.sample.description is not None:
            nxsample.description = map_config.sample.description

        # Accumulators for the dark, bright and tomography image sets
        image_keys = []
        sequence_numbers = []
        image_stacks = []
        rotation_angles = []
        x_translations = []
        z_translations = []

        def collect_field(stack, image_key, get_spec_motors, get_smb_pars):
            """Append one dark/bright field image stack and its
            per-image bookkeeping (image key, sequence number, zero
            rotation angle and the x/z translations) to the
            accumulators above. The motor/par getters are only invoked
            when the corresponding translation data type requires them.
            """
            data_shape = stack.shape
            assert len(data_shape) == 3
            assert data_shape[1] == nxdetector.rows
            assert data_shape[2] == nxdetector.columns
            num_image = data_shape[0]
            image_keys.extend(num_image*[image_key])
            sequence_numbers.extend(range(num_image))
            image_stacks.append(stack)
            rotation_angles.extend(num_image*[0.0])
            spec_motors = None
            smb_pars = None
            if 'spec_motor' in (
                    x_translation_data_type, z_translation_data_type):
                spec_motors = get_spec_motors()
            if 'smb_par' in (
                    x_translation_data_type, z_translation_data_type):
                smb_pars = get_smb_pars()
            for translations, data_type, name in (
                    (x_translations, x_translation_data_type,
                     x_translation_name),
                    (z_translations, z_translation_data_type,
                     z_translation_name)):
                if data_type is None:
                    translations.extend(num_image*[0.0])
                elif data_type == 'spec_motor':
                    translations.extend(num_image*[spec_motors[name]])
                else:
                    translations.extend(num_image*[smb_pars[name]])

        # Collect dark field data (image key 2)
        if isinstance(darkfield, NXentry):
            nxentry.dark_field_config = darkfield.config
            for scan in darkfield.spec_scans.values():
                for nxcollection in scan.values():
                    collect_field(
                        nxcollection.data[detector_prefix].nxdata, 2,
                        lambda: loads(str(nxcollection.spec_motors)),
                        lambda: loads(str(nxcollection.smb_pars)))
        elif data_darkfield is not None:
            # Here darkfield is the scanparser of the matched dark
            # field scan
            collect_field(
                data_darkfield, 2,
                lambda: darkfield.spec_positioner_values,
                lambda: darkfield.pars)

        # Collect bright field data (image key 1)
        if isinstance(brightfield, NXentry):
            nxentry.bright_field_config = brightfield.config
            for scan in brightfield.spec_scans.values():
                for nxcollection in scan.values():
                    collect_field(
                        nxcollection.data[detector_prefix].nxdata, 1,
                        lambda: loads(str(nxcollection.spec_motors)),
                        lambda: loads(str(nxcollection.smb_pars)))
        else:
            # Here brightfield is the scanparser of the matched bright
            # field scan
            collect_field(
                brightfield.get_detector_data(detector_prefix), 1,
                lambda: brightfield.spec_positioner_values,
                lambda: brightfield.pars)

        # Collect tomography fields data (image key 0)
        num_tomo_stack = len(scan_numbers)
        assert not tomo_stack_shape[0] % num_tomo_stack
        # Restrict to 180 degrees set of data for now to match old code
        thetas_stacks = tomofields.data.rotation_angles.nxdata
        num_theta = tomo_stack_shape[0] // num_tomo_stack
        assert num_theta > 2
        thetas = thetas_stacks[0:num_theta]
        delta_theta = thetas[1] - thetas[0]
        if thetas[num_theta-1] - thetas[0] > 180 - delta_theta:
            image_end = index_nearest(thetas, thetas[0] + 180)
        else:
            image_end = thetas.size
        thetas = thetas[:image_end]
        num_image = thetas.size
        n_start = 0
        image_keys += num_tomo_stack * num_image * [0]
        sequence_numbers += num_tomo_stack * list(range(num_image))
        if x_translation_data_type is None:
            x_translations += num_tomo_stack * num_image * [0.0]
        if z_translation_data_type is None:
            z_translations += num_tomo_stack * num_image * [0.0]
        for _ in range(num_tomo_stack):
            image_stacks.append(tomo_stacks[n_start:n_start+num_image])
            if not np.array_equal(
                    thetas, thetas_stacks[n_start:n_start+num_image]):
                raise RuntimeError(
                    'Inconsistent thetas among tomography image stacks')
            rotation_angles += list(thetas)
            if x_translation_data_type is not None:
                x_translations += list(
                    tomofields.data.x_translation[n_start:n_start+num_image])
            if z_translation_data_type is not None:
                z_translations += list(
                    tomofields.data.z_translation[n_start:n_start+num_image])
            n_start += num_theta

        # Add image data to NXdetector
        nxinstrument.detector.image_key = image_keys
        nxinstrument.detector.sequence_number = sequence_numbers
        nxinstrument.detector.data = np.concatenate(image_stacks)
        del image_stacks

        # Add image data to NXsample
        nxsample.rotation_angle = rotation_angles
        nxsample.rotation_angle.units = 'degrees'
        nxsample.x_translation = x_translations
        nxsample.x_translation.units = 'mm'
        nxsample.z_translation = z_translations
        nxsample.z_translation.units = 'mm'

        # Add an NXdata to NXentry
        nxentry.data = NXdata(NXlink(nxentry.instrument.detector.data))
        nxentry.data.makelink(nxentry.instrument.detector.image_key)
        nxentry.data.makelink(nxentry.sample.rotation_angle)
        nxentry.data.makelink(nxentry.sample.x_translation)
        nxentry.data.makelink(nxentry.sample.z_translation)
        nxentry.data.set_default()

        return nxroot
588
+
589
+
590
class TomoDataProcessor(Processor):
    """A processor to reconstruct a set of tomographic images returning
    either a dictionary or a `nexusformat.nexus.NXroot` object
    containing the data after processing each individual step, an
    optional list of byte stream representions of Matplotlib figures,
    and the metadata associated with the workflow.

    :ivar reduce_data: Generate reduced tomography images,
        defaults to `False`.
    :type reduce_data: bool, optional
    :ivar find_center: Generate calibrated center axis info,
        defaults to `False`.
    :type find_center: bool, optional
    :ivar calibrate_center: Calibrate the rotation axis,
        defaults to `False`.
    :type calibrate_center: bool, optional
    :ivar reconstruct_data: Reconstruct the tomography data,
        defaults to `False`.
    :type reconstruct_data: bool, optional
    :ivar combine_data: Combine the reconstructed tomography stacks,
        defaults to `False`.
    :type combine_data: bool, optional
    :ivar save_figures: Create Matplotlib figures that can be
        saved to file downstream in the workflow,
        defaults to `True`.
    :type save_figures: bool, optional
    """
    reduce_data: Optional[bool] = False
    find_center: Optional[bool] = False
    calibrate_center: Optional[bool] = False
    reconstruct_data: Optional[bool] = False
    combine_data: Optional[bool] = False
    save_figures: Optional[bool] = True

    def process(self, data, config=None):
        """Run the configured tomography workflow step(s)."""
        return self._process(data, config=config)

    def _get_step_config(self, data, config, schema, use_config):
        """Return the validated configuration for a single tomography
        workflow step, or `None` when it cannot be obtained.

        The extra `config` dictionary is only consulted for the one
        step that was explicitly enabled on this processor
        (`use_config` is `True`).
        """
        try:
            if use_config:
                return self.get_config(data=data, config=config, schema=schema)
            return self.get_config(data=data, schema=schema)
        except ValueError:
            return None

    #@profile
    def _process(self, data, config=None):
        """Process the input map or configuration with the step
        specific instructions and return either a dictionary or a
        `nexusformat.nexus.NXroot` object with the processed result,
        an optional list of byte stream representions of Matplotlib
        figures, and the metadata associated with the workflow.

        :param data: Input configuration and specific step instructions
            for tomographic image reduction.
        :type data: list[PipelineData]
        :param config: Initialization parameters for a single
            tomography workflow step.
        :type config: dict, optional
        :raises ValueError: Invalid input or configuration parameter.
        :raises RuntimeError: Missing map configuration to generate
            reduced tomography images.
        :return: Metadata associated with the workflow, a list of byte
            stream representions of Matplotlib figures, and the result
            of the (partial) reconstruction. For center calibration,
            a plain dictionary with the calibrated center info is
            returned instead.
        :rtype: PipelineData, PipelineData, PipelineData
        """
        # Third party modules
        from nexusformat.nexus import nxsetconfig

        # Local modules
        from CHAP.pipeline import PipelineData
        from CHAP.tomo.models import (
            TomoCombineConfig,
            TomoFindCenterConfig,
            TomoReconstructConfig,
        )

        # Validate the input parameters
        num_part = (self.reduce_data, self.find_center, self.calibrate_center,
                    self.reconstruct_data, self.combine_data).count(True)
        if config is not None:
            if not num_part:
                raise ValueError(
                    'Invalid parameter combination, specify a single '
                    'tomography workflow step when "config" is supplied')
            if num_part > 1:
                raise ValueError(
                    'Invalid parameter combination, only supply "config" for '
                    'a single tomography workflow step')

        # Validate the workflow step configurations (each falls back
        # to None when no valid configuration is available)
        reduce_data_config = self._get_step_config(
            data, config, 'tomo.models.TomoReduceConfig', self.reduce_data)
        find_center_config = self._get_step_config(
            data, config, 'tomo.models.TomoFindCenterConfig',
            self.find_center)
        reconstruct_data_config = self._get_step_config(
            data, config, 'tomo.models.TomoReconstructConfig',
            self.reconstruct_data)
        combine_data_config = self._get_step_config(
            data, config, 'tomo.models.TomoCombineConfig', self.combine_data)
        nxroot = self.get_data(data)

        # Generate metadata
        map_config = loads(str(nxroot[nxroot.default].map_config))
        try:
            # The did is expected to contain a 'btr=<btr>/' component
            btr = map_config['did'].split('btr=')[1].split('/')[0]
        except Exception:
            self.logger.warning(
                f'Unable to get a valid btr from map_config ({map_config})')
            btr = 'unknown'
        metadata = {
            'parent_did': map_config['did'],
            'application': 'CHAP',
            'btr': btr,
            'experiment_type': map_config['experiment_type'],
            'metadata': {}
        }

        tomo = Tomo(
            metadata, save_figures=self.save_figures, **self.run_config)

        # FIX make an config input
        nxsetconfig(memory=100000)

        # Calibrate the rotation axis
        if self.calibrate_center:
            if any((self.reduce_data, self.find_center, self.reconstruct_data,
                    reconstruct_data_config, self.combine_data,
                    combine_data_config)):
                self.logger.warning('Ignoring any step specific instructions '
                                    'during center calibration')
            if nxroot is None:
                raise RuntimeError('Map info required to calibrate the '
                                   'rotation axis')
            if find_center_config is None:
                find_center_config = TomoFindCenterConfig()
                calibrate_center_rows = True
            else:
                calibrate_center_rows = find_center_config.center_rows
                if calibrate_center_rows is None:
                    calibrate_center_rows = True
            nxroot, calibrate_center_rows = tomo.reduce_data(
                nxroot, reduce_data_config, calibrate_center_rows)
            center_config = tomo.find_centers(
                nxroot, find_center_config, calibrate_center_rows)
            return center_config.model_dump()

        # Reduce tomography images
        if self.reduce_data or reduce_data_config is not None:
            if nxroot is None:
                raise RuntimeError('Map info required to reduce the '
                                   'tomography images')
            nxroot, _ = tomo.reduce_data(nxroot, reduce_data_config)

        # Find calibrated center axis info for the tomography stacks
        center_config = None
        if self.find_center or find_center_config is not None:
            run_find_centers = False
            if find_center_config is None:
                find_center_config = TomoFindCenterConfig()
                run_find_centers = True
            else:
                if (find_center_config.center_rows is None
                        or find_center_config.center_offsets is None):
                    run_find_centers = True
            if run_find_centers:
                center_config = tomo.find_centers(nxroot, find_center_config)
            else:
                center_config = find_center_config

        # Reconstruct tomography stacks
        # RV pass reconstruct_data_config and center_config directly to
        # tomo.reconstruct_data?
        if self.reconstruct_data or reconstruct_data_config is not None:
            if reconstruct_data_config is None:
                reconstruct_data_config = TomoReconstructConfig()
            nxroot = tomo.reconstruct_data(
                nxroot, center_config, reconstruct_data_config)
            center_config = None

        # Combine reconstructed tomography stacks
        if self.combine_data or combine_data_config is not None:
            if combine_data_config is None:
                combine_data_config = TomoCombineConfig()
            nxroot = tomo.combine_data(nxroot, combine_data_config)

        # The parent did is only used internally, drop it from the
        # returned metadata (stray trailing comma removed: the original
        # statement was a one-element tuple expression)
        metadata.pop('parent_did')
        if center_config is not None:
            return (
                PipelineData(
                    name=self.name, data=metadata, schema='metadata'),
                PipelineData(
                    name=self.name, data=tomo._figures,
                    schema='common.write.ImageWriter'),
                PipelineData(
                    name=self.name, data=center_config.model_dump(),
                    schema='tomodata'))
        return (
            PipelineData(name=self.name, data=metadata, schema='metadata'),
            PipelineData(
                name=self.name, data=tomo._figures,
                schema='common.write.ImageWriter'),
            PipelineData(name=self.name, data=nxroot, schema='tomodata'))
818
+
819
+
820
class SetNumexprThreads:
    """Context manager that pins the number of threads used by the
    numexpr package for the duration of a block, restoring the
    previous numexpr setting on exit.
    """
    def __init__(self, num_core):
        """Initialize SetNumexprThreads.

        :param num_core: Number of processors used by the num_expr
            package.
        :type num_core: int
        """
        # System modules
        from multiprocessing import cpu_count

        # Fall back to all available processors when the requested
        # count is missing or out of range
        available = cpu_count()
        in_range = num_core is not None and 1 <= num_core <= available
        self._num_core = num_core if in_range else available
        self._num_core_org = self._num_core

    def __enter__(self):
        # Third party modules
        from numexpr import (
            MAX_THREADS,
            set_num_threads,
        )

        # set_num_threads returns the previous thread count; keep it
        # so __exit__ can restore the original numexpr configuration
        self._num_core_org = set_num_threads(
            min(self._num_core, MAX_THREADS))

    def __exit__(self, exc_type, exc_value, traceback):
        # Third party modules
        from numexpr import set_num_threads

        set_num_threads(self._num_core_org)
855
+
856
+
857
+ class Tomo(Processor):
858
+ """Reconstruct a set of tomographic images.
859
+
860
+ :ivar save_figures: Create Matplotlib figures that can be saved to
861
+ file downstream in the workflow, defaults to `True`.
862
+ :type save_figures: bool, optional
863
+ """
864
+ save_figures: Optional[bool] = True
865
+
866
+ #FIX make this in a validator
867
+ def __init__(self, metadata, num_core=-1, **kwargs):
868
+ """Initialize Tomo.
869
+
870
+ :param metadata: Metadata record.
871
+ :type metadata: dict
872
+ :param num_core: Number of processors.
873
+ :type num_core: int
874
+ :raises ValueError: Invalid input parameter.
875
+ """
876
+ # System modules
877
+ from multiprocessing import cpu_count
878
+
879
+ super().__init__(**kwargs)
880
+ self._figures = []
881
+ self._metadata = metadata
882
+ self._num_core = num_core
883
+ self._test_config = {}
884
+ if self._num_core == -1:
885
+ self._num_core = cpu_count()
886
+ if not isinstance(self._num_core, int) or self._num_core < 0:
887
+ raise ValueError(f'Invalid parameter num_core ({num_core})')
888
+ if self._num_core > cpu_count():
889
+ self.logger.warning(
890
+ f'num_core = {self._num_core} is larger than the number '
891
+ f'of available processors and reduced to {cpu_count()}')
892
+ self._num_core = cpu_count()
893
+ # Tompy py uses numexpr with NUMEXPR_MAX_THREADS = 64
894
+ if self._num_core > 64:
895
+ self.logger.warning(
896
+ f'num_core = {self._num_core} is larger than the number '
897
+ f'of processors suitable to Tomopy and reduced to 64')
898
+ self._num_core = 64
899
+
900
+ #@profile
901
    def reduce_data(
            self, nxroot, tool_config=None, calibrate_center_rows=False):
        """Reduce the tomography images: generate dark and bright
        fields, select the rotation angles and detector row bounds,
        and produce the reduced tomography stacks.

        :param nxroot: Data object containing the raw data info and
            metadata required for a tomography data reduction.
        :type nxroot: nexusformat.nexus.NXroot
        :param tool_config: Tool configuration.
        :type tool_config: CHAP.tomo.models.TomoReduceConfig, optional
        :param calibrate_center_rows: Internal parameter only: used
            only to calibrate the rotation axis.
        :type calibrate_center_rows: Union[bool, list[int, int]]
        :raises ValueError: Invalid input or configuration parameter.
        :return: Reduced tomography data and the center calibration
            rows (only if calibrate_center_rows is set).
        :rtype: nexusformat.nexus.NXroot, Union[bool, list[int, int]]
        """
        # Third party modules
        from nexusformat.nexus import (
            NXdata,
            NXprocess,
            NXroot,
        )

        self.logger.info('Generate the reduced tomography images')

        # Validate input parameter
        if isinstance(nxroot, NXroot):
            nxentry = nxroot[nxroot.default]
        else:
            raise ValueError(
                f'Invalid parameter nxroot {type(nxroot)}:\n{nxroot}')
        if tool_config is None:
            # Local modules:
            from CHAP.tomo.models import TomoReduceConfig

            # Default configuration: from here on tool_config is
            # guaranteed to be a TomoReduceConfig instance
            tool_config = TomoReduceConfig()
        img_row_bounds = tool_config.img_row_bounds
        if img_row_bounds is not None and calibrate_center_rows:
            self.logger.warning('Ignoring parameter img_row_bounds '
                                'during rotation axis calibration')
            img_row_bounds = None
        image_key = nxentry.instrument.detector.get('image_key', None)
        if image_key is None or 'data' not in nxentry.instrument.detector:
            raise ValueError(f'Unable to find image_key or data in '
                             'instrument.detector '
                             f'({nxentry.instrument.detector.tree})')

        # Create an NXprocess to store data reduction (meta)data
        reduced_data = NXprocess()

        # Generate dark field
        reduced_data = self._gen_dark(nxentry, reduced_data, image_key)

        # Generate bright field
        reduced_data = self._gen_bright(nxentry, reduced_data, image_key)

        # Get rotation angles for image stacks (in degrees)
        thetas = self._gen_thetas(nxentry, image_key)

        # Get the image stack mask to remove bad images from stack
        # NOTE(review): drop_fraction is hard-coded to 0, so this
        # random-mask branch is currently disabled (it looks like a
        # testing hook) — confirm whether it should be configurable
        image_mask = None
        drop_fraction = 0 # fraction of images dropped as a percentage
        delta_theta = tool_config.delta_theta
        if drop_fraction:
            if delta_theta is not None:
                delta_theta = None
                self.logger.warning(
                    'Ignoring delta_theta when an image mask is used')
            # Fixed seed keeps the dropped-image selection reproducible
            np.random.seed(0)
            image_mask = np.where(np.random.rand(
                len(thetas)) < drop_fraction/100, 0, 1).astype(bool)

        # Set zoom and/or rotation angle interval to reduce memory
        # requirement
        if image_mask is None:
            zoom_perc, delta_theta = self._set_zoom_or_delta_theta(
                thetas, delta_theta)
            if delta_theta is not None:
                # Keep every delta_theta-th image (mask by index stride)
                image_mask = np.asarray(
                    [0 if i%delta_theta else 1
                     for i in range(len(thetas))], dtype=bool)
            self.logger.debug(f'zoom_perc: {zoom_perc}')
            self.logger.debug(f'delta_theta: {delta_theta}')
            if zoom_perc is not None:
                reduced_data.attrs['zoom_perc'] = zoom_perc
            tool_config.delta_theta = delta_theta
        if image_mask is not None:
            self.logger.debug(f'image_mask = {image_mask}')
            reduced_data.image_mask = image_mask
            thetas = thetas[image_mask]

        # Set vertical detector bounds for image stack or rotation
        # axis calibration rows
        img_row_bounds = self._set_detector_bounds(
            nxentry, reduced_data, image_key, thetas[0],
            img_row_bounds, calibrate_center_rows)
        self.logger.debug(f'img_row_bounds = {img_row_bounds}')
        if calibrate_center_rows:
            # During axis calibration the selected bounds are the two
            # calibration rows, not an image crop range
            calibrate_center_rows = tuple(sorted(img_row_bounds))
            img_row_bounds = None
        if img_row_bounds is None:
            tbf_shape = reduced_data.data.bright_field.shape
            img_row_bounds = (0, tbf_shape[0])
        tool_config.img_row_bounds = list(img_row_bounds)
        reduced_data.img_row_bounds = tool_config.img_row_bounds
        reduced_data.img_row_bounds.units = 'pixels'
        reduced_data.img_row_bounds.attrs['long_name'] = \
            'image row boundaries in detector frame of reference'

        # Store rotation angles for image stacks
        self.logger.debug(f'thetas = {thetas}')
        reduced_data.rotation_angle = thetas
        reduced_data.rotation_angle.units = 'degrees'

        # Generate reduced tomography fields
        reduced_data = self._gen_tomo(
            nxentry, reduced_data, image_key, calibrate_center_rows)

        # Create a copy of the input NeXus object and remove raw and
        # any existing reduced data
        exclude_items = [
            f'{nxentry.nxname}/reduced_data/data',
            f'{nxentry.nxname}/instrument/detector/data',
            f'{nxentry.nxname}/instrument/detector/image_key',
            f'{nxentry.nxname}/instrument/detector/sequence_number',
            f'{nxentry.nxname}/sample/rotation_angle',
            f'{nxentry.nxname}/sample/x_translation',
            f'{nxentry.nxname}/sample/z_translation',
            f'{nxentry.nxname}/data/data',
            f'{nxentry.nxname}/data/image_key',
            f'{nxentry.nxname}/data/rotation_angle',
            f'{nxentry.nxname}/data/x_translation',
            f'{nxentry.nxname}/data/z_translation',
        ]
        nxroot = nxcopy(nxroot, exclude_nxpaths=exclude_items)

        # Add the reduced data NXprocess
        nxentry = nxroot[nxroot.default]
        nxentry.reduced_data = reduced_data

        if 'data' not in nxentry:
            nxentry.data = NXdata()
            nxentry.data.set_default()
        nxentry.data.makelink(
            nxentry.reduced_data.data.tomo_fields, name='reduced_data')
        nxentry.data.makelink(nxentry.reduced_data.rotation_angle)
        nxentry.data.attrs['signal'] = 'reduced_data'

        # Add to metadata
        self._metadata['did'] = \
            f'{self._metadata["parent_did"]}/workflow=' + \
            f'{self._metadata["experiment_type"].lower()}_reduced'
        # NOTE(review): tool_config was defaulted to TomoReduceConfig()
        # at the top of this method, so this None branch is unreachable
        if tool_config is None:
            self._metadata['metadata']['reduced_data'] = {}
        else:
            self._metadata['metadata']['reduced_data'] = \
                tool_config.model_dump()
        self._metadata['metadata']['reduced_data']['date'] = str(
            reduced_data.date)

        return nxroot, calibrate_center_rows
1063
+
1064
+ #@profile
1065
    def find_centers(self, nxroot, tool_config, calibrate_center_rows=False):
        """Find the calibrated center axis info.

        :param nxroot: Data object containing the reduced data and
            metadata required to find the calibrated center axis info.
        :type nxroot: nexusformat.nexus.NXroot
        :param tool_config: Tool configuration.
        :type tool_config: CHAP.tomo.models.TomoFindCenterConfig
        :param calibrate_center_rows: Internal parameter only: used
            only to calibrate the rotation axis.
        :type calibrate_center_rows: Union[bool, list[int, int]]
        :raises ValueError: Invalid or missing input or configuration
            parameter.
        :return: Calibrated center axis info (`None` in the
            foxden-demo shortcut).
        :rtype: CHAP.tomo.models.TomoFindCenterConfig
        """
        # Third party modules
        from nexusformat.nexus import NXroot

        self.logger.info('Find the calibrated center axis info')

        #RV FIX FOXDEN demo only
        # NOTE(review): demo shortcut — records metadata and returns
        # None without doing any center finding; remove once the
        # FOXDEN demo no longer needs it
        if nxroot is None or nxroot == 'foxden_demo':
            # Add to metadata
            from datetime import datetime
            self._metadata['did'] = \
                f'{self._metadata["parent_did"]}/workflow=' + \
                f'{self._metadata["experiment_type"].lower()}_center'
            self._metadata['metadata']['findcenter'] = tool_config.model_dump()
            self._metadata['metadata']['findcenter']['date'] = str(
                datetime.now())
            return None

        if isinstance(nxroot, NXroot):
            nxentry = nxroot[nxroot.default]
        else:
            raise ValueError(f'Invalid parameter nxroot ({nxroot})')

        # Check if reduced data is available
        if 'reduced_data' not in nxentry:
            raise ValueError(f'Unable to find valid reduced data in {nxentry}.')

        # Select the image stack to find the calibrated center axis
        # reduced data axes order: stack,theta,row,column
        # Note: NeXus can't follow a link if the data it points to is
        # too big get the data from the actual place, not from
        # nxentry.data
        num_tomo_stacks = nxentry.reduced_data.data.tomo_fields.shape[0]
        self.logger.debug(f'num_tomo_stacks = {num_tomo_stacks}')
        if num_tomo_stacks == 1:
            center_stack_index = 0
        else:
            center_stack_index = tool_config.center_stack_index
            if calibrate_center_rows:
                # Calibration always uses the middle stack
                center_stack_index = num_tomo_stacks//2
            elif self.interactive:
                if center_stack_index is None:
                    center_stack_index = input_int(
                        '\nEnter tomography stack index to calibrate the '
                        'center axis', ge=0, lt=num_tomo_stacks,
                        default=num_tomo_stacks//2)
            else:
                if center_stack_index is None:
                    center_stack_index = num_tomo_stacks//2
                    self.logger.warning(
                        'center_stack_index unspecified, use stack '
                        f'{center_stack_index} to find center axis info')
        tool_config.center_stack_index = center_stack_index

        # Get thetas (in degrees)
        thetas = nxentry.reduced_data.rotation_angle.nxdata

        # Select center rows
        if calibrate_center_rows:
            center_rows = calibrate_center_rows
            offset_center_rows = (0, 1)
        else:
            # Get full bright field
            tbf = nxentry.reduced_data.data.bright_field.nxdata
            tbf_shape = tbf.shape

            # Get image bounds
            img_row_bounds = nxentry.reduced_data.get(
                'img_row_bounds', (0, tbf_shape[0]))
            img_row_bounds = (int(img_row_bounds[0]), int(img_row_bounds[1]))
            img_column_bounds = nxentry.reduced_data.get(
                'img_column_bounds', (0, tbf_shape[1]))
            img_column_bounds = (
                int(img_column_bounds[0]), int(img_column_bounds[1]))

            center_rows = tool_config.center_rows
            if center_rows is None:
                if num_tomo_stacks == 1:
                    # Add a small margin to avoid edge effects
                    offset = min(
                        5, int(0.1*(img_row_bounds[1] - img_row_bounds[0])))
                    center_rows = (
                        img_row_bounds[0]+offset, img_row_bounds[1]-1-offset)
                else:
                    if not self.interactive:
                        self.logger.warning('center_rows unspecified, find '
                                            'centers at reduced data bounds')
                    center_rows = (img_row_bounds[0], img_row_bounds[1]-1)
            # NOTE(review): `+0` below presumably normalizes a -0.0
            # angle readout to 0.0 for display — confirm
            buf, center_rows = select_image_indices(
                nxentry.reduced_data.data.tomo_fields[
                    center_stack_index,0,:,:],
                0,
                b=tbf[img_row_bounds[0]:img_row_bounds[1],
                      img_column_bounds[0]:img_column_bounds[1]],
                preselected_indices=center_rows,
                axis_index_offset=img_row_bounds[0],
                title='Select two detector image row indices to find center '
                      f'axis (in range [{img_row_bounds[0]}, '
                      f'{img_row_bounds[1]-1}])',
                title_a=r'Tomography image at $\theta$ = '
                        f'{round(thetas[0], 2)+0}',
                title_b='Bright field',
                interactive=self.interactive, return_buf=self.save_figures)
            # Clamp the upper row selection to the last valid row index
            if center_rows[1] == img_row_bounds[1]:
                center_rows = (center_rows[0], center_rows[1]-1)
            offset_center_rows = (
                center_rows[0] - img_row_bounds[0],
                center_rows[1] - img_row_bounds[0])
            # Save figure
            if self.save_figures:
                self._figures.append((buf, 'center_finding_rows'))
        tool_config.center_rows = list(center_rows)

        # Find the center offsets at each of the center rows
        prev_center_offset = None
        center_offsets = []
        for row, offset_row in zip(center_rows, offset_center_rows):
            t0 = time()
            center_offsets.append(
                self._find_center_one_plane(
                    nxentry.reduced_data.data.tomo_fields, center_stack_index,
                    row, offset_row, np.radians(thetas),
                    num_core=self._num_core,
                    center_offset_min=tool_config.center_offset_min,
                    center_offset_max=tool_config.center_offset_max,
                    center_search_range=tool_config.center_search_range,
                    gaussian_sigma=tool_config.gaussian_sigma,
                    ring_width=tool_config.ring_width,
                    prev_center_offset=prev_center_offset))
            self.logger.info(
                f'Finding center row {row} took {time()-t0:.2f} seconds')
            self.logger.debug(f'center_row = {row:.2f}')
            self.logger.debug(f'center_offset = {center_offsets[-1]:.2f}')
            # Seed the next row's search with the previous result
            prev_center_offset = center_offsets[-1]
        tool_config.center_offsets = center_offsets

        # Add to metadata
        from datetime import datetime
        self._metadata['did'] = \
            f'{self._metadata["parent_did"]}/workflow=' + \
            f'{self._metadata["experiment_type"].lower()}_center'
        self._metadata['metadata']['findcenter'] = tool_config.model_dump()
        self._metadata['metadata']['findcenter']['date'] = str(
            datetime.now())

        return tool_config
1226
+
1227
+ #@profile
1228
    def reconstruct_data(self, nxroot, center_info, tool_config):
        """Reconstruct the tomography data.

        :param nxroot: Data object containing the reduced data and
            metadata required for a tomography data reconstruction.
        :type nxroot: nexusformat.nexus.NXroot
        :param center_info: Calibrated center axis info.
        :type center_info: CHAP.tomo.models.TomoFindCenterConfig
        :param tool_config: Tool configuration.
        :type tool_config: CHAP.tomo.models.TomoReconstructConfig
        :raises ValueError: Invalid or missing input or configuration
            parameter.
        :return: Reconstructed tomography data.
        :rtype: nexusformat.nexus.NXroot
        """
        # Third party modules
        from nexusformat.nexus import (
            NXdata,
            NXfield,
            NXprocess,
            NXroot,
        )
        from CHAP.tomo.models import TomoFindCenterConfig

        self.logger.info('Reconstruct the tomography data')

        if isinstance(nxroot, NXroot):
            nxentry = nxroot[nxroot.default]
        else:
            raise ValueError(f'Invalid parameter nxroot ({nxroot})')
        if not isinstance(center_info, TomoFindCenterConfig):
            raise ValueError(
                f'Invalid parameter center_info ({type(center_info)})')

        # Check if reduced data is available
        if 'reduced_data' not in nxentry:
            raise ValueError(f'Unable to find valid reduced data in {nxentry}.')

        # Create an NXprocess to store image reconstruction (meta)data
        nxprocess = NXprocess()

        # Get calibrated center axis rows and centers
        center_rows = center_info.center_rows
        center_offsets = center_info.center_offsets
        if center_rows is None or center_offsets is None:
            raise ValueError(
                'Unable to find valid calibrated center axis info in '
                f'{center_info}.')
        # Linear model of the center offset as a function of row
        center_slope = (center_offsets[1]-center_offsets[0]) \
            / (center_rows[1]-center_rows[0])

        # Get thetas (in degrees)
        thetas = nxentry.reduced_data.rotation_angle.nxdata

        # Reconstruct tomography data
        # - reduced data axes order: stack,theta,row,column
        # - reconstructed data axes order: row/-z,y,x
        # Note: NeXus can't follow a link if the data it points to is
        # too big get the data from the actual place, not from
        # nxentry.data
        if 'zoom_perc' in nxentry.reduced_data:
            res_title = f'{nxentry.reduced_data.attrs["zoom_perc"]}p'
        else:
            res_title = 'fullres'
        tomo_stacks = nxentry.reduced_data.data.tomo_fields
        num_tomo_stacks = tomo_stacks.shape[0]
        tomo_recon_stacks = []
        img_row_bounds = tuple(nxentry.reduced_data.get(
            'img_row_bounds', (0, tomo_stacks.shape[2])))
        # Shift calibration rows from detector to reduced-data frame
        # NOTE(review): center_rows comes from the config as a list;
        # in-place subtraction works only if img_row_bounds holds
        # numpy scalars (yielding an ndarray) — confirm
        center_rows -= img_row_bounds[0]
        for i in range(num_tomo_stacks):
            # Convert reduced data stack from theta,row,column to
            # row,theta,column
            tomo_stack = np.swapaxes(tomo_stacks[i,:,:,:], 0, 1)
            assert len(thetas) == tomo_stack.shape[1]
            assert 0 <= center_rows[0] < center_rows[1] < tomo_stack.shape[0]
            # Extrapolate the per-row offsets to the stack's first and
            # last rows using the linear center model
            # NOTE(review): center_offsets is reassigned each
            # iteration from its own previous value, so the
            # extrapolation compounds across stacks — confirm intended
            center_offsets = [
                center_offsets[0]-center_rows[0]*center_slope,
                center_offsets[1] + center_slope * (
                    tomo_stack.shape[0]-1-center_rows[1]),
            ]
            t0 = time()
            tomo_recon_stack = self._reconstruct_one_tomo_stack(
                tomo_stack, np.radians(thetas), center_offsets=center_offsets,
                num_core=self._num_core, algorithm='gridrec',
                secondary_iters=tool_config.secondary_iters,
                gaussian_sigma=tool_config.gaussian_sigma,
                remove_stripe_sigma=tool_config.remove_stripe_sigma,
                ring_width=tool_config.ring_width)
            self.logger.info(
                f'Reconstruction of stack {i} took {time()-t0:.2f} seconds')

            # Combine stacks
            tomo_recon_stacks.append(tomo_recon_stack)

        # Resize the reconstructed tomography data
        # - reconstructed axis data order in each stack: row/-z,y,x
        tomo_recon_shape = tomo_recon_stacks[0].shape
        x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(
            tomo_recon_stacks, x_bounds=tool_config.x_bounds,
            y_bounds=tool_config.y_bounds, z_bounds=tool_config.z_bounds)
        tool_config.x_bounds = None if x_bounds is None else list(x_bounds)
        tool_config.y_bounds = None if y_bounds is None else list(y_bounds)
        tool_config.z_bounds = None if z_bounds is None else list(z_bounds)
        # For each axis: full range when unbounded, otherwise the
        # sorted bounds; the *_slice index marks the midpoint used for
        # the saved figures below
        if x_bounds is None:
            x_range = (0, tomo_recon_shape[2])
            x_slice = x_range[1]//2
        else:
            x_range = (min(x_bounds), max(x_bounds))
            x_slice = (x_bounds[0]+x_bounds[1])//2
        if y_bounds is None:
            y_range = (0, tomo_recon_shape[1])
            y_slice = y_range[1]//2
        else:
            y_range = (min(y_bounds), max(y_bounds))
            y_slice = (y_bounds[0]+y_bounds[1])//2
        if z_bounds is None:
            z_range = (0, tomo_recon_shape[0])
            z_slice = z_range[1]//2
        else:
            z_range = (min(z_bounds), max(z_bounds))
            z_slice = (z_bounds[0]+z_bounds[1])//2
        z_dim_org = tomo_recon_shape[0]
        tomo_recon_stacks = np.asarray(tomo_recon_stacks)[:,
            z_range[0]:z_range[1],y_range[0]:y_range[1],x_range[0]:x_range[1]]

        detector = nxentry.instrument.detector
        row_pixel_size = float(detector.row_pixel_size)
        column_pixel_size = float(detector.column_pixel_size)
        if num_tomo_stacks == 1:
            # Convert the reconstructed tomography data from internal
            # coordinate frame row/-z,y,x with the origin on the
            # near-left-top corner to an z,y,x coordinate frame with
            # the origin on the par file x,z values, halfway in the
            # y-dimension.
            # Here x is to the right, y along the beam direction and
            # z upwards in the lab frame of reference
            tomo_recon_stack = np.flip(tomo_recon_stacks[0], 0)
            z_range = (z_dim_org-z_range[1], z_dim_org-z_range[0])

            # Get coordinate axes
            x = column_pixel_size * (
                np.linspace(
                    x_range[0], x_range[1], x_range[1]-x_range[0], False)
                - 0.5*detector.columns + 0.5)
            x = np.asarray(x + nxentry.reduced_data.x_translation[0])
            y = np.asarray(
                column_pixel_size * (
                    np.linspace(
                        y_range[0], y_range[1], y_range[1]-y_range[0], False)
                    - 0.5*detector.columns + 0.5))
            z = row_pixel_size*(
                np.linspace(
                    z_range[0], z_range[1], z_range[1]-z_range[0], False)
                + detector.rows
                - int(nxentry.reduced_data.img_row_bounds[1])
                + 0.5)
            z = np.asarray(z + nxentry.reduced_data.z_translation[0])

            # Save a few reconstructed image slices
            if self.save_figures:
                x_index = x_slice-x_range[0]
                title = f'recon {res_title} x={x[x_index]:.4f}'
                self._figures.append(
                    (quick_imshow(
                        tomo_recon_stack[:,:,x_index], title=title,
                        origin='lower', extent=(y[0], y[-1], z[0], z[-1]),
                        show_fig=False, return_fig=True),
                     re.sub(r'\s+', '_', title)))
                y_index = y_slice-y_range[0]
                title = f'recon {res_title} y={y[y_index]:.4f}'
                self._figures.append(
                    (quick_imshow(
                        tomo_recon_stack[:,y_index,:], title=title,
                        origin='lower', extent=(x[0], x[-1], z[0], z[-1]),
                        show_fig=False, return_fig=True),
                     re.sub(r'\s+', '_', title)))
                z_index = z_slice-z_range[0]
                title = f'recon {res_title} z={z[z_index]:.4f}'
                self._figures.append(
                    (quick_imshow(
                        tomo_recon_stack[z_index,:,:], title=title,
                        origin='lower', extent=(x[0], x[-1], y[0], y[-1]),
                        show_fig=False, return_fig=True),
                     re.sub(r'\s+', '_', title)))
        else:
            # Save a few reconstructed image slices
            if self.save_figures:
                for i in range(tomo_recon_stacks.shape[0]):
                    basetitle = f'recon stack {i}'
                    title = f'{basetitle} {res_title} xslice{x_slice}'
                    self._figures.append(
                        (quick_imshow(
                            tomo_recon_stacks[i,:,:,x_slice-x_range[0]],
                            title=title, show_fig=False, return_fig=True),
                         re.sub(r'\s+', '_', title)))
                    title = f'{basetitle} {res_title} yslice{y_slice}'
                    self._figures.append(
                        (quick_imshow(
                            tomo_recon_stacks[i,:,y_slice-y_range[0],:],
                            title=title, show_fig=False, return_fig=True),
                         re.sub(r'\s+', '_', title)))
                    title = f'{basetitle} {res_title} zslice{z_slice}'
                    self._figures.append(
                        (quick_imshow(
                            tomo_recon_stacks[i,z_slice-z_range[0],:,:],
                            title=title, show_fig=False, return_fig=True),
                         re.sub(r'\s+', '_', title)))

        # Add image reconstruction to reconstructed data NXprocess
        # reconstructed axis data order:
        # - for one stack: z,y,x
        # - for multiple stacks: row/-z,y,x
        for k, v in center_info.model_dump().items():
            if k == 'center_stack_index':
                nxprocess[k] = v
            if k in ('center_rows', 'center_offsets'):
                nxprocess[k] = v
                nxprocess[k].units = 'pixels'
                if k == 'center_rows':
                    # NOTE(review): this reassignment appears to
                    # replace the field created two lines up,
                    # discarding its 'units' attribute — confirm
                    nxprocess[k] = v
                    nxprocess[k].attrs['long_name'] = \
                        'center row indices in detector frame of reference'
        if x_bounds is not None:
            nxprocess.x_bounds = x_bounds
            nxprocess.x_bounds.units = 'pixels'
            nxprocess.x_bounds.attrs['long_name'] = \
                'x range indices in reduced data frame of reference'
        if y_bounds is not None:
            nxprocess.y_bounds = y_bounds
            nxprocess.y_bounds.units = 'pixels'
            nxprocess.y_bounds.attrs['long_name'] = \
                'y range indices in reduced data frame of reference'
        if z_bounds is not None:
            nxprocess.z_bounds = z_bounds
            nxprocess.z_bounds.units = 'pixels'
            nxprocess.z_bounds.attrs['long_name'] = \
                'z range indices in reduced data frame of reference'
        if num_tomo_stacks == 1:
            nxprocess.data = NXdata(
                NXfield(tomo_recon_stack, 'reconstructed_data'),
                (NXfield(
                    z, 'z', attrs={'units': detector.row_pixel_size.units}),
                 NXfield(
                     y, 'y',
                     attrs={'units': detector.column_pixel_size.units}),
                 NXfield(
                     x, 'x',
                     attrs={'units': detector.column_pixel_size.units}),))
        else:
            nxprocess.data = NXdata(
                NXfield(tomo_recon_stacks, 'reconstructed_data'))

        # Create a copy of the input NeXus object and remove reduced
        # data
        exclude_items = [
            f'{nxentry.nxname}/reduced_data/data',
            f'{nxentry.nxname}/data/reduced_data',
            f'{nxentry.nxname}/data/rotation_angle',
        ]
        nxroot = nxcopy(nxroot, exclude_nxpaths=exclude_items)

        # Add the reconstructed data NXprocess to the new NeXus object
        nxentry = nxroot[nxroot.default]
        nxentry.reconstructed_data = nxprocess
        if 'data' not in nxentry:
            nxentry.data = NXdata()
            nxentry.data.set_default()
        nxentry.data.makelink(nxprocess.data.reconstructed_data)
        if num_tomo_stacks == 1:
            nxentry.data.attrs['axes'] = ['z', 'y', 'x']
            nxentry.data.makelink(nxprocess.data.x)
            nxentry.data.makelink(nxprocess.data.y)
            nxentry.data.makelink(nxprocess.data.z)
        nxentry.data.attrs['signal'] = 'reconstructed_data'

        # Add the center info to the new NeXus object

        # Add to metadata
        self._metadata['did'] = \
            f'{self._metadata["parent_did"]}/workflow=' + \
            f'{self._metadata["experiment_type"].lower()}_reconstructed'
        self._metadata['metadata']['reconstructed_data'] = \
            tool_config.model_dump()
        self._metadata['metadata']['reconstructed_data']['date'] = str(
            nxentry.reconstructed_data.date)

        return nxroot
1516
+
1517
+ #@profile
1518
+ def combine_data(self, nxroot, tool_config):
1519
+ """Combine the reconstructed tomography stacks.
1520
+
1521
+ :param nxroot: Data object containing the reconstructed data
1522
+ and metadata required to combine the tomography stacks.
1523
+ :type data: nexusformat.nexus.NXroot
1524
+ :param tool_config: Tool configuration.
1525
+ :type tool_config: CHAP.tomo.models.TomoCombineConfig
1526
+ :raises ValueError: Invalid or missing input or configuration
1527
+ parameter.
1528
+ :return: Combined reconstructed tomography data.
1529
+ :rtype: nexusformat.nexus.NXroot
1530
+ """
1531
+ # Third party modules
1532
+ from nexusformat.nexus import (
1533
+ NXdata,
1534
+ NXfield,
1535
+ NXprocess,
1536
+ NXroot,
1537
+ )
1538
+
1539
+ self.logger.info('Combine the reconstructed tomography stacks')
1540
+
1541
+ if isinstance(nxroot, NXroot):
1542
+ nxentry = nxroot[nxroot.default]
1543
+ else:
1544
+ raise ValueError(f'Invalid parameter nxroot ({nxroot})')
1545
+
1546
+ # Check if reconstructed image data is available
1547
+ if 'reconstructed_data' not in nxentry:
1548
+ raise KeyError(
1549
+ f'Unable to find valid reconstructed image data in {nxentry}')
1550
+
1551
+ # Create an NXprocess to store combined image reconstruction
1552
+ # (meta)data
1553
+ nxprocess = NXprocess()
1554
+
1555
+ if nxentry.reconstructed_data.data.reconstructed_data.ndim == 3:
1556
+ num_tomo_stacks = 1
1557
+ else:
1558
+ num_tomo_stacks = \
1559
+ nxentry.reconstructed_data.data.reconstructed_data.shape[0]
1560
+ if num_tomo_stacks == 1:
1561
+ self.logger.info('Only one stack available: leaving combine_data')
1562
+ return nxroot
1563
+
1564
+ # Get and combine the reconstructed stacks
1565
+ # - reconstructed axis data order: stack,row/-z,y,x
1566
+ # Note: NeXus can't follow a link if the data it points to is
1567
+ # too big. So get the data from the actual place, not from
1568
+ # nxentry.data
1569
+ # Also load one stack at a time to reduce risk of hitting NeXus
1570
+ # data access limit
1571
+ t0 = time()
1572
+ tomo_recon_combined = \
1573
+ nxentry.reconstructed_data.data.reconstructed_data[0,:,:,:]
1574
+ # RV check this out more
1575
+ # tomo_recon_combined = np.concatenate(
1576
+ # [tomo_recon_combined]
1577
+ # + [nxentry.reconstructed_data.data.reconstructed_data[i,:,:,:]
1578
+ # for i in range(1, num_tomo_stacks)])
1579
+ tomo_recon_combined = np.concatenate(
1580
+ [nxentry.reconstructed_data.data.reconstructed_data[i,:,:,:]
1581
+ for i in range(num_tomo_stacks-1, 0, -1)]
1582
+ + [tomo_recon_combined])
1583
+ self.logger.info(
1584
+ f'Combining the reconstructed stacks took {time()-t0:.2f} seconds')
1585
+ tomo_shape = tomo_recon_combined.shape
1586
+
1587
+ # Resize the combined tomography data stacks
1588
+ # - combined axis data order: row/-z,y,x
1589
+ if self.interactive or self.save_figures:
1590
+ x_bounds, y_bounds, z_bounds = self._resize_reconstructed_data(
1591
+ tomo_recon_combined, combine_data=True)
1592
+ tool_config.x_bounds = None if x_bounds is None else list(x_bounds)
1593
+ tool_config.y_bounds = None if y_bounds is None else list(y_bounds)
1594
+ tool_config.z_bounds = None if z_bounds is None else list(z_bounds)
1595
+ else:
1596
+ x_bounds = tool_config.x_bounds
1597
+ if x_bounds is None:
1598
+ self.logger.warning(
1599
+ 'x_bounds unspecified, combine data for full x-range')
1600
+ elif not is_int_pair(
1601
+ x_bounds, ge=0, le=tomo_shape[2]):
1602
+ raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
1603
+ y_bounds = tool_config.y_bounds
1604
+ if y_bounds is None:
1605
+ self.logger.warning(
1606
+ 'y_bounds unspecified, combine data for full y-range')
1607
+ elif not is_int_pair(
1608
+ y_bounds, ge=0, le=tomo_shape[1]):
1609
+ raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
1610
+ z_bounds = tool_config.z_bounds
1611
+ if z_bounds is None:
1612
+ self.logger.warning(
1613
+ 'z_bounds unspecified, combine data for full z-range')
1614
+ elif not is_int_pair(
1615
+ z_bounds, ge=0, le=tomo_shape[0]):
1616
+ raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
1617
+ if x_bounds is None:
1618
+ x_range = (0, tomo_shape[2])
1619
+ x_slice = x_range[1]//2
1620
+ else:
1621
+ x_range = (min(x_bounds), max(x_bounds))
1622
+ x_slice = (x_bounds[0]+x_bounds[1])//2
1623
+ if y_bounds is None:
1624
+ y_range = (0, tomo_shape[1])
1625
+ y_slice = y_range[1]//2
1626
+ else:
1627
+ y_range = (min(y_bounds), max(y_bounds))
1628
+ y_slice = (y_bounds[0]+y_bounds[1])//2
1629
+ if z_bounds is None:
1630
+ z_range = (0, tomo_shape[0])
1631
+ z_slice = z_range[1]//2
1632
+ else:
1633
+ z_range = (min(z_bounds), max(z_bounds))
1634
+ z_slice = (z_bounds[0]+z_bounds[1])//2
1635
+ z_dim_org = tomo_shape[0]
1636
+ tomo_recon_combined = tomo_recon_combined[
1637
+ z_range[0]:z_range[1],y_range[0]:y_range[1],x_range[0]:x_range[1]]
1638
+
1639
+ # Convert the reconstructed tomography data from internal
1640
+ # coordinate frame row/-z,y,x with the origin on the
1641
+ # near-left-top corner to an z,y,x coordinate frame.
1642
+ # Here x is to the right, y along the beam direction and
1643
+ # z upwards in the lab frame of reference
1644
+ tomo_recon_combined = np.flip(tomo_recon_combined, 0)
1645
+ tomo_shape = tomo_recon_combined.shape
1646
+ z_range = (z_dim_org-z_range[1], z_dim_org-z_range[0])
1647
+
1648
+ # Get coordinate axes
1649
+ detector = nxentry.instrument.detector
1650
+ row_pixel_size = float(detector.row_pixel_size)
1651
+ column_pixel_size = float(detector.column_pixel_size)
1652
+ x = column_pixel_size * (
1653
+ np.linspace(x_range[0], x_range[1], x_range[1]-x_range[0], False)
1654
+ - 0.5*detector.columns + 0.5)
1655
+ if nxentry.reconstructed_data.get('x_bounds', None) is not None:
1656
+ x += column_pixel_size*nxentry.reconstructed_data.x_bounds[0]
1657
+ x = np.asarray(x + nxentry.reduced_data.x_translation[0])
1658
+ y = column_pixel_size * (
1659
+ np.linspace(y_range[0], y_range[1], y_range[1]-y_range[0], False)
1660
+ - 0.5*detector.columns + 0.5)
1661
+ if nxentry.reconstructed_data.get('y_bounds', None) is not None:
1662
+ y += column_pixel_size*nxentry.reconstructed_data.y_bounds[0]
1663
+ y = np.asarray(y)
1664
+ z = row_pixel_size*(
1665
+ np.linspace(z_range[0], z_range[1], z_range[1]-z_range[0], False)
1666
+ - int(nxentry.reduced_data.img_row_bounds[0])
1667
+ + 0.5*detector.rows - 0.5)
1668
+ z = np.asarray(z + nxentry.reduced_data.z_translation[0])
1669
+
1670
+ # Save a few combined image slices
1671
+ if self.save_figures:
1672
+ x_slice = tomo_shape[2]//2
1673
+ title = f'recon combined x={x[x_slice]:.4f}'
1674
+ self._figures.append(
1675
+ (quick_imshow(
1676
+ tomo_recon_combined[:,:,x_slice], title=title,
1677
+ origin='lower', extent=(y[0], y[-1], z[0], z[-1]),
1678
+ show_fig=False, return_fig=True),
1679
+ re.sub(r'\s+', '_', title)))
1680
+ y_slice = tomo_shape[1]//2
1681
+ title = f'recon combined y={y[y_slice]:.4f}'
1682
+ self._figures.append(
1683
+ (quick_imshow(
1684
+ tomo_recon_combined[:,y_slice,:], title=title,
1685
+ origin='lower', extent=(x[0], x[-1], z[0], z[-1]),
1686
+ show_fig=False, return_fig=True),
1687
+ re.sub(r'\s+', '_', title)))
1688
+ z_slice = tomo_shape[0]//2
1689
+ title = f'recon combined z={z[z_slice]:.4f}'
1690
+ self._figures.append(
1691
+ (quick_imshow(
1692
+ tomo_recon_combined[z_slice,:,:], title=title,
1693
+ origin='lower', extent=(x[0], x[-1], y[0], y[-1]),
1694
+ show_fig=False, return_fig=True),
1695
+ re.sub(r'\s+', '_', title)))
1696
+
1697
+ # Add image reconstruction to reconstructed data NXprocess
1698
+ # - combined axis data order: z,y,x
1699
+ if x_bounds is not None and x_bounds != (0, tomo_shape[2]):
1700
+ nxprocess.x_bounds = x_bounds
1701
+ nxprocess.x_bounds.units = 'pixels'
1702
+ nxprocess.x_bounds.attrs['long_name'] = \
1703
+ 'x range indices in reconstructed data frame of reference'
1704
+ if y_bounds is not None and y_bounds != (0, tomo_shape[1]):
1705
+ nxprocess.y_bounds = y_bounds
1706
+ nxprocess.y_bounds.units = 'pixels'
1707
+ nxprocess.y_bounds.attrs['long_name'] = \
1708
+ 'y range indices in reconstructed data frame of reference'
1709
+ if z_bounds is not None and z_bounds != (0, tomo_shape[0]):
1710
+ nxprocess.z_bounds = z_bounds
1711
+ nxprocess.z_bounds.units = 'pixels'
1712
+ nxprocess.z_bounds.attrs['long_name'] = \
1713
+ 'z range indices in reconstructed data frame of reference'
1714
+ nxprocess.data = NXdata(
1715
+ NXfield(tomo_recon_combined, 'combined_data'),
1716
+ (NXfield(z, 'z', attrs={'units': detector.row_pixel_size.units}),
1717
+ NXfield(
1718
+ y, 'y', attrs={'units': detector.column_pixel_size.units}),
1719
+ NXfield(
1720
+ x, 'x', attrs={'units': detector.column_pixel_size.units}),))
1721
+
1722
+ # Create a copy of the input NeXus object and remove
1723
+ # reconstructed data
1724
+ exclude_items = [
1725
+ f'{nxentry.nxname}/reconstructed_data/data',
1726
+ f'{nxentry.nxname}/data/reconstructed_data',
1727
+ ]
1728
+ nxroot = nxcopy(nxroot, exclude_nxpaths=exclude_items)
1729
+
1730
+ # Add the combined data NXprocess to the new NeXus object
1731
+ nxentry = nxroot[nxroot.default]
1732
+ nxentry.combined_data = nxprocess
1733
+ if 'data' not in nxentry:
1734
+ nxentry.data = NXdata()
1735
+ nxentry.data.set_default()
1736
+ nxentry.data.makelink(nxprocess.data.combined_data)
1737
+ nxentry.data.attrs['axes'] = ['z', 'y', 'x']
1738
+ nxentry.data.makelink(nxprocess.data.x)
1739
+ nxentry.data.makelink(nxprocess.data.y)
1740
+ nxentry.data.makelink(nxprocess.data.z)
1741
+ nxentry.data.attrs['signal'] = 'combined_data'
1742
+
1743
+ # Add to metadata
1744
+ self._metadata['did'] = \
1745
+ f'{self._metadata["parent_did"]}/workflow=' + \
1746
+ f'{self._metadata["experiment_type"].lower()}_combined'
1747
+ self._metadata['metadata']['combined_data'] = \
1748
+ tool_config.model_dump()
1749
+ self._metadata['metadata']['combined_data']['date'] = str(
1750
+ nxentry.combined_data.date)
1751
+
1752
+ return nxroot
1753
+
1754
+ #@profile
1755
+ def _gen_dark(self, nxentry, reduced_data, image_key):
1756
+ """Generate dark field."""
1757
+ # Third party modules
1758
+ from nexusformat.nexus import NXdata
1759
+
1760
+ # Get the dark field images
1761
+ field_indices = [
1762
+ index for index, key in enumerate(image_key) if key == 2]
1763
+ if field_indices:
1764
+ tdf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1765
+ else:
1766
+ self.logger.warning('Dark field unavailable')
1767
+ return reduced_data
1768
+
1769
+ # Take median
1770
+ if tdf_stack.ndim == 2:
1771
+ tdf = np.asarray(tdf_stack)
1772
+ elif tdf_stack.ndim == 3:
1773
+ tdf = np.median(tdf_stack, axis=0)
1774
+ del tdf_stack
1775
+ else:
1776
+ raise RuntimeError(f'Invalid tdf_stack shape ({tdf_stack.shape})')
1777
+
1778
+ # Remove dark field intensities above the cutoff
1779
+ tdf_cutoff = tdf.min() + 2 * (np.median(tdf)-tdf.min())
1780
+ self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1781
+ if tdf_cutoff is not None:
1782
+ if not isinstance(tdf_cutoff, (int, float)) or tdf_cutoff < 0:
1783
+ self.logger.warning(
1784
+ f'Ignoring illegal value of tdf_cutoff {tdf_cutoff}')
1785
+ else:
1786
+ tdf[tdf > tdf_cutoff] = np.nan
1787
+ self.logger.debug(f'tdf_cutoff = {tdf_cutoff}')
1788
+
1789
+ # Remove nans
1790
+ tdf_mean = np.nanmean(tdf)
1791
+ self.logger.debug(f'tdf_mean = {tdf_mean}')
1792
+ np.nan_to_num(
1793
+ tdf, copy=False, nan=tdf_mean, posinf=tdf_mean, neginf=0.)
1794
+
1795
+ # Save dark field
1796
+ if self.save_figures:
1797
+ self._figures.append(
1798
+ (quick_imshow(
1799
+ tdf, title='Dark field', show_fig=False, return_fig=True),
1800
+ 'dark_field'))
1801
+
1802
+ # Add dark field to reduced data NXprocess
1803
+ reduced_data.data = NXdata()
1804
+ reduced_data.data.dark_field = tdf
1805
+
1806
+ return reduced_data
1807
+
1808
+ #@profile
1809
+ def _gen_bright(self, nxentry, reduced_data, image_key):
1810
+ """Generate bright field."""
1811
+ # Third party modules
1812
+ from nexusformat.nexus import NXdata
1813
+
1814
+ # Get the bright field images
1815
+ field_indices = [
1816
+ index for index, key in enumerate(image_key) if key == 1]
1817
+ if field_indices:
1818
+ tbf_stack = nxentry.instrument.detector.data[field_indices,:,:]
1819
+ else:
1820
+ raise ValueError('Bright field unavailable')
1821
+
1822
+ # Take median if more than one image
1823
+ #
1824
+ # Median or mean: It may be best to try the median because of
1825
+ # some image artifacts that arise due to crinkles in the
1826
+ # upstream kapton tape windows causing some phase contrast
1827
+ # images to appear on the detector.
1828
+ #
1829
+ # One thing that also may be useful in a future implementation
1830
+ # is to do a brightfield adjustment on EACH frame of the tomo
1831
+ # based on a ROI in the corner of the frame where there is no
1832
+ # sample but there is the direct X-ray beam because there is
1833
+ # frame to frame fluctuations from the incoming beam. We don’t
1834
+ # typically account for them but potentially could.
1835
+ if tbf_stack.ndim == 2:
1836
+ tbf = np.asarray(tbf_stack)
1837
+ elif tbf_stack.ndim == 3:
1838
+ tbf = np.median(tbf_stack, axis=0)
1839
+ del tbf_stack
1840
+ else:
1841
+ raise RuntimeError(f'Invalid tbf_stack shape ({tbf_stack.shape})')
1842
+
1843
+ # Set any non-positive values to one
1844
+ # (avoid negative bright field values for spikes in dark field)
1845
+ tbf[tbf < 1] = 1
1846
+
1847
+ # Save bright field
1848
+ if self.save_figures:
1849
+ self._figures.append(
1850
+ (quick_imshow(
1851
+ tbf, title='Bright field', show_fig=False,
1852
+ return_fig=True),
1853
+ 'bright_field'))
1854
+
1855
+ # Add bright field to reduced data NXprocess
1856
+ if 'data' not in reduced_data:
1857
+ reduced_data.data = NXdata()
1858
+ reduced_data.data.bright_field = tbf
1859
+
1860
+ return reduced_data
1861
+
1862
    #@profile
    def _set_detector_bounds(
            self, nxentry, reduced_data, image_key, theta, img_row_bounds,
            calibrate_center_rows):
        """Set vertical detector bounds for each image stack. Right
        now the range is the same for each set in the image stack.

        :param nxentry: NeXus entry holding the raw detector data and
            sample translations.
        :param reduced_data: Reduced-data NXprocess; supplies the
            bright field and the optional image mask.
        :param image_key: Per-frame image type keys (0 = tomography).
        :param theta: Rotation angle of the first image, used only in
            the selection figure title.
        :param img_row_bounds: Preselected detector row bounds, or
            None to determine them here.
        :param calibrate_center_rows: Either a boolean flag or a pair
            of explicit row indices for rotation axis calibration.
        :return: The selected pair of detector image row indices.
        """
        # Get the first tomography image and the reference heights
        image_mask = reduced_data.get('image_mask')
        if image_mask is None:
            first_image_index = 0
        else:
            # First unmasked frame (argmax returns the first True)
            first_image_index = int(np.argmax(image_mask))
        field_indices_all = [
            index for index, key in enumerate(image_key) if key == 0]
        if not field_indices_all:
            raise ValueError('Tomography field(s) unavailable')
        # One stack per unique sample z-translation; use the center
        # stack to pick a representative image
        z_translation_all = nxentry.sample.z_translation[field_indices_all]
        z_translation_levels = sorted(list(set(z_translation_all)))
        num_tomo_stacks = len(z_translation_levels)
        center_stack_index = num_tomo_stacks//2
        z_translation = z_translation_levels[center_stack_index]
        try:
            field_indices = [
                field_indices_all[index]
                for index, z in enumerate(z_translation_all)
                if z == z_translation]
            first_image = nxentry.instrument.detector.data[
                field_indices[first_image_index]]
        except Exception as exc:
            raise RuntimeError('Unable to load the tomography images') from exc

        # Set initial image bounds or rotation calibration rows
        if num_tomo_stacks > 1 and (nxentry.instrument.source.attrs['station']
                                    in ('id1a3', 'id3a')):
            # For stacked scans at id1a3/id3a the bounds are derived
            # from the stack spacing below, not from user input
            self.logger.warning('Ignoring parameter img_row_bounds '
                                'for id1a3 and id3a for an image stack')
            img_row_bounds = None
        tbf = reduced_data.data.bright_field.nxdata
        if (not isinstance(calibrate_center_rows, bool)
                and is_int_pair(calibrate_center_rows)):
            # Explicit calibration rows take precedence
            img_row_bounds = calibrate_center_rows
        elif img_row_bounds is None:
            if nxentry.instrument.source.attrs['station'] in ('id1a3', 'id3a'):
                # Third party modules
                from nexusformat.nexus import (
                    NXdata,
                    NXfield,
                )

                # Local modules
                from CHAP.utils.fit import FitProcessor

                pixel_size = float(nxentry.instrument.detector.row_pixel_size)
                # Try to get a fit from the bright field: fit a
                # rectangle profile to the row sums to locate the
                # illuminated band on the detector
                row_sum = np.sum(tbf, 1)
                num = len(row_sum)
                fit = FitProcessor(**self.run_config)
                model = {'model': 'rectangle',
                         'parameters': [
                             {'name': 'amplitude',
                              'value': row_sum.max()-row_sum.min(),
                              'min': 0.0},
                             {'name': 'center1', 'value': 0.25*num,
                              'min': 0.0, 'max': num},
                             {'name': 'sigma1', 'value': num/7.0,
                              'min': sys.float_info.min},
                             {'name': 'center2', 'value': 0.75*num,
                              'min': 0.0, 'max': num},
                             {'name': 'sigma2', 'value': num/7.0,
                              'min': sys.float_info.min}]}
                bounds_fit = fit.process(
                    data=NXdata(
                        NXfield(row_sum, 'y'),
                        NXfield(np.array(range(num)), 'x')),
                    config={'models': [model], 'method': 'trf'})
                parameters = bounds_fit.best_values
                row_low_fit = parameters.get('center1', None)
                row_upp_fit = parameters.get('center2', None)
                sig_low = parameters.get('sigma1', None)
                sig_upp = parameters.get('sigma2', None)
                # Accept the fit only if the edges are ordered, inside
                # the detector, and sharp relative to the band width
                have_fit = (bounds_fit.success and row_low_fit is not None
                            and row_upp_fit is not None and sig_low is not None
                            and sig_upp is not None
                            and 0 <= row_low_fit < row_upp_fit <= row_sum.size
                            and (sig_low+sig_upp) / (row_upp_fit-row_low_fit) < 0.1)
                if num_tomo_stacks == 1:
                    if have_fit:
                        # Add a pixel margin for roundoff effects in fit
                        row_low_fit += 1
                        row_upp_fit -= 1
                        delta_z = (row_upp_fit-row_low_fit) * pixel_size
                    else:
                        # Set a default range of 1 mm
                        # RV can we get this from the slits?
                        delta_z = 1.0
                else:
                    # Get the default range from the reference heights
                    # (smallest spacing between adjacent stacks)
                    delta_z = z_translation_levels[1]-z_translation_levels[0]
                    for i in range(2, num_tomo_stacks):
                        delta_z = min(
                            delta_z,
                            z_translation_levels[i]-z_translation_levels[i-1])
                self.logger.debug(f'delta_z = {delta_z}')
                num_row_min = int((delta_z + 0.5*pixel_size) / pixel_size)
                if num_row_min > tbf.shape[0]:
                    self.logger.warning(
                        'Image bounds and pixel size prevent seamless '
                        'stacking')
                    row_low = 0
                    row_upp = tbf.shape[0]
                else:
                    self.logger.debug(f'num_row_min = {num_row_min}')
                    if have_fit:
                        # Center the default range relative to the fitted
                        # window
                        row_low = int((row_low_fit+row_upp_fit-num_row_min)/2)
                        row_upp = row_low+num_row_min
                    else:
                        # Center the default range
                        row_low = int((tbf.shape[0]-num_row_min)/2)
                        row_upp = row_low+num_row_min
                img_row_bounds = (row_low, row_upp)
                if calibrate_center_rows:
                    # Add a small margin to avoid edge effects
                    offset = int(min(5, 0.1*(row_upp-row_low)))
                    img_row_bounds = (row_low+offset, row_upp-1-offset)
            else:
                if num_tomo_stacks > 1:
                    raise NotImplementedError(
                        'Selecting image bounds or calibrating rotation axis '
                        'for multiple stacks on FMB')
                # For FMB: use the first tomography image to select range
                # RV revisit if they do tomography with multiple stacks
                if img_row_bounds is None and not self.interactive:
                    if calibrate_center_rows:
                        self.logger.warning(
                            'calibrate_center_rows unspecified, find rotation '
                            'axis at detector bounds (with a small margin)')
                        # Add a small margin to avoid edge effects
                        offset = min(5, 0.1*first_image.shape[0])
                        img_row_bounds = (
                            offset, first_image.shape[0]-1-offset)
                    else:
                        self.logger.warning(
                            'img_row_bounds unspecified, reduce data for '
                            'entire detector range')
                        img_row_bounds = (0, first_image.shape[0])
        # Let the user confirm/adjust the selection (a non-interactive
        # run passes the preselected bounds straight through)
        if calibrate_center_rows:
            title='Select two detector image row indices to '\
                'calibrate rotation axis (in range '\
                f'[0, {first_image.shape[0]-1}])'
        else:
            title='Select detector image row bounds for data '\
                f'reduction (in range [0, {first_image.shape[0]}])'
        buf, img_row_bounds = select_image_indices(
            first_image, 0, b=tbf, preselected_indices=img_row_bounds,
            title=title,
            title_a=r'Tomography image at $\theta$ = 'f'{round(theta, 2)+0}',
            title_b='Bright field',
            interactive=self.interactive, return_buf=self.save_figures)
        # NOTE(review): delta_z/pixel_size exist only when the id1a3/
        # id3a multi-stack branch ran; the short-circuit on
        # num_tomo_stacks > 1 appears to guarantee that — confirm
        if not calibrate_center_rows and (num_tomo_stacks > 1
                and (img_row_bounds[1]-img_row_bounds[0]+1)
                < int((delta_z - 0.5*pixel_size) / pixel_size)):
            self.logger.warning(
                'Image bounds and pixel size prevent seamless stacking')

        # Save figure
        if self.save_figures:
            if calibrate_center_rows:
                filename = 'rotation_calibration_rows'
            else:
                filename = 'detector_image_bounds'
            self._figures.append((buf, filename))

        return img_row_bounds
2038
+
2039
+ #@profile
2040
+ def _gen_thetas(self, nxentry, image_key):
2041
+ """Get the rotation angles for the image stacks."""
2042
+ # Get the rotation angles (in degrees)
2043
+ field_indices_all = [
2044
+ index for index, key in enumerate(image_key) if key == 0]
2045
+ z_translation_all = nxentry.sample.z_translation[field_indices_all]
2046
+ z_translation_levels = sorted(list(set(z_translation_all)))
2047
+ thetas = None
2048
+ for i, z_translation in enumerate(z_translation_levels):
2049
+ field_indices = [
2050
+ field_indices_all[index]
2051
+ for index, z in enumerate(z_translation_all)
2052
+ if z == z_translation]
2053
+ sequence_numbers = \
2054
+ nxentry.instrument.detector.sequence_number[field_indices]
2055
+ assert (list(sequence_numbers)
2056
+ == list(range((len(sequence_numbers)))))
2057
+ if thetas is None:
2058
+ thetas = nxentry.sample.rotation_angle[
2059
+ field_indices][sequence_numbers]
2060
+ else:
2061
+ assert all(
2062
+ thetas[i] == nxentry.sample.rotation_angle[
2063
+ field_indices[index]]
2064
+ for i, index in enumerate(sequence_numbers))
2065
+
2066
+ return np.asarray(thetas)
2067
+
2068
+ #@profile
2069
+ def _set_zoom_or_delta_theta(self, thetas, delta_theta=None):
2070
+ """Set zoom and/or delta theta to reduce memory the requirement
2071
+ for the analysis.
2072
+ """
2073
+ # Local modules
2074
+ from CHAP.utils.general import index_nearest
2075
+
2076
+ # if input_yesno(
2077
+ # '\nDo you want to zoom in to reduce memory '
2078
+ # 'requirement (y/n)?', 'n'):
2079
+ # zoom_perc = input_int(
2080
+ # ' Enter zoom percentage', ge=1, le=100)
2081
+ # else:
2082
+ # zoom_perc = None
2083
+ zoom_perc = None
2084
+
2085
+ if delta_theta is not None and not is_num(delta_theta, gt=0):
2086
+ self.logger.warning(
2087
+ f'Invalid parameter delta_theta ({delta_theta}), '
2088
+ 'ignoring delta_theta')
2089
+ delta_theta = None
2090
+ if self.interactive:
2091
+ if delta_theta is None:
2092
+ delta_theta = thetas[1]-thetas[0]
2093
+ print(f'\nAvailable \u03b8 range: [{thetas[0]}, {thetas[-1]}]')
2094
+ print(f'Current \u03b8 interval: {delta_theta}')
2095
+ if input_yesno(
2096
+ 'Do you want to change the \u03b8 interval to reduce the '
2097
+ 'memory requirement (y/n)?', 'n'):
2098
+ delta_theta = input_num(
2099
+ ' Enter the desired \u03b8 interval',
2100
+ ge=thetas[1]-thetas[0], lt=(thetas[-1]-thetas[0])/2)
2101
+ if delta_theta is not None:
2102
+ delta_theta = index_nearest(thetas, thetas[0]+delta_theta)
2103
+ if delta_theta <= 1:
2104
+ delta_theta = None
2105
+
2106
+ return zoom_perc, delta_theta
2107
+
2108
    #@profile
    def _gen_tomo(
            self, nxentry, reduced_data, image_key, calibrate_center_rows):
        """Generate tomography fields.

        Loads the tomography frames (image_key == 0), subtracts the
        dark field, normalizes by the bright field, linearizes with
        -log, and stores the resulting stacks in
        ``reduced_data.data.tomo_fields``. All arithmetic is performed
        in place with numexpr to limit memory use.

        :param nxentry: NeXus entry holding the raw detector data.
        :param reduced_data: Reduced-data NXprocess; supplies the
            dark/bright fields, image bounds, rotation angles, and the
            optional image mask, and receives the tomography fields.
        :param image_key: Per-frame image type keys (0 = tomography).
        :param calibrate_center_rows: Falsy for a normal reduction, or
            a pair of row indices when calibrating the rotation axis
            (then only the center stack is processed).
        :return: The updated `reduced_data`.
        """
        # Third party modules
        from numexpr import evaluate
        from scipy.ndimage import zoom

        # Get dark field
        if 'dark_field' in reduced_data.data:
            tdf = reduced_data.data.dark_field.nxdata
        else:
            self.logger.warning('Dark field unavailable')
            tdf = None

        # Get bright field
        tbf = reduced_data.data.bright_field.nxdata
        tbf_shape = tbf.shape

        # Subtract dark field (in place, overwriting tbf)
        if tdf is not None:
            try:
                with SetNumexprThreads(self._num_core):
                    evaluate('tbf-tdf', out=tbf)
            except TypeError as exc:
                raise TypeError(
                    f'\nA {type(exc).__name__} occured while subtracting '
                    'the dark field with num_expr.evaluate()'
                    '\nTry reducing the detector range') from exc

        # Get image bounds
        # NOTE(review): img_row_bounds is assumed to be present in
        # reduced_data at this point — confirm against callers
        img_row_bounds = tuple(reduced_data.get('img_row_bounds'))
        img_column_bounds = tuple(
            reduced_data.get('img_column_bounds', (0, tbf_shape[1])))

        # Check if this run is a rotation axis calibration
        # and resize dark and bright fields accordingly
        if calibrate_center_rows:
            # Keep only the two calibration rows
            if tdf is not None:
                tdf = tdf[calibrate_center_rows,:]
            tbf = tbf[calibrate_center_rows,:]
        else:
            # Crop to the selected row/column bounds if they do not
            # already span the full detector
            if (img_row_bounds != (0, tbf.shape[0])
                    or img_column_bounds != (0, tbf.shape[1])):
                if tdf is not None:
                    tdf = tdf[
                        img_row_bounds[0]:img_row_bounds[1],
                        img_column_bounds[0]:img_column_bounds[1]]
                tbf = tbf[
                    img_row_bounds[0]:img_row_bounds[1],
                    img_column_bounds[0]:img_column_bounds[1]]

        # Get thetas (in degrees)
        thetas = reduced_data.rotation_angle.nxdata

        # Get or create image mask (defaults to keeping every frame)
        image_mask = reduced_data.get('image_mask')
        if image_mask is None:
            image_mask = [True]*len(thetas)
        else:
            image_mask = list(image_mask)

        # Get the tomography images, one stack per unique sample
        # z-translation level
        field_indices_all = [
            index for index, key in enumerate(image_key) if key == 0]
        if not field_indices_all:
            raise ValueError('Tomography field(s) unavailable')
        z_translation_all = nxentry.sample.z_translation[
            field_indices_all]
        z_translation_levels = sorted(list(set(z_translation_all)))
        num_tomo_stacks = len(z_translation_levels)
        if calibrate_center_rows:
            # Calibration only needs the center stack
            center_stack_index = num_tomo_stacks//2
        else:
            center_stack_index = 0
        tomo_stacks = num_tomo_stacks*[None]
        horizontal_shifts = []
        vertical_shifts = []
        for i, z_translation in enumerate(z_translation_levels):
            if calibrate_center_rows and i != center_stack_index:
                continue
            try:
                field_indices = [
                    field_indices_all[i]
                    for i, z in enumerate(z_translation_all)
                    if z == z_translation]
                field_indices_masked = [
                    v for i, v in enumerate(field_indices) if image_mask[i]]
                # Each stack must have a single x/z translation
                horizontal_shift = list(
                    set(nxentry.sample.x_translation[field_indices_masked]))
                assert len(horizontal_shift) == 1
                horizontal_shifts += horizontal_shift
                vertical_shift = list(
                    set(nxentry.sample.z_translation[field_indices_masked]))
                assert len(vertical_shift) == 1
                vertical_shifts += vertical_shift
                # Frames must already be in acquisition order
                sequence_numbers = \
                    nxentry.instrument.detector.sequence_number[field_indices]
                assert (list(sequence_numbers)
                        == list(range((len(sequence_numbers)))))
                tomo_stacks[i] = nxentry.instrument.detector.data.nxdata[
                    field_indices_masked]
            except Exception as exc:
                raise RuntimeError('Unable to load the tomography images '
                                   f'for stack {i}') from exc
            if not calibrate_center_rows:
                # All stacks must share one shape
                if not i:
                    tomo_stack_shape = tomo_stacks[0].shape
                else:
                    assert tomo_stacks[i].shape == tomo_stack_shape

        tomo_stack_shape = None
        for i in range(num_tomo_stacks):
            tomo_stack = tomo_stacks[i]
            if tomo_stack is None:
                continue
            # Resize the tomography images
            # Right now the range is the same for each set in the stack
            if calibrate_center_rows:
                tomo_stack = tomo_stack[:,calibrate_center_rows,:]
            else:
                if (img_row_bounds != (0, tomo_stack.shape[1])
                        or img_column_bounds != (0, tomo_stack.shape[2])):
                    tomo_stack = tomo_stack[:,
                        img_row_bounds[0]:img_row_bounds[1],
                        img_column_bounds[0]:img_column_bounds[1]]

            # Subtract dark field (in place)
            if tdf is not None:
                try:
                    with SetNumexprThreads(self._num_core):
                        evaluate('tomo_stack-tdf', out=tomo_stack)
                except TypeError as exc:
                    raise TypeError(
                        f'\nA {type(exc).__name__} occured while subtracting '
                        'the dark field with num_expr.evaluate()'
                        '\nTry reducing the detector range'
                        f'\n(currently img_row_bounds = {img_row_bounds}, and '
                        f'img_column_bounds = {img_column_bounds})\n') from exc

            # Normalize by the bright field (in place)
            try:
                with SetNumexprThreads(self._num_core):
                    evaluate('tomo_stack/tbf', out=tomo_stack, truediv=True)
            except TypeError as exc:
                raise TypeError(
                    f'\nA {type(exc).__name__} occured while normalizing the '
                    'tomography data with num_expr.evaluate()'
                    '\nTry reducing the detector range'
                    f'\n(currently img_row_bounds = {img_row_bounds}, and '
                    f'img_column_bounds = {img_column_bounds})\n') from exc

            # Remove non-positive values and linearize data
            # (-log of transmission gives absorption)
            # RV make input argument? cutoff = 1.e-6
            with SetNumexprThreads(self._num_core):
                cutoff = np.float32(1.e-6)
                evaluate(
                    'where(tomo_stack < cutoff, cutoff, tomo_stack)',
                    out=tomo_stack)
            with SetNumexprThreads(self._num_core):
                evaluate('-log(tomo_stack)', out=tomo_stack)

            # Get rid of nans/infs that may be introduced by normalization
            tomo_stack[~np.isfinite(tomo_stack)] = 0

            # Downsize tomography stack to smaller size
            # (zoom_perc is hardwired to 100, so this branch is
            # currently inactive)
            zoom_perc = 100
            if zoom_perc != 100:
                t0 = time()
                self.logger.debug('Zooming in ...')
                tomo_zoom_list = []
                for j in range(tomo_stack.shape[0]):
                    tomo_zoom = zoom(tomo_stack[j,:,:], 0.01*zoom_perc)
                    tomo_zoom_list.append(tomo_zoom)
                tomo_stack = np.stack(tomo_zoom_list)
                self.logger.info(f'Zooming in took {time()-t0:.2f} seconds')
                del tomo_zoom_list

            # Combine resized stacks
            tomo_stacks[i] = tomo_stack
            if tomo_stack_shape is None:
                tomo_stack_shape = tomo_stack.shape
            else:
                assert tomo_stack_shape == tomo_stack.shape

        # Skipped stacks (calibration mode) become zero-filled
        # placeholders so the output keeps a uniform shape
        for i in range(num_tomo_stacks):
            if tomo_stacks[i] is None:
                tomo_stacks[i] = np.zeros(tomo_stack_shape)

        # Add tomo field info to reduced data NXprocess
        reduced_data.x_translation = horizontal_shifts
        reduced_data.x_translation.units = 'mm'
        reduced_data.z_translation = vertical_shifts
        reduced_data.z_translation.units = 'mm'
        reduced_data.data.tomo_fields = tomo_stacks
        reduced_data.data.attrs['signal'] = 'tomo_fields'

        # Release the large intermediate arrays
        if tdf is not None:
            del tdf
        del tbf
        del tomo_stacks

        return reduced_data
2311
+
2312
+ #@profile
2313
+ def _find_center_one_plane(
2314
+ self, tomo_stacks, stack_index, row, offset_row, thetas,
2315
+ num_core=1, center_offset_min=-50, center_offset_max=50,
2316
+ center_search_range=None, gaussian_sigma=None, ring_width=None,
2317
+ prev_center_offset=None):
2318
+ """Find center for a single tomography plane.
2319
+
2320
+ tomo_stacks data axes order: stack,theta,row,column
2321
+ thetas in radians
2322
+ """
2323
+ # Third party modules
2324
+ from tomopy import (
2325
+ # find_center,
2326
+ find_center_vo,
2327
+ find_center_pc,
2328
+ )
2329
+
2330
+ if not gaussian_sigma:
2331
+ gaussian_sigma = None
2332
+ if not ring_width:
2333
+ ring_width = None
2334
+
2335
+ # Get the sinogram for the selected plane
2336
+ sinogram = tomo_stacks[stack_index,:,offset_row,:]
2337
+ center_offset_range = sinogram.shape[1]/2
2338
+
2339
+ # Try Nghia Vo's method to find the center
2340
+ t0 = time()
2341
+ if center_offset_min is None:
2342
+ center_offset_min = -50
2343
+ if center_offset_max is None:
2344
+ center_offset_max = 50
2345
+ if num_core > NUM_CORE_TOMOPY_LIMIT:
2346
+ self.logger.debug(
2347
+ f'Running find_center_vo on {NUM_CORE_TOMOPY_LIMIT} '
2348
+ 'cores ...')
2349
+ tomo_center = find_center_vo(
2350
+ sinogram, ncore=NUM_CORE_TOMOPY_LIMIT, smin=center_offset_min,
2351
+ smax=center_offset_max)
2352
+ else:
2353
+ tomo_center = find_center_vo(
2354
+ sinogram, ncore=num_core, smin=center_offset_min,
2355
+ smax=center_offset_max)
2356
+ self.logger.info(
2357
+ f'Finding center using Nghia Vo\'s method took {time()-t0:.2f} '
2358
+ 'seconds')
2359
+ center_offset_vo = float(tomo_center-center_offset_range)
2360
+ self.logger.info(
2361
+ f'Center at row {row} using Nghia Vo\'s method = '
2362
+ f'{center_offset_vo:.2f}')
2363
+
2364
+ selected_center_offset = center_offset_vo
2365
+ if self.interactive or self.save_figures:
2366
+
2367
+ # Try Guizar-Sicairos's phase correlation method to find
2368
+ # the center
2369
+ t0 = time()
2370
+ tomo_center = find_center_pc(
2371
+ tomo_stacks[stack_index,0,:,:],
2372
+ tomo_stacks[stack_index,-1,:,:])
2373
+ self.logger.info(
2374
+ 'Finding center using Guizar-Sicairos\'s phase correlation '
2375
+ f'method took {time()-t0:.2f} seconds')
2376
+ center_offset_pc = float(tomo_center-center_offset_range)
2377
+ self.logger.info(
2378
+ f'Center at row {row} using Guizar-Sicairos\'s image entropy '
2379
+ f'method = {center_offset_pc:.2f}')
2380
+
2381
+ # Try Donath's image entropy method to find the center
2382
+ # Skip this method, it seems flawed somehow or I'm doing something wrong
2383
+ # t0 = time()
2384
+ # tomo_center = find_center(
2385
+ # tomo_stacks[stack_index,:,:,:], thetas,
2386
+ # ind=offset_row)
2387
+ # self.logger.info(
2388
+ # 'Finding center using Donath\'s image entropy method took '
2389
+ # f'{time()-t0:.2f} seconds')
2390
+ # center_offset_ie = float(tomo_center-center_offset_range)
2391
+ # self.logger.info(
2392
+ # f'Center at row {row} using Donath\'s image entropy method = '
2393
+ # f'{center_offset_ie:.2f}')
2394
+
2395
+ # Reconstruct the plane for the Nghia Vo's center
2396
+ t0 = time()
2397
+ center_offsets = [center_offset_vo]
2398
+ fig_titles = [f'Vo\'s method: center offset = '
2399
+ f'{center_offset_vo:.2f}']
2400
+ recon_planes = [self._reconstruct_planes(
2401
+ sinogram, center_offset_vo, thetas, num_core=num_core,
2402
+ gaussian_sigma=gaussian_sigma, ring_width=ring_width)]
2403
+ self.logger.info(
2404
+ f'Reconstructing row {row} with center at '
2405
+ f'{center_offset_vo} took {time()-t0:.2f} seconds')
2406
+
2407
+ # Reconstruct the plane for the Guizar-Sicairos's center
2408
+ t0 = time()
2409
+ center_offsets.append(center_offset_pc)
2410
+ fig_titles.append(f'Guizar-Sicairos\'s method: center offset = '
2411
+ f'{center_offset_pc:.2f}')
2412
+ recon_planes.append(self._reconstruct_planes(
2413
+ sinogram, center_offset_pc, thetas, num_core=num_core,
2414
+ gaussian_sigma=gaussian_sigma, ring_width=ring_width))
2415
+ self.logger.info(
2416
+ f'Reconstructing row {row} with center at '
2417
+ f'{center_offset_pc} took {time()-t0:.2f} seconds')
2418
+
2419
+ # Reconstruct the plane for the Donath's center
2420
+ # t0 = time()
2421
+ # center_offsets.append(center_offset_ie)
2422
+ # fig_titles.append(f'Donath\'s method: center offset = '
2423
+ # f'{center_offset_ie:.2f}')
2424
+ # recon_planes.append(self._reconstruct_planes(
2425
+ # sinogram, center_offset_ie, thetas, num_core=num_core,
2426
+ # gaussian_sigma=gaussian_sigma, ring_width=ring_width))
2427
+ # self.logger.info(
2428
+ # f'Reconstructing row {row} with center at '
2429
+ # f'{center_offset_ie} took {time()-t0:.2f} seconds')
2430
+
2431
+ # Reconstruct the plane at the previous row's center
2432
+ if (prev_center_offset is not None
2433
+ and prev_center_offset not in center_offsets):
2434
+ t0 = time()
2435
+ center_offsets.append(prev_center_offset)
2436
+ fig_titles.append(f'Previous row\'s: center offset = '
2437
+ f'{prev_center_offset:.2f}')
2438
+ recon_planes.append(self._reconstruct_planes(
2439
+ sinogram, prev_center_offset, thetas, num_core=num_core,
2440
+ gaussian_sigma=gaussian_sigma, ring_width=ring_width))
2441
+ self.logger.info(
2442
+ f'Reconstructing row {row} with center at '
2443
+ f'{prev_center_offset} took {time()-t0:.2f} seconds')
2444
+
2445
+ # t0 = time()
2446
+ # recon_edges = []
2447
+ # for recon_plane in recon_planes:
2448
+ # recon_edges.append(self._get_edges_one_plane(recon_plane))
2449
+ # print(f'\nGetting edges for row {row} with centers at '
2450
+ # f'{center_offsets} took {time()-t0:.2f} seconds\n')
2451
+
2452
+ # Select the best center
2453
+ buf, accept, selected_center_offset = \
2454
+ self._select_center_offset(
2455
+ recon_planes, row, center_offsets, default_offset_index=0,
2456
+ fig_titles=fig_titles, search_button=False,
2457
+ include_all_bad=True, return_buf=self.save_figures)
2458
+
2459
+ # Save figure
2460
+ if self.save_figures:
2461
+ self._figures.append((buf, f'recon_row_{row}_default_centers'))
2462
+
2463
+ # Create reconstructions for a specified search range
2464
+ if self.interactive:
2465
+ if (center_search_range is None
2466
+ and input_yesno('\nDo you want to reconstruct images '
2467
+ 'for a range of rotation centers', 'n')):
2468
+ center_search_range = input_num_list(
2469
+ 'Enter up to 3 numbers (start, end, step), '
2470
+ '(range, step), or range', remove_duplicates=False,
2471
+ sort=False)
2472
+ if center_search_range is not None:
2473
+ if len(center_search_range) != 3:
2474
+ search_range = center_search_range[0]
2475
+ if len(center_search_range) == 1:
2476
+ step = search_range
2477
+ else:
2478
+ step = center_search_range[1]
2479
+ if selected_center_offset == 'all bad':
2480
+ center_search_range = [
2481
+ - search_range/2, search_range/2, step]
2482
+ else:
2483
+ center_search_range = [
2484
+ selected_center_offset - search_range/2,
2485
+ selected_center_offset + search_range/2,
2486
+ step]
2487
+ center_search_range[1] += 1 # Make upper bound inclusive
2488
+ search_center_offsets = list(np.arange(*center_search_range))
2489
+ search_recon_planes = self._reconstruct_planes(
2490
+ sinogram, search_center_offsets, thetas, num_core=num_core,
2491
+ gaussian_sigma=gaussian_sigma, ring_width=ring_width)
2492
+ for i, center in enumerate(search_center_offsets):
2493
+ title = f'Reconstruction for row {row}, center offset: ' \
2494
+ f'{center:.2f}'
2495
+ self._figures.append(
2496
+ (quick_imshow(
2497
+ search_recon_planes[i], title=title, row_label='y',
2498
+ column_label='x', show_fig=self.interactive,
2499
+ return_fig=True, block=self.interactive),
2500
+ f'recon_row_{row}_center_{center:.2f}'))
2501
+ center_offsets.append(center)
2502
+ recon_planes.append(search_recon_planes[i])
2503
+
2504
+ # Perform an interactive center finding search
2505
+ calibrate_interactively = False
2506
+ if self.interactive:
2507
+ if selected_center_offset == 'all bad':
2508
+ calibrate_interactively = input_yesno(
2509
+ '\nDo you want to perform an interactive search to '
2510
+ 'calibrate the rotation center (y/n)?', 'n')
2511
+ else:
2512
+ calibrate_interactively = input_yesno(
2513
+ '\nDo you want to perform an interactive search to '
2514
+ 'calibrate the rotation center around the selected value '
2515
+ f'of {selected_center_offset} (y/n)?', 'n')
2516
+ if calibrate_interactively:
2517
+ include_all_bad = True
2518
+ low = None
2519
+ upp = None
2520
+ if selected_center_offset == 'all bad':
2521
+ selected_center_offset = None
2522
+ selected_center_offset = input_num(
2523
+ '\nEnter the initial center offset in the center calibration '
2524
+ 'search', ge=-center_offset_range, le=center_offset_range,
2525
+ default=selected_center_offset)
2526
+ max_step_size = min(
2527
+ center_offset_range+selected_center_offset,
2528
+ center_offset_range-selected_center_offset-1)
2529
+ max_step_size = 1 << int(np.log2(max_step_size))-1
2530
+ step_size = input_int(
2531
+ '\nEnter the intial step size in the center calibration '
2532
+ 'search (will be truncated to the nearest lower power of 2)',
2533
+ ge=2, le=max_step_size, default=4)
2534
+ step_size = 1 << int(np.log2(step_size))
2535
+ selected_center_offset_prev = round(selected_center_offset)
2536
+ while step_size:
2537
+ preselected_offsets = (
2538
+ selected_center_offset_prev-step_size,
2539
+ selected_center_offset_prev,
2540
+ selected_center_offset_prev+step_size)
2541
+ indices = []
2542
+ for i, preselected_offset in enumerate(preselected_offsets):
2543
+ if preselected_offset in center_offsets:
2544
+ indices.append(
2545
+ center_offsets.index(preselected_offset))
2546
+ else:
2547
+ indices.append(len(center_offsets))
2548
+ center_offsets.append(preselected_offset)
2549
+ recon_planes.append(self._reconstruct_planes(
2550
+ sinogram, preselected_offset, thetas,
2551
+ num_core=num_core, gaussian_sigma=gaussian_sigma,
2552
+ ring_width=ring_width))
2553
+ buf, accept, selected_center_offset = \
2554
+ self._select_center_offset(
2555
+ [recon_planes[i] for i in indices],
2556
+ row, preselected_offsets, default_offset_index=1,
2557
+ include_all_bad=include_all_bad,
2558
+ return_buf=self.save_figures)
2559
+ # Save figure
2560
+ if self.save_figures:
2561
+ self._figures.append((
2562
+ buf,
2563
+ f'recon_row_{row}_center_range_'
2564
+ f'{min(preselected_offsets)}_'\
2565
+ f'{max(preselected_offsets)}'))
2566
+ if accept and input_yesno(
2567
+ f'Accept center offset {selected_center_offset} '
2568
+ f'for row {row}? (y/n)', 'y'):
2569
+ break
2570
+ if selected_center_offset == 'all bad':
2571
+ step_size *=2
2572
+ else:
2573
+ if selected_center_offset == preselected_offsets[0]:
2574
+ upp = preselected_offsets[1]
2575
+ elif selected_center_offset == preselected_offsets[1]:
2576
+ low = preselected_offsets[0]
2577
+ upp = preselected_offsets[2]
2578
+ else:
2579
+ low = preselected_offsets[1]
2580
+ if None in (low, upp):
2581
+ step_size *= 2
2582
+ else:
2583
+ step_size = step_size//2
2584
+ include_all_bad = False
2585
+ selected_center_offset_prev = round(selected_center_offset)
2586
+ if step_size > max_step_size:
2587
+ self.logger.warning(
2588
+ 'Exceeding maximum step size of {max_step_size}')
2589
+ step_size = max_step_size
2590
+
2591
+ # Collect info for the currently selected center
2592
+ recon_planes = [recon_planes[
2593
+ center_offsets.index(selected_center_offset)]]
2594
+ center_offsets = [selected_center_offset]
2595
+ fig_titles = [f'Reconstruction for center offset = '
2596
+ f'{selected_center_offset:.2f}']
2597
+
2598
+ # Try Nghia Vo's method with the selected center
2599
+ step_size = min(step_size, 10)
2600
+ center_offset_min = selected_center_offset-step_size
2601
+ center_offset_max = selected_center_offset+step_size
2602
+ if num_core > NUM_CORE_TOMOPY_LIMIT:
2603
+ self.logger.debug(
2604
+ f'Running find_center_vo on {NUM_CORE_TOMOPY_LIMIT} '
2605
+ 'cores ...')
2606
+ tomo_center = find_center_vo(
2607
+ sinogram, ncore=NUM_CORE_TOMOPY_LIMIT,
2608
+ smin=center_offset_min, smax=center_offset_max)
2609
+ else:
2610
+ tomo_center = find_center_vo(
2611
+ sinogram, ncore=num_core, smin=center_offset_min,
2612
+ smax=center_offset_max)
2613
+ center_offset_vo = float(tomo_center-center_offset_range)
2614
+ self.logger.info(
2615
+ f'Center at row {row} using Nghia Vo\'s method = '
2616
+ f'{center_offset_vo:.2f}')
2617
+
2618
+ # Reconstruct the plane for the Nghia Vo's center
2619
+ center_offsets.append(center_offset_vo)
2620
+ fig_titles.append(
2621
+ f'Vo\'s method: center offset = {center_offset_vo:.2f}')
2622
+ recon_planes.append(self._reconstruct_planes(
2623
+ sinogram, center_offset_vo, thetas, num_core=num_core,
2624
+ gaussian_sigma=gaussian_sigma, ring_width=ring_width))
2625
+
2626
+ # Select the best center
2627
+ buf, accept, selected_center_offset = \
2628
+ self._select_center_offset(
2629
+ recon_planes, row, center_offsets, default_offset_index=0,
2630
+ fig_titles=fig_titles, search_button=False,
2631
+ return_buf=self.save_figures)
2632
+
2633
+ # Save figure
2634
+ if self.save_figures:
2635
+ self._figures.append((
2636
+ buf,
2637
+ f'recon_row_{row}_center_{selected_center_offset:.2f}'))
2638
+
2639
+ del recon_planes
2640
+
2641
+ del sinogram
2642
+
2643
+ # Return the center location
2644
+ if self.interactive:
2645
+ if selected_center_offset == 'all bad':
2646
+ self.logger.warning(
2647
+ '\nUnable to successfully calibrate center axis')
2648
+ selected_center_offset = input_num(
2649
+ 'Enter the center offset for row {row}',
2650
+ ge=-center_offset_range, le=center_offset_range)
2651
+ return float(selected_center_offset)
2652
+ return float(center_offset_vo)
2653
+
2654
+ #@profile
2655
+ def _reconstruct_planes(
2656
+ self, tomo_planes, center_offset, thetas, num_core=1,
2657
+ gaussian_sigma=None, ring_width=None):
2658
+ """Invert the sinogram for a single or multiple tomography
2659
+ planes using tomopy's recon routine."""
2660
+ # Third party modules
2661
+ from scipy.ndimage import gaussian_filter
2662
+ from tomopy import (
2663
+ misc,
2664
+ recon,
2665
+ )
2666
+
2667
+ # Reconstruct the planes
2668
+ # tomo_planes axis data order: (row,)theta,column
2669
+ # thetas in radians
2670
+ if isinstance(center_offset, (int, float)):
2671
+ tomo_planes = np.expand_dims(tomo_planes, 0)
2672
+ center_offset = center_offset + tomo_planes.shape[2]/2
2673
+ elif is_num_series(center_offset):
2674
+ tomo_planes = np.array([tomo_planes]*len(center_offset))
2675
+ center_offset = np.asarray(center_offset) + tomo_planes.shape[2]/2
2676
+ else:
2677
+ raise ValueError(
2678
+ f'Invalid parameter center_offset ({center_offset})')
2679
+ recon_planes = recon(
2680
+ tomo_planes, thetas, center=center_offset, sinogram_order=True,
2681
+ algorithm='gridrec', ncore=num_core)
2682
+
2683
+ # Performing Gaussian filtering and removing ring artifacts
2684
+ if gaussian_sigma is not None and gaussian_sigma:
2685
+ recon_planes = gaussian_filter(
2686
+ recon_planes, gaussian_sigma, mode='nearest')
2687
+ if ring_width is not None and ring_width:
2688
+ recon_planes = misc.corr.remove_ring(
2689
+ recon_planes, rwidth=ring_width, ncore=num_core)
2690
+
2691
+ # Apply a circular mask
2692
+ recon_planes = misc.corr.circ_mask(recon_planes, axis=0) #RV
2693
+
2694
+ return np.squeeze(recon_planes)
2695
+
2696
+ # def _get_edges_one_plane(self, recon_plane):
2697
+ # """Create an "edges plot" image for a single reconstructed
2698
+ # tomography data plane.
2699
+ # """
2700
+ # # Third party modules
2701
+ # from skimage.restoration import denoise_tv_chambolle
2702
+ #
2703
+ # vis_parameters = None # RV self._config.get('vis_parameters')
2704
+ # if vis_parameters is None:
2705
+ # weight = 0.1
2706
+ # else:
2707
+ # weight = vis_parameters.get('denoise_weight', 0.1)
2708
+ # if not is_num(weight, ge=0.):
2709
+ # self.logger.warning(
2710
+ # f'Invalid weight ({weight}) in _get_edges_one_plane, '
2711
+ # 'set to a default of 0.1')
2712
+ # weight = 0.1
2713
+ # return denoise_tv_chambolle(recon_plane, weight=weight)
2714
+
2715
+ #@profile
2716
+ def _select_center_offset(
2717
+ self, recon_planes, row, preselected_offsets,
2718
+ default_offset_index=0, fig_titles=None, search_button=True,
2719
+ include_all_bad=False, return_buf=False):
2720
+ """Select a center offset value from reconstructed images
2721
+ for a single reconstructed tomography data plane."""
2722
+ # Third party modules
2723
+ import matplotlib.pyplot as plt
2724
+ from matplotlib.widgets import RadioButtons, Button
2725
+
2726
+ radio_btn = None
2727
+
2728
+ def reject():
2729
+ """Callback function for the "Reject" input."""
2730
+
2731
+ def select_offset(offset):
2732
+ """Callback function for the "Select offset" input."""
2733
+
2734
+ def search(event):
2735
+ """Callback function for the "Search" button."""
2736
+ if num_plots == 1:
2737
+ selected_offset.append(
2738
+ (False, preselected_offsets[default_offset_index]))
2739
+ else:
2740
+ offset = radio_btn.value_selected
2741
+ if offset in ('both bad', 'all bad'):
2742
+ selected_offset.append((False, 'all bad'))
2743
+ else:
2744
+ selected_offset.append((False, float(offset)))
2745
+ plt.close()
2746
+
2747
+ def accept(event):
2748
+ """Callback function for the "Accept" button."""
2749
+ if num_plots == 1:
2750
+ selected_offset.append(
2751
+ (True, preselected_offsets[default_offset_index]))
2752
+ else:
2753
+ offset = radio_btn.value_selected
2754
+ if offset in ('both bad', 'all bad'):
2755
+ selected_offset.append((False, 'all bad'))
2756
+ else:
2757
+ selected_offset.append((True, float(offset)))
2758
+ plt.close()
2759
+
2760
+ if not isinstance(recon_planes, (tuple, list)):
2761
+ recon_planes = [recon_planes]
2762
+ if not isinstance(preselected_offsets, (tuple, list)):
2763
+ preselected_offsets = [preselected_offsets]
2764
+ assert len(recon_planes) == len(preselected_offsets)
2765
+ if fig_titles is not None:
2766
+ assert len(fig_titles) == len(preselected_offsets)
2767
+
2768
+ select_text = None
2769
+ selected_offset = []
2770
+
2771
+ title_pos = (0.5, 0.95)
2772
+ title_props = {'fontsize': 'xx-large', 'horizontalalignment': 'center',
2773
+ 'verticalalignment': 'bottom'}
2774
+ subtitle_pos = (0.5, 0.90)
2775
+ subtitle_props = {'fontsize': 'xx-large',
2776
+ 'horizontalalignment': 'center',
2777
+ 'verticalalignment': 'bottom'}
2778
+
2779
+ num_plots = len(recon_planes)
2780
+ if num_plots == 1:
2781
+ fig, axs = plt.subplots(figsize=(11, 8.5))
2782
+ axs = [axs]
2783
+ vmax = np.max(recon_planes[0][:,:])
2784
+ else:
2785
+ fig, axs = plt.subplots(ncols=num_plots, figsize=(17, 8.5))
2786
+ axs = list(axs)
2787
+ vmax = np.max(recon_planes[1][:,:])
2788
+ for i, (ax, recon_plane, preselected_offset) in enumerate(zip(
2789
+ axs, recon_planes, preselected_offsets)):
2790
+ ax.imshow(recon_plane, vmin=-vmax, vmax=vmax, cmap='gray')
2791
+ if fig_titles is None:
2792
+ if num_plots == 1:
2793
+ ax.set_title(
2794
+ f'Reconstruction for row {row}, center offset: ' \
2795
+ f'{preselected_offset:.2f}', fontsize='x-large')
2796
+ else:
2797
+ ax.set_title(
2798
+ f'Center offset: {preselected_offset}',
2799
+ fontsize='x-large')
2800
+ ax.set_xlabel('x', fontsize='x-large')
2801
+ if not i:
2802
+ ax.set_ylabel('y', fontsize='x-large')
2803
+ if fig_titles is not None:
2804
+ for (ax, fig_title) in zip(axs, fig_titles):
2805
+ ax.set_title(fig_title, fontsize='x-large')
2806
+
2807
+ fig_title = plt.figtext(
2808
+ *title_pos, f'Reconstruction for row {row}', **title_props)
2809
+ if num_plots == 1:
2810
+ fig_subtitle = plt.figtext(
2811
+ *subtitle_pos,
2812
+ 'Press "Accept" to accept this value or "Reject" if not',
2813
+ **subtitle_props)
2814
+ else:
2815
+ if search_button:
2816
+ fig_subtitle = plt.figtext(
2817
+ *subtitle_pos,
2818
+ 'Select the best offset and press "Accept" to accept or '
2819
+ '"Search" to continue the search',
2820
+ **subtitle_props)
2821
+ else:
2822
+ fig_subtitle = plt.figtext(
2823
+ *subtitle_pos,
2824
+ 'Select the best offset and press "Accept" to accept',
2825
+ **subtitle_props)
2826
+
2827
+ if not self.interactive:
2828
+
2829
+ selected_offset.append(
2830
+ (True, preselected_offsets[default_offset_index]))
2831
+
2832
+ else:
2833
+
2834
+ fig.subplots_adjust(bottom=0.25, top=0.85)
2835
+
2836
+ if num_plots == 1:
2837
+
2838
+ # Setup "Reject" button
2839
+ reject_btn = Button(
2840
+ plt.axes([0.15, 0.05, 0.15, 0.075]), 'Reject')
2841
+ reject_cid = reject_btn.on_clicked(reject)
2842
+
2843
+ else:
2844
+
2845
+ # Setup RadioButtons
2846
+ select_text = plt.figtext(
2847
+ 0.225, 0.175, 'Select offset', fontsize='x-large',
2848
+ horizontalalignment='center', verticalalignment='center')
2849
+ if include_all_bad:
2850
+ if num_plots == 2:
2851
+ labels = (*preselected_offsets, 'both bad')
2852
+ else:
2853
+ labels = (*preselected_offsets, 'all bad')
2854
+ else:
2855
+ labels = preselected_offsets
2856
+ radio_btn = RadioButtons(
2857
+ plt.axes([0.175, 0.05, 0.1, 0.1]),
2858
+ labels = labels, active=default_offset_index)
2859
+ radio_cid = radio_btn.on_clicked(select_offset)
2860
+
2861
+ # Setup "Search" button
2862
+ if search_button:
2863
+ search_btn = Button(
2864
+ plt.axes([0.4125, 0.05, 0.15, 0.075]), 'Search')
2865
+ search_cid = search_btn.on_clicked(search)
2866
+
2867
+ # Setup "Accept" button
2868
+ accept_btn = Button(
2869
+ plt.axes([0.7, 0.05, 0.15, 0.075]), 'Accept')
2870
+ accept_cid = accept_btn.on_clicked(accept)
2871
+
2872
+ plt.show()
2873
+
2874
+ # Disconnect all widget callbacks when figure is closed
2875
+ # and remove the buttons before returning the figure
2876
+ if num_plots == 1:
2877
+ reject_btn.disconnect(reject_cid)
2878
+ reject_btn.ax.remove()
2879
+ else:
2880
+ radio_btn.disconnect(radio_cid)
2881
+ radio_btn.ax.remove()
2882
+ # Needed to work around a bug in Matplotlib:
2883
+ radio_btn.active = False
2884
+ if search_button:
2885
+ search_btn.disconnect(search_cid)
2886
+ search_btn.ax.remove()
2887
+ accept_btn.disconnect(accept_cid)
2888
+ accept_btn.ax.remove()
2889
+
2890
+ if num_plots == 1:
2891
+ fig_title.remove()
2892
+ else:
2893
+ fig_title.set_in_layout(True)
2894
+ if self.interactive:
2895
+ select_text.remove()
2896
+ fig_subtitle.remove()
2897
+ fig.tight_layout(rect=(0, 0, 1, 0.95))
2898
+ if not selected_offset:# and num_plots == 1:
2899
+ selected_offset.append(
2900
+ (True, preselected_offsets[default_offset_index]))
2901
+
2902
+ if return_buf:
2903
+ buf = fig_to_iobuf(fig)
2904
+ else:
2905
+ buf = None
2906
+ plt.close()
2907
+ return buf, *selected_offset[0]
2908
+
2909
+ #@profile
2910
+ def _reconstruct_one_tomo_stack(
2911
+ self, tomo_stack, thetas, center_offsets=None, num_core=1,
2912
+ algorithm='gridrec', secondary_iters=0, gaussian_sigma=None,
2913
+ remove_stripe_sigma=None, ring_width=None):
2914
+ """Reconstruct a single tomography stack."""
2915
+ # Third party modules
2916
+ from tomopy import (
2917
+ astra,
2918
+ misc,
2919
+ prep,
2920
+ recon,
2921
+ )
2922
+
2923
+ # tomo_stack axis data order: row,theta,column
2924
+ # thetas in radians
2925
+ # centers_offset: tomography axis shift in pixels relative
2926
+ # to column center
2927
+ if center_offsets is None:
2928
+ centers = np.zeros((tomo_stack.shape[0]))
2929
+ elif len(center_offsets) == 2:
2930
+ centers = np.linspace(
2931
+ center_offsets[0], center_offsets[1], tomo_stack.shape[0])
2932
+ else:
2933
+ if center_offsets.size != tomo_stack.shape[0]:
2934
+ raise RuntimeError(
2935
+ 'center_offsets dimension mismatch in '
2936
+ 'reconstruct_one_tomo_stack')
2937
+ centers = center_offsets
2938
+ centers += tomo_stack.shape[2]/2
2939
+
2940
+ # Remove horizontal stripe
2941
+ # RV prep.stripe.remove_stripe_fw seems flawed for hollow brick
2942
+ # accross multiple stacks
2943
+ if remove_stripe_sigma is not None and remove_stripe_sigma:
2944
+ if num_core > NUM_CORE_TOMOPY_LIMIT:
2945
+ tomo_stack = prep.stripe.remove_stripe_fw(
2946
+ tomo_stack, sigma=remove_stripe_sigma,
2947
+ ncore=NUM_CORE_TOMOPY_LIMIT)
2948
+ else:
2949
+ tomo_stack = prep.stripe.remove_stripe_fw(
2950
+ tomo_stack, sigma=remove_stripe_sigma, ncore=num_core)
2951
+
2952
+ # Perform initial image reconstruction
2953
+ self.logger.debug('Performing initial image reconstruction')
2954
+ t0 = time()
2955
+ tomo_recon_stack = recon(
2956
+ tomo_stack, thetas, centers, sinogram_order=True,
2957
+ algorithm=algorithm, ncore=num_core)
2958
+ self.logger.info(
2959
+ f'Performing initial image reconstruction took {time()-t0:.2f} '
2960
+ 'seconds')
2961
+
2962
+ # Run optional secondary iterations
2963
+ if secondary_iters > 0:
2964
+ self.logger.debug(
2965
+ 'Running {secondary_iters} secondary iterations')
2966
+ # options = {
2967
+ # 'method': 'SIRT_CUDA',
2968
+ # 'proj_type': 'cuda',
2969
+ # 'num_iter': secondary_iters
2970
+ # }
2971
+ # RV doesn't work for me:
2972
+ # "Error: CUDA error 803: system has unsupported display driver/cuda driver
2973
+ # combination."
2974
+ # options = {
2975
+ # 'method': 'SIRT',
2976
+ # 'proj_type': 'linear',
2977
+ # 'MinConstraint': 0,
2978
+ # 'num_iter':secondary_iters
2979
+ # }
2980
+ # SIRT did not finish while running overnight
2981
+ # options = {
2982
+ # 'method': 'SART',
2983
+ # 'proj_type': 'linear',
2984
+ # 'num_iter':secondary_iters
2985
+ # }
2986
+ options = {
2987
+ 'method': 'SART',
2988
+ 'proj_type': 'linear',
2989
+ 'MinConstraint': 0,
2990
+ 'num_iter': secondary_iters,
2991
+ }
2992
+ t0 = time()
2993
+ tomo_recon_stack = recon(
2994
+ tomo_stack, thetas, centers, init_recon=tomo_recon_stack,
2995
+ options=options, sinogram_order=True, algorithm=astra,
2996
+ ncore=num_core)
2997
+ self.logger.info(
2998
+ f'Performing secondary iterations took {time()-t0:.2f} '
2999
+ 'seconds')
3000
+
3001
+ # Remove ring artifacts
3002
+ if ring_width is not None and ring_width:
3003
+ misc.corr.remove_ring(
3004
+ tomo_recon_stack, rwidth=ring_width, out=tomo_recon_stack,
3005
+ ncore=num_core)
3006
+
3007
+ # Performing Gaussian filtering
3008
+ if gaussian_sigma is not None and gaussian_sigma:
3009
+ tomo_recon_stack = misc.corr.gaussian_filter(
3010
+ tomo_recon_stack, sigma=gaussian_sigma, ncore=num_core)
3011
+
3012
+ return tomo_recon_stack
3013
+
3014
    #@profile
    def _resize_reconstructed_data(
            self, data, x_bounds=None, y_bounds=None, z_bounds=None,
            combine_data=False):
        """Resize the reconstructed tomography data.

        Select (interactively or from preselected values) the x-, y-,
        and z-bounds of the reconstructed data and return them; the
        data itself is not modified.

        :param data: One reconstructed stack (a 3D array) or a list of
            stacks whose planes all have the same shape.
        :param x_bounds: Preselected x-range (in the z-plane).
        :param y_bounds: Preselected y-range (in the z-plane).
        :param z_bounds: Preselected z-range; only used for a single
            stack or when combining stacks.
        :param combine_data: Whether the data is being combined;
            affects log messages and figure filenames only.
        :raises ValueError: Invalid x_bounds, y_bounds or z_bounds.
        :return: Tuple of the selected (x_bounds, y_bounds, z_bounds);
            z_bounds is returned unchanged for multiple uncombined
            stacks.
        """
        # Data order: row/-z,y,x or stack,row/-z,y,x
        if isinstance(data, list):
            num_tomo_stacks = len(data)
            for i in range(num_tomo_stacks):
                assert data[i].ndim == 3
                if i:
                    # All stacks must share the same plane shape
                    assert data[i].shape[1:] == data[0].shape[1:]
            tomo_recon_stacks = data
        else:
            assert data.ndim == 3
            num_tomo_stacks = 1
            tomo_recon_stacks = [data]

        # Selecting x an y bounds (in z-plane)
        # When not interactive, unspecified bounds default to the full
        # range; when interactive, they stay None so the user picks
        # them in the ROI selection below
        if x_bounds is None:
            if not self.interactive:
                self.logger.warning('x_bounds unspecified, use data for '
                                    'full x-range')
                x_bounds = (0, tomo_recon_stacks[0].shape[2])
        elif not is_int_pair(
                x_bounds, ge=0, le=tomo_recon_stacks[0].shape[2]):
            raise ValueError(f'Invalid parameter x_bounds ({x_bounds})')
        if y_bounds is None:
            if not self.interactive:
                self.logger.warning('y_bounds unspecified, use data for '
                                    'full y-range')
                y_bounds = (0, tomo_recon_stacks[0].shape[1])
        elif not is_int_pair(
                y_bounds, ge=0, le=tomo_recon_stacks[0].shape[1]):
            raise ValueError(f'Invalid parameter y_bounds ({y_bounds})')
        # Build the preselected ROI as (x_low, x_upp, y_low, y_upp),
        # substituting the full range for any bound still None
        if x_bounds is None and y_bounds is None:
            preselected_roi = None
        elif x_bounds is None:
            preselected_roi = (
                0, tomo_recon_stacks[0].shape[2],
                y_bounds[0], y_bounds[1])
        elif y_bounds is None:
            preselected_roi = (
                x_bounds[0], x_bounds[1],
                0, tomo_recon_stacks[0].shape[1])
        else:
            preselected_roi = (
                x_bounds[0], x_bounds[1],
                y_bounds[0], y_bounds[1])
        # Sum over z (and all stacks) to get one 2D image for the
        # ROI selection
        tomosum = 0
        for i in range(num_tomo_stacks):
            tomosum = tomosum + np.sum(tomo_recon_stacks[i], axis=0)
        buf, roi = select_roi_2d(
            tomosum, preselected_roi=preselected_roi,
            title_a='Reconstructed data summed over z',
            row_label='y', column_label='x',
            interactive=self.interactive, return_buf=self.save_figures)
        if self.save_figures:
            if combine_data:
                filename = 'combined_data_xy_roi'
            else:
                filename = 'reconstructed_data_xy_roi'
            self._figures.append((buf, filename))
        if roi is None:
            # No selection made: keep the full data range
            x_bounds = (0, tomo_recon_stacks[0].shape[2])
            y_bounds = (0, tomo_recon_stacks[0].shape[1])
        else:
            x_bounds = (int(roi[0]), int(roi[1]))
            y_bounds = (int(roi[2]), int(roi[3]))
        self.logger.debug(f'x_bounds = {x_bounds}')
        self.logger.debug(f'y_bounds = {y_bounds}')

        # Selecting z bounds (in xy-plane)
        # (only valid for a single image stack or when combining a stack)
        if num_tomo_stacks == 1 or combine_data:
            if z_bounds is None:
                if not self.interactive:
                    if combine_data:
                        self.logger.warning(
                            'z_bounds unspecified, combine reconstructed data '
                            'for full z-range')
                    else:
                        self.logger.warning(
                            'z_bounds unspecified, reconstruct data for '
                            'full z-range')
                    z_bounds = (0, tomo_recon_stacks[0].shape[0])
            elif not is_int_pair(
                    z_bounds, ge=0, le=tomo_recon_stacks[0].shape[0]):
                raise ValueError(f'Invalid parameter z_bounds ({z_bounds})')
            # Sum over the xy-planes to get a 1D profile along z
            tomosum = 0
            for i in range(num_tomo_stacks):
                tomosum = tomosum + np.sum(tomo_recon_stacks[i], axis=(1,2))
            buf, z_bounds = select_roi_1d(
                tomosum, preselected_roi=z_bounds,
                xlabel='z', ylabel='Reconstructed data summed over x and y',
                interactive=self.interactive, return_buf=self.save_figures)
            self.logger.debug(f'z_bounds = {z_bounds}')
            if self.save_figures:
                if combine_data:
                    filename = 'combined_data_z_roi'
                else:
                    filename = 'reconstructed_data_z_roi'
                self._figures.append((buf, filename))

        return x_bounds, y_bounds, z_bounds
3119
+
3120
+
3121
+ class TomoSimFieldProcessor(Processor):
3122
+ """A processor to create a simulated tomography data set returning
3123
+ a `nexusformat.nexus.NXroot` object containing the simulated
3124
+ tomography detector images.
3125
+ """
3126
+ def process(self, data):
3127
+ """Process the input configuration and return a
3128
+ `nexusformat.nexus.NXroot` object with the simulated
3129
+ tomography detector images.
3130
+
3131
+ :param data: Input configuration for the simulation.
3132
+ :type data: list[PipelineData]
3133
+ :raises ValueError: Invalid input or configuration parameter.
3134
+ :return: Simulated tomographic images.
3135
+ :rtype: nexusformat.nexus.NXroot
3136
+ """
3137
+ # Third party modules
3138
+ # pylint: disable=no-name-in-module
3139
+ from nexusformat.nexus import (
3140
+ NXdetector,
3141
+ NXentry,
3142
+ NXinstrument,
3143
+ NXroot,
3144
+ NXsample,
3145
+ NXsource,
3146
+ )
3147
+ # pylint: enable=no-name-in-module
3148
+
3149
+ # Get and validate the relevant configuration object in data
3150
+ config = self.get_config(data=data, schema='tomo.models.TomoSimConfig')
3151
+
3152
+ station = config.station
3153
+ sample_type = config.sample_type
3154
+ sample_size = config.sample_size
3155
+ if len(sample_size) == 1:
3156
+ sample_size = (sample_size[0], sample_size[0])
3157
+ if sample_type == 'hollow_pyramid' and len(sample_size) != 3:
3158
+ raise ValueError('Invalid combindation of sample_type '
3159
+ f'({sample_type}) and sample_size ({sample_size}')
3160
+ wall_thickness = config.wall_thickness
3161
+ mu = config.mu
3162
+ theta_step = config.theta_step
3163
+ beam_intensity = config.beam_intensity
3164
+ background_intensity = config.background_intensity
3165
+ slit_size = config.slit_size
3166
+ pixel_size = config.detector.pixel_size
3167
+ if len(pixel_size) == 1:
3168
+ pixel_size = (
3169
+ pixel_size[0]/config.detector.lens_magnification,
3170
+ pixel_size[0]/config.detector.lens_magnification,
3171
+ )
3172
+ else:
3173
+ pixel_size = (
3174
+ pixel_size[0]/config.detector.lens_magnification,
3175
+ pixel_size[1]/config.detector.lens_magnification,
3176
+ )
3177
+ detector_size = (config.detector.rows, config.detector.columns)
3178
+ if slit_size-0.5*pixel_size[0] > detector_size[0]*pixel_size[0]:
3179
+ raise ValueError(
3180
+ f'Slit size ({slit_size}) larger than detector height '
3181
+ f'({detector_size[0]*pixel_size[0]})')
3182
+
3183
+ # Get the rotation angles (start at a arbitrarily choose angle
3184
+ # and add thetas for a full 360 degrees rotation series)
3185
+ if station in ('id1a3', 'id3a'):
3186
+ theta_start = 0.
3187
+ else:
3188
+ theta_start = -17
3189
+ # RV theta_end = theta_start + 360.
3190
+ theta_end = theta_start + 180.
3191
+ thetas = list(
3192
+ np.arange(theta_start, theta_end+0.5*theta_step, theta_step))
3193
+
3194
+ # Get the number of horizontal stacks bases on the diagonal
3195
+ # of the square and for now don't allow more than one
3196
+ if (sample_size) == 3:
3197
+ num_tomo_stack = 1 + int(
3198
+ (max(sample_size[1:2])*np.sqrt(2)-pixel_size[1])
3199
+ / (detector_size[1]*pixel_size[1]))
3200
+ else:
3201
+ num_tomo_stack = 1 + int((sample_size[1]*np.sqrt(2)-pixel_size[1])
3202
+ / (detector_size[1]*pixel_size[1]))
3203
+ if num_tomo_stack > 1:
3204
+ raise ValueError('Sample is too wide for the detector')
3205
+
3206
+ # Create the x-ray path length through a solid square
3207
+ # crosssection for a set of rotation angles.
3208
+ path_lengths_solid = None
3209
+ if sample_type != 'hollow_pyramid':
3210
+ path_lengths_solid = self._create_pathlength_solid_square(
3211
+ sample_size[1], thetas, pixel_size[1], detector_size[1])
3212
+
3213
+ # Create the x-ray path length through a hollow square
3214
+ # crosssection for a set of rotation angles.
3215
+ path_lengths_hollow = None
3216
+ if sample_type in ('square_pipe', 'hollow_cube', 'hollow_brick'):
3217
+ path_lengths_hollow = path_lengths_solid \
3218
+ - self._create_pathlength_solid_square(
3219
+ sample_size[1] - 2*wall_thickness, thetas,
3220
+ pixel_size[1], detector_size[1])
3221
+
3222
+ # Get the number of stacks
3223
+ num_tomo_stack = 1 + int((sample_size[0]-pixel_size[0])/slit_size)
3224
+ if num_tomo_stack > 1 and station == 'id3b':
3225
+ raise ValueError('Sample is to tall for the detector')
3226
+
3227
+ # Get the column coordinates
3228
+ img_row_offset = -0.5 * (detector_size[0]*pixel_size[0]
3229
+ + slit_size * (num_tomo_stack-1))
3230
+ img_row_coords = np.flip(img_row_offset
3231
+ + pixel_size[0] * (0.5 + np.asarray(range(int(detector_size[0])))))
3232
+
3233
+ # Get the transmitted intensities
3234
+ num_theta = len(thetas)
3235
+ vertical_shifts = []
3236
+ tomo_fields_stack = []
3237
+ len_img_y = (detector_size[1]+1)//2
3238
+ if len_img_y%2:
3239
+ len_img_y = 2*len_img_y - 1
3240
+ else:
3241
+ len_img_y = 2*len_img_y
3242
+ img_dim = (len(img_row_coords), len_img_y)
3243
+ intensities_solid = None
3244
+ intensities_hollow = None
3245
+ for n in range(num_tomo_stack):
3246
+ vertical_shifts.append(img_row_offset + n*slit_size
3247
+ + 0.5*detector_size[0]*pixel_size[0])
3248
+ tomo_field = beam_intensity * np.ones((num_theta, *img_dim))
3249
+ if sample_type == 'square_rod':
3250
+ intensities_solid = \
3251
+ beam_intensity * np.exp(-mu*path_lengths_solid)
3252
+ for n in range(num_theta):
3253
+ tomo_field[n,:,:] = intensities_solid[n]
3254
+ elif sample_type == 'square_pipe':
3255
+ intensities_hollow = \
3256
+ beam_intensity * np.exp(-mu*path_lengths_hollow)
3257
+ for n in range(num_theta):
3258
+ tomo_field[n,:,:] = intensities_hollow[n]
3259
+ elif sample_type == 'hollow_pyramid':
3260
+ outer_indices = \
3261
+ np.where(abs(img_row_coords) <= sample_size[0]/2)[0]
3262
+ inner_indices = np.where(
3263
+ abs(img_row_coords) < sample_size[0]/2 - wall_thickness)[0]
3264
+ wall_indices = list(set(outer_indices)-set(inner_indices))
3265
+ ratio = abs(sample_size[1]-sample_size[2])/sample_size[0]
3266
+ baselength = max(sample_size[1:2])
3267
+ for i in wall_indices:
3268
+ path_lengths_solid = self._create_pathlength_solid_square(
3269
+ baselength - ratio*(
3270
+ img_row_coords[i] + 0.5*sample_size[0]),
3271
+ thetas, pixel_size[1], detector_size[1])
3272
+ intensities_solid = \
3273
+ beam_intensity * np.exp(-mu*path_lengths_solid)
3274
+ for n in range(num_theta):
3275
+ tomo_field[n,i] = intensities_solid[n]
3276
+ for i in inner_indices:
3277
+ path_lengths_hollow = (
3278
+ self._create_pathlength_solid_square(
3279
+ baselength - ratio*(
3280
+ img_row_coords[i] + 0.5*sample_size[0]),
3281
+ thetas, pixel_size[1], detector_size[1])
3282
+ - self._create_pathlength_solid_square(
3283
+ baselength - 2*wall_thickness - ratio*(
3284
+ img_row_coords[i] + 0.5*sample_size[0]),
3285
+ thetas, pixel_size[1], detector_size[1]))
3286
+ intensities_hollow = \
3287
+ beam_intensity * np.exp(-mu*path_lengths_hollow)
3288
+ for n in range(num_theta):
3289
+ tomo_field[n,i] = intensities_hollow[n]
3290
+ else:
3291
+ intensities_solid = \
3292
+ beam_intensity * np.exp(-mu*path_lengths_solid)
3293
+ intensities_hollow = \
3294
+ beam_intensity * np.exp(-mu*path_lengths_hollow)
3295
+ outer_indices = \
3296
+ np.where(abs(img_row_coords) <= sample_size[0]/2)[0]
3297
+ inner_indices = np.where(
3298
+ abs(img_row_coords) < sample_size[0]/2 - wall_thickness)[0]
3299
+ wall_indices = list(set(outer_indices)-set(inner_indices))
3300
+ for i in wall_indices:
3301
+ for n in range(num_theta):
3302
+ tomo_field[n,i] = intensities_solid[n]
3303
+ for i in inner_indices:
3304
+ for n in range(num_theta):
3305
+ tomo_field[n,i] = intensities_hollow[n]
3306
+ tomo_field += background_intensity
3307
+ tomo_fields_stack.append(tomo_field.astype(np.int64))
3308
+ if num_tomo_stack > 1:
3309
+ img_row_coords += slit_size
3310
+
3311
+ # Add dummy snapshots at each end to mimic FMB/SMB
3312
+ if station in ('id1a3', 'id3a'):
3313
+ num_dummy_start = 5
3314
+ num_dummy_end = 0
3315
+ starting_image_index = 345000
3316
+ else:
3317
+ num_dummy_start = 1
3318
+ num_dummy_end = 0
3319
+ starting_image_index = 0
3320
+ starting_image_offset = num_dummy_start
3321
+ # thetas = [theta_start-n*theta_step
3322
+ # for n in range(num_dummy_start, 0, -1)] + thetas
3323
+ # thetas += [theta_end+n*theta_step
3324
+ # for n in range(1, num_dummy_end+1)]
3325
+ if num_dummy_start:
3326
+ dummy_fields = background_intensity * np.ones(
3327
+ (num_dummy_start, *img_dim), dtype=np.int64)
3328
+ for n, tomo_field in enumerate(tomo_fields_stack):
3329
+ tomo_fields_stack[n] = np.concatenate(
3330
+ (dummy_fields, tomo_field))
3331
+ if num_dummy_end:
3332
+ dummy_fields = background_intensity * np.ones(
3333
+ (num_dummy_end, *img_dim), dtype=np.int64)
3334
+ for n, tomo_field in enumerate(tomo_fields_stack):
3335
+ tomo_fields_stack[n] = np.concatenate(
3336
+ (tomo_field, dummy_fields))
3337
+ if num_tomo_stack == 1:
3338
+ tomo_fields_stack = tomo_fields_stack[0]
3339
+
3340
+ # Create a NeXus object and write to file
3341
+ nxroot = NXroot()
3342
+ nxroot.entry = NXentry()
3343
+ nxroot.entry.sample = NXsample()
3344
+ nxroot.entry.sample.sample_type = sample_type
3345
+ nxroot.entry.sample.sample_size = sample_size
3346
+ if wall_thickness is not None:
3347
+ nxroot.entry.sample.wall_thickness = wall_thickness
3348
+ nxroot.entry.sample.mu = mu
3349
+ nxinstrument = NXinstrument()
3350
+ nxroot.entry.instrument = nxinstrument
3351
+ nxinstrument.source = NXsource()
3352
+ nxinstrument.source.attrs['station'] = station
3353
+ nxinstrument.source.type = 'Synchrotron X-ray Source'
3354
+ nxinstrument.source.name = 'Tomography Simulator'
3355
+ nxinstrument.source.probe = 'x-ray'
3356
+ nxinstrument.source.background_intensity = background_intensity
3357
+ nxinstrument.source.beam_intensity = beam_intensity
3358
+ nxinstrument.source.slit_size = slit_size
3359
+ nxdetector = NXdetector()
3360
+ nxinstrument.detector = nxdetector
3361
+ nxdetector.local_name = config.detector.prefix
3362
+ nxdetector.row_pixel_size = pixel_size[0]
3363
+ nxdetector.column_pixel_size = pixel_size[1]
3364
+ nxdetector.row_pixel_size.units = 'mm'
3365
+ nxdetector.column_pixel_size.units = 'mm'
3366
+ nxdetector.data = tomo_fields_stack
3367
+ nxdetector.thetas = thetas
3368
+ nxdetector.z_translation = vertical_shifts
3369
+ nxdetector.starting_image_index = starting_image_index
3370
+ nxdetector.starting_image_offset = starting_image_offset
3371
+
3372
+ return nxroot
3373
+
3374
+ def _create_pathlength_solid_square(self, dim, thetas, pixel_size,
3375
+ detector_size):
3376
+ """Create the x-ray path length through a solid square
3377
+ crosssection for a set of rotation angles.
3378
+ """
3379
+ # Get the column coordinates
3380
+ img_y_coords = pixel_size * (0.5 * (1 - detector_size%2)
3381
+ + np.asarray(range((detector_size+1)//2)))
3382
+
3383
+ # Get the path lenghts for position column coordinates
3384
+ lengths = np.zeros((len(thetas), len(img_y_coords)), dtype=np.float64)
3385
+ for i, theta in enumerate(thetas):
3386
+ theta = theta - 90.*np.floor(theta/90.)
3387
+ if 45. < theta <= 90.:
3388
+ theta = 90.-theta
3389
+ theta_rad = theta*np.pi/180.
3390
+ len_ab = dim/np.cos(theta_rad)
3391
+ len_oc = dim*np.cos(theta_rad+0.25*np.pi)/np.sqrt(2.)
3392
+ len_ce = dim*np.sin(theta_rad)
3393
+ index1 = int(np.argmin(np.abs(img_y_coords-len_oc)))
3394
+ if len_oc < img_y_coords[index1] and index1 > 0:
3395
+ index1 -= 1
3396
+ index2 = int(np.argmin(np.abs(img_y_coords-len_oc-len_ce)))
3397
+ if len_oc+len_ce < img_y_coords[index2]:
3398
+ index2 -= 1
3399
+ index1 += 1
3400
+ index2 += 1
3401
+ for j in range(index1):
3402
+ lengths[i,j] = len_ab
3403
+ for j, column in enumerate(img_y_coords[index1:index2]):
3404
+ lengths[i,j+index1] = len_ab*(len_oc+len_ce-column)/len_ce
3405
+
3406
+ # Add the mirror image for negative column coordinates
3407
+ if len(img_y_coords)%2:
3408
+ lengths = np.concatenate(
3409
+ (np.fliplr(lengths[:,1:]), lengths), axis=1)
3410
+ else:
3411
+ lengths = np.concatenate((np.fliplr(lengths), lengths), axis=1)
3412
+
3413
+ return lengths
3414
+
3415
+
3416
class TomoDarkFieldProcessor(Processor):
    """A processor to create the dark field associated with a simulated
    tomography data set created by TomoSimProcessor.

    :ivar num_image: Number of dark field images, defaults to `5`.
    :type num_image: int, optional.
    """
    num_image: Optional[conint(gt=0)] = 5

    def process(self, data):
        """Process the input configuration and return a
        `nexusformat.nexus.NXroot` object with the simulated
        dark field detector images.

        :param data: Input configuration for the simulation.
        :type data: list[PipelineData]
        :raises ValueError: Missing or invalid input or configuration
            parameter.
        :return: Simulated dark field images.
        :rtype: nexusformat.nexus.NXroot
        """
        # Third party modules
        # pylint: disable=no-name-in-module
        from nexusformat.nexus import (
            NXroot,
            NXentry,
            NXinstrument,
            NXdetector,
        )
        # pylint: enable=no-name-in-module

        # Get and validate the TomoSimField configuration object in data
        nxroot = self.get_data(
            data, schema='tomo.models.TomoSimField', remove=False)
        if nxroot is None:
            raise ValueError('No valid TomoSimField configuration found in '
                             'input data')
        source = nxroot.entry.instrument.source
        detector = nxroot.entry.instrument.detector
        background_intensity = source.background_intensity
        detector_size = detector.data.shape[-2:]

        # Add dummy snapshots at start to mimic SMB
        if source.station in ('id1a3', 'id3a'):
            num_dummy_start = 5
            starting_image_index = 123000
        else:
            num_dummy_start = 1
            starting_image_index = 0
        starting_image_offset = num_dummy_start
        # Use a local total instead of mutating self.num_image so that
        # repeated calls to process() do not accumulate dummy frames
        num_image = self.num_image + num_dummy_start

        # Create the dark field (background counts only)
        dark_field = int(background_intensity) * np.ones(
            (num_image, detector_size[0], detector_size[1]),
            dtype=np.int64)

        # Create a NeXus object and write to file
        nxdark = NXroot()
        nxdark.entry = NXentry()
        nxdark.entry.sample = nxroot.entry.sample
        nxinstrument = NXinstrument()
        nxdark.entry.instrument = nxinstrument
        nxinstrument.source = source
        nxdetector = NXdetector()
        nxinstrument.detector = nxdetector
        nxdetector.local_name = detector.local_name
        nxdetector.row_pixel_size = detector.row_pixel_size
        nxdetector.column_pixel_size = detector.column_pixel_size
        nxdetector.data = dark_field
        # One zero theta per non-dummy image
        nxdetector.thetas = np.asarray((num_image-num_dummy_start)*[0])
        nxdetector.starting_image_index = starting_image_index
        nxdetector.starting_image_offset = starting_image_offset

        return nxdark
3491
+
3492
+
3493
class TomoBrightFieldProcessor(Processor):
    """A processor to create the bright field associated with a
    simulated tomography data set created by TomoSimProcessor.

    :ivar num_image: Number of bright field images, defaults to `5`.
    :type num_image: int, optional.
    """
    num_image: Optional[conint(gt=0)] = 5

    def process(self, data):
        """Process the input configuration and return a
        `nexusformat.nexus.NXroot` object with the simulated
        bright field detector images.

        :param data: Input configuration for the simulation.
        :type data: list[PipelineData]
        :raises ValueError: Missing or invalid input or configuration
            parameter.
        :return: Simulated bright field images.
        :rtype: nexusformat.nexus.NXroot
        """
        # Third party modules
        # pylint: disable=no-name-in-module
        from nexusformat.nexus import (
            NXroot,
            NXentry,
            NXinstrument,
            NXdetector,
        )
        # pylint: enable=no-name-in-module

        # Get and validate the TomoSimField configuration object in data
        nxroot = self.get_data(
            data, schema='tomo.models.TomoSimField', remove=False)
        if nxroot is None:
            raise ValueError('No valid TomoSimField configuration found in '
                             'input data')
        source = nxroot.entry.instrument.source
        detector = nxroot.entry.instrument.detector
        beam_intensity = source.beam_intensity
        background_intensity = source.background_intensity
        detector_size = detector.data.shape[-2:]

        # Add dummy snapshots at start to mimic SMB
        if source.station in ('id1a3', 'id3a'):
            num_dummy_start = 5
            starting_image_index = 234000
        else:
            num_dummy_start = 1
            starting_image_index = 0
        starting_image_offset = num_dummy_start

        # Create the bright field (background plus unattenuated beam);
        # track the image count locally instead of mutating
        # self.num_image so repeated process() calls stay consistent
        num_image = self.num_image
        bright_field = int(background_intensity+beam_intensity) * np.ones(
            (num_image, detector_size[0], detector_size[1]),
            dtype=np.int64)
        if num_dummy_start:
            dummy_fields = int(background_intensity) * np.ones(
                (num_dummy_start, detector_size[0], detector_size[1]),
                dtype=np.int64)
            bright_field = np.concatenate((dummy_fields, bright_field))
            num_image += num_dummy_start
        # Add 20% to slit size to make the bright beam slightly taller
        # than the vertical displacements between stacks
        slit_size = 1.2*source.slit_size
        if slit_size < float(detector.row_pixel_size*detector_size[0]):
            img_row_coords = float(detector.row_pixel_size) \
                * (0.5 + np.asarray(range(int(detector_size[0])))
                   - 0.5*detector_size[0])
            # Zero out detector rows outside the illuminated slit
            outer_indices = np.where(abs(img_row_coords) > slit_size/2)[0]
            bright_field[:,outer_indices,:] = 0

        # Create a NeXus object and write to file
        nxbright = NXroot()
        nxbright.entry = NXentry()
        nxbright.entry.sample = nxroot.entry.sample
        nxinstrument = NXinstrument()
        nxbright.entry.instrument = nxinstrument
        nxinstrument.source = source
        nxdetector = NXdetector()
        nxinstrument.detector = nxdetector
        nxdetector.local_name = detector.local_name
        nxdetector.row_pixel_size = detector.row_pixel_size
        nxdetector.column_pixel_size = detector.column_pixel_size
        nxdetector.data = bright_field
        # One zero theta per non-dummy image
        nxdetector.thetas = np.asarray((num_image-num_dummy_start)*[0])
        nxdetector.starting_image_index = starting_image_index
        nxdetector.starting_image_offset = starting_image_offset

        return nxbright
3583
+
3584
+
3585
class TomoSpecProcessor(Processor):
    """A processor to create a tomography SPEC file associated with a
    simulated tomography data set created by TomoSimProcessor.

    :ivar scan_numbers: List of SPEC scan numbers.
    :type scan_numbers: list[int], optional
    """
    scan_numbers: Optional[
        conlist(min_length=1, item_type=conint(gt=0))] = None

    @field_validator('scan_numbers', mode='before')
    @classmethod
    def validate_scan_numbers(cls, scan_numbers):
        """Validate the specified list of scan numbers.

        Accepts a single integer (wrapped in a list) or a string
        representation (converted with `string_to_list`).

        :param scan_numbers: List of scan numbers.
        :type scan_numbers: Union(int, list[int], str)
        :return: List of scan numbers.
        :rtype: list[int]
        """
        if isinstance(scan_numbers, int):
            scan_numbers = [scan_numbers]
        elif isinstance(scan_numbers, str):
            # Local modules
            from CHAP.utils.general import string_to_list

            scan_numbers = string_to_list(scan_numbers)
        return scan_numbers

    def process(self, data):
        """Process the input configuration and return a list of strings
        representing a plain text SPEC file.

        :param data: Input configuration for the simulation.
        :type data: list[PipelineData]
        :raises ValueError: Invalid input or configuration parameter.
        :return: Simulated SPEC file.
        :rtype: list[str]
        """
        # System modules
        from json import dumps
        from datetime import datetime

        # Third party modules
        from nexusformat.nexus import (
            NXentry,
            NXroot,
            NXsubentry,
        )

        # Get and validate the TomoSimField, TomoDarkField, or
        # TomoBrightField configuration object in data
        configs = {}
        nxroot = self.get_data(data, schema='tomo.models.TomoDarkField')
        if nxroot is not None:
            configs['tomo.models.TomoDarkField'] = nxroot
        nxroot = self.get_data(data, schema='tomo.models.TomoBrightField')
        if nxroot is not None:
            configs['tomo.models.TomoBrightField'] = nxroot
        nxroot = self.get_data(data, schema='tomo.models.TomoSimField')
        if nxroot is not None:
            configs['tomo.models.TomoSimField'] = nxroot
        # All collected fields must agree on station and sample type;
        # count the total number of scans (one per stack) as we go
        station = None
        sample_type = None
        num_scan = 0
        for schema, nxroot in configs.items():
            source = nxroot.entry.instrument.source
            if station is None:
                station = source.attrs.get('station')
            else:
                if station != source.attrs.get('station'):
                    raise ValueError('Inconsistent station among scans')
            if sample_type is None:
                sample_type = nxroot.entry.sample.sample_type
            else:
                if sample_type != nxroot.entry.sample.sample_type:
                    raise ValueError('Inconsistent sample_type among scans')
            detector = nxroot.entry.instrument.detector
            if 'z_translation' in detector:
                num_stack = detector.z_translation.size
            else:
                num_stack = 1
            # A 3D data array holds a single stack; a 4D one holds one
            # stack per z_translation value
            data_shape = detector.data.shape
            if len(data_shape) == 3:
                if num_stack != 1:
                    raise ValueError(
                        'Inconsistent z_translation and data dimensions'
                        f'({num_stack} vs {1})')
            elif len(data_shape) == 4:
                if num_stack != data_shape[0]:
                    raise ValueError(
                        'Inconsistent z_translation dimension and data shape '
                        f'({num_stack} vs {data_shape[0]})')
            else:
                raise ValueError(f'Invalid data shape ({data_shape})')
            num_scan += num_stack
        # NOTE(review): this assigns back to self.scan_numbers, so the
        # processor instance is mutated by process() — confirm repeated
        # runs with different inputs are intended to reuse the list
        if self.scan_numbers is None:
            self.scan_numbers = list(range(1, num_scan+1))
        elif len(self.scan_numbers) != num_scan:
            raise ValueError(
                f'Inconsistent number of scans ({num_scan}), '
                f'len(self.scan_numbers) = {len(self.scan_numbers)})')

        # Create the output data structure in NeXus format
        nxentry = NXentry()

        # Create the SPEC file header
        spec_file = [f'#F {sample_type}']
        spec_file.append('#E 0')
        spec_file.append(
            f'#D {datetime.now().strftime("%a %b %d %I:%M:%S %Y")}')
        spec_file.append(f'#C spec User = chess_{station}\n')
        if station in ('id1a3', 'id3a'):
            spec_file.append('#O0 ramsx ramsz')
        else:
            # RV Fix main code to use independent dim info
            spec_file.append('#O0 GI_samx GI_samz GI_samphi')
            spec_file.append('#o0 samx samz samphi') # RV do I need this line?
        spec_file.append('')

        # Create the SPEC file scan info (and image and parfile data for SMB)
        par_file = []
        image_sets = []
        starting_image_indices = []
        num_scan = 0
        count_time = 1
        for schema, nxroot in configs.items():
            detector = nxroot.entry.instrument.detector
            if 'z_translation' in detector:
                z_translations = list(detector.z_translation.nxdata)
            else:
                z_translations = [0.]
            thetas = detector.thetas
            num_theta = thetas.size
            # Pick the SPEC macro line and (for SMB stations) the scan
            # type tag for each kind of simulated field
            field_type = None
            scan_type = None
            if schema == 'tomo.models.TomoDarkField':
                if station in ('id1a3', 'id3a'):
                    macro = f'slew_ome {thetas[0]} {thetas[-1]} ' \
                            f'{num_theta} {count_time} darkfield'
                    scan_type = 'df1'
                else:
                    macro = f'flyscan {num_theta-1} {count_time}'
                field_type = 'dark_field'
            elif schema == 'tomo.models.TomoBrightField':
                if station in ('id1a3', 'id3a'):
                    macro = f'slew_ome {thetas[0]} {thetas[-1]} ' \
                            f'{num_theta} {count_time}'
                    scan_type = 'bf1'
                else:
                    macro = f'flyscan {num_theta-1} {count_time}'
                field_type = 'bright_field'
            elif schema == 'tomo.models.TomoSimField':
                if station in ('id1a3', 'id3a'):
                    macro = f'slew_ome {thetas[0]} {thetas[-1]} ' \
                            f'{num_theta} {count_time}'
                    scan_type = 'ts1'
                else:
                    macro = f'flyscan samphi {thetas[0]} ' \
                            f'{thetas[-1]} {num_theta-1} {count_time}'
                field_type = 'tomo_field'
            else:
                raise ValueError(f'Invalid schema {schema}')
            starting_image_index = int(detector.starting_image_index)
            starting_image_offset = int(detector.starting_image_offset)
            # Emit one SPEC scan per stack (z_translation value)
            for n, z_translation in enumerate(z_translations):
                scan_number = self.scan_numbers[num_scan]
                spec_file.append(f'#S {scan_number} {macro}')
                spec_file.append(
                    f'#D {datetime.now().strftime("%a %b %d %I:%M:%S %Y")}')
                if station in ('id1a3', 'id3a'):
                    # SMB: images go to a par file plus individual tiffs
                    spec_file.append(f'#P0 0.0 {z_translation}')
                    spec_file.append('#N 1')
                    spec_file.append('#L ome')
                    if scan_type == 'ts1':
                        #image_sets.append(detector.data.nxdata[n])
                        image_sets.append(detector.data[n])
                    else:
                        #image_sets.append(detector.data.nxdata)
                        image_sets.append(detector.data)
                    par_file.append(
                        f'{datetime.now().strftime("%Y%m%d")} '
                        f'{datetime.now().strftime("%H%M%S")} '
                        f'{scan_number} '
                        # '2.0 '
                        # '1.0 '
                        f'{starting_image_index} '
                        f'{starting_image_index+starting_image_offset} '
                        '0.0 '
                        f'{z_translation} '
                        f'{thetas[0]} '
                        f'{thetas[-1]} '
                        f'{num_theta} '
                        f'{count_time} '
                        f'{scan_type}')
                else:
                    # FMB: thetas are listed in the SPEC file and images
                    # are attached as an h5 file
                    spec_file.append(f'#P0 0.0 {z_translation} 0.0')
                    spec_file.append('#N 1')
                    spec_file.append('#L theta')
                    spec_file += [str(theta) for theta in thetas]
                    # Add the h5 file to output
                    prefix = str(detector.local_name).upper()
                    field_name = f'{field_type}_{scan_number:03d}'
                    nxentry[field_name] = nxroot.entry
                    nxentry[field_name].attrs['schema'] = 'h5'
                    nxentry[field_name].attrs['filename'] = \
                        f'{sample_type}_{prefix}_{scan_number:03d}.h5'
                starting_image_indices.append(starting_image_index)
                spec_file.append('')
                num_scan += 1

        if station in ('id1a3', 'id3a'):

            spec_filename = 'spec.log'

            # Add the JSON file to output (column map for the par file)
            parfile_header = {
                '0': 'date',
                '1': 'time',
                '2': 'SCAN_N',
                # '3': 'beam_width',
                # '4': 'beam_height',
                '3': 'junkstart',
                '4': 'goodstart',
                '5': 'ramsx',
                '6': 'ramsz',
                '7': 'ome_start_real',
                '8': 'ome_end_real',
                '9': 'nframes_real',
                '10': 'count_time',
                '11': 'tomotype',
            }
            nxentry.json = NXsubentry()
            nxentry.json.data = dumps(parfile_header)
            nxentry.json.attrs['schema'] = 'json'
            nxentry.json.attrs['filename'] = \
                f'{station}-tomo_sim-{sample_type}.json'

            # Add the par file to output
            nxentry.par = NXsubentry()
            nxentry.par.data = par_file
            nxentry.par.attrs['schema'] = 'txt'
            nxentry.par.attrs['filename'] = \
                f'{station}-tomo_sim-{sample_type}.par'

            # Add image files as individual tiffs to output
            for scan_number, image_set, starting_image_index in zip(
                    self.scan_numbers, image_sets, starting_image_indices):
                nxentry[f'{scan_number}'] = NXsubentry()
                nxsubentry = NXsubentry()
                nxentry[f'{scan_number}']['nf'] = nxsubentry
                for n in range(image_set.shape[0]):
                    nxsubentry[f'tiff_{n}'] = NXsubentry()
                    nxsubentry[f'tiff_{n}'].data = image_set[n]
                    nxsubentry[f'tiff_{n}'].attrs['schema'] = 'tif'
                    nxsubentry[f'tiff_{n}'].attrs['filename'] = \
                        f'nf_{(n+starting_image_index):06d}.tif'
        else:

            spec_filename = sample_type

        # Add spec file to output
        nxentry.spec = NXsubentry()
        nxentry.spec.data = spec_file
        nxentry.spec.attrs['schema'] = 'txt'
        nxentry.spec.attrs['filename'] = spec_filename

        nxroot = NXroot()
        nxroot[sample_type] = nxentry
        nxroot[sample_type].set_default()

        return nxroot
3856
+
3857
+
3858
if __name__ == '__main__':
    # Local modules
    from CHAP.processor import main

    # Run this module's processors from the command line via the
    # generic CHAP processor entry point
    main()