ChessAnalysisPipeline 0.0.2-py3-none-any.whl → 0.0.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Files changed (47)
  1. CHAP/__init__.py +3 -0
  2. CHAP/common/__init__.py +19 -0
  3. CHAP/common/models/__init__.py +2 -0
  4. CHAP/common/models/integration.py +515 -0
  5. CHAP/common/models/map.py +535 -0
  6. CHAP/common/processor.py +644 -0
  7. CHAP/common/reader.py +119 -0
  8. CHAP/common/utils/__init__.py +37 -0
  9. CHAP/common/utils/fit.py +2613 -0
  10. CHAP/common/utils/general.py +1225 -0
  11. CHAP/common/utils/material.py +231 -0
  12. CHAP/common/utils/scanparsers.py +785 -0
  13. CHAP/common/writer.py +96 -0
  14. CHAP/edd/__init__.py +7 -0
  15. CHAP/edd/models.py +215 -0
  16. CHAP/edd/processor.py +321 -0
  17. CHAP/edd/reader.py +5 -0
  18. CHAP/edd/writer.py +5 -0
  19. CHAP/inference/__init__.py +3 -0
  20. CHAP/inference/processor.py +68 -0
  21. CHAP/inference/reader.py +5 -0
  22. CHAP/inference/writer.py +5 -0
  23. CHAP/pipeline.py +1 -1
  24. CHAP/processor.py +11 -818
  25. CHAP/reader.py +18 -113
  26. CHAP/saxswaxs/__init__.py +6 -0
  27. CHAP/saxswaxs/processor.py +5 -0
  28. CHAP/saxswaxs/reader.py +5 -0
  29. CHAP/saxswaxs/writer.py +5 -0
  30. CHAP/sin2psi/__init__.py +7 -0
  31. CHAP/sin2psi/processor.py +5 -0
  32. CHAP/sin2psi/reader.py +5 -0
  33. CHAP/sin2psi/writer.py +5 -0
  34. CHAP/tomo/__init__.py +5 -0
  35. CHAP/tomo/models.py +125 -0
  36. CHAP/tomo/processor.py +2009 -0
  37. CHAP/tomo/reader.py +5 -0
  38. CHAP/tomo/writer.py +5 -0
  39. CHAP/writer.py +17 -167
  40. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/METADATA +1 -1
  41. ChessAnalysisPipeline-0.0.4.dist-info/RECORD +50 -0
  42. CHAP/async.py +0 -56
  43. ChessAnalysisPipeline-0.0.2.dist-info/RECORD +0 -17
  44. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/LICENSE +0 -0
  45. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/WHEEL +0 -0
  46. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/entry_points.txt +0 -0
  47. {ChessAnalysisPipeline-0.0.2.dist-info → ChessAnalysisPipeline-0.0.4.dist-info}/top_level.txt +0 -0
CHAP/common/writer.py ADDED
@@ -0,0 +1,96 @@
+ #!/usr/bin/env python
+ '''
+ File : writer.py
+ Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
+ Description: Module for Writers used in multiple experiment-specific workflows.
+ '''
+
+ # system modules
+ import argparse
+ import json
+ import logging
+ import os
+ import sys
+
+ # local modules
+ from CHAP import Writer
+
+ class ExtractArchiveWriter(Writer):
+     def _write(self, data, filename):
+         '''Take a .tar archive represented as bytes in `data` and write the
+         extracted archive to files.
+
+         :param data: the archive data
+         :type data: bytes
+         :param filename: the name of a directory to which the archive files will
+             be written
+         :type filename: str
+         :return: the original `data`
+         :rtype: bytes
+         '''
+
+         from io import BytesIO
+         import tarfile
+
+         tar = tarfile.open(fileobj=BytesIO(data))
+         tar.extractall(path=filename)
+
+         return(data)
+
+ class NexusWriter(Writer):
+     def _write(self, data, filename, force_overwrite=False):
+         '''Write `data` to a NeXus file
+
+         :param data: the data to write to `filename`.
+         :type data: nexusformat.nexus.NXobject
+         :param filename: name of the file to write to.
+         :param force_overwrite: flag to allow data in `filename` to be
+             overwritten, if it already exists.
+         :return: the original input data
+         '''
+
+         from nexusformat.nexus import NXobject
+
+         if not isinstance(data, NXobject):
+             raise(TypeError(f'Cannot write object of type {type(data).__name__} to a NeXus file.'))
+
+         mode = 'w' if force_overwrite else 'w-'
+         data.save(filename, mode=mode)
+
+         return(data)
+
+ class YAMLWriter(Writer):
+     def _write(self, data, filename, force_overwrite=False):
+         '''If `data` is a `dict`, write it to `filename`.
+
+         :param data: the dictionary to write to `filename`.
+         :type data: dict
+         :param filename: name of the file to write to.
+         :type filename: str
+         :param force_overwrite: flag to allow data in `filename` to be
+             overwritten if it already exists.
+         :type force_overwrite: bool
+         :raises TypeError: if `data` is not a `dict`
+         :raises RuntimeError: if `filename` already exists and
+             `force_overwrite` is `False`.
+         :return: the original input data
+         :rtype: dict
+         '''
+
+         import yaml
+
+         if not isinstance(data, (dict, list)):
+             raise(TypeError(f'{self.__name__}.write: input data must be a dict or list.'))
+
+         if not force_overwrite:
+             if os.path.isfile(filename):
+                 raise(RuntimeError(f'{self.__name__}: {filename} already exists.'))
+
+         with open(filename, 'w') as outf:
+             yaml.dump(data, outf, sort_keys=False)
+
+         return(data)
+
+ if __name__ == '__main__':
+     from CHAP.writer import main
+     main()
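
For orientation, the two patterns these Writers rely on are extracting a tar archive directly from an in-memory bytes buffer (`ExtractArchiveWriter`) and mapping `force_overwrite` onto the h5py-style `'w'` / `'w-'` file modes (`NexusWriter`). Below is a minimal standalone sketch of the same ideas using only the standard library, not the CHAP `Writer` classes; the archive contents and paths are invented for illustration.

```python
import io
import tarfile

# Build a small .tar archive in memory: a stand-in for the bytes a CHAP
# pipeline would hand to ExtractArchiveWriter.
buf = io.BytesIO()
with tarfile.open(fileobj=buf, mode='w') as tar:
    payload = b'example contents\n'
    info = tarfile.TarInfo(name='example.txt')
    info.size = len(payload)
    tar.addfile(info, io.BytesIO(payload))
archive_bytes = buf.getvalue()

# Same extraction pattern as ExtractArchiveWriter._write: open the archive
# from a bytes buffer and unpack it under a target directory.
with tarfile.open(fileobj=io.BytesIO(archive_bytes)) as tar:
    tar.extractall(path='extracted_archive')

# The 'w' vs 'w-' choice used by NexusWriter follows h5py semantics:
# 'w' truncates an existing file, 'w-' raises if the file already exists.
force_overwrite = False
mode = 'w' if force_overwrite else 'w-'
print(mode)  # -> 'w-'
```
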
CHAP/edd/__init__.py ADDED
@@ -0,0 +1,7 @@
+ # from CHAP.edd.reader import
+ from CHAP.edd.processor import (MCACeriaCalibrationProcessor,
+                                 MCADataProcessor)
+ # from CHAP.edd.writer import
+
+ from CHAP.common import (MapProcessor,
+                          StrainAnalysisProcessor)
CHAP/edd/models.py ADDED
@@ -0,0 +1,215 @@
+ import numpy as np
+ from pathlib import PosixPath
+ from pydantic import (BaseModel,
+                       confloat,
+                       conint,
+                       conlist,
+                       constr,
+                       FilePath,
+                       validator)
+ from scipy.interpolate import interp1d
+ from typing import Optional
+
+
+ class MCACeriaCalibrationConfig(BaseModel):
+     '''Class representing metadata required to perform a Ceria calibration for an
+     MCA detector.
+
+     :ivar spec_file: Path to the SPEC file containing the CeO2 scan
+     :ivar scan_number: Number of the CeO2 scan in `spec_file`
+     :ivar scan_step_index: Index of the scan step to use for calibration,
+         optional. If not specified, the calibration routine will be performed on
+         the average of all MCA spectra for the scan.
+
+     :ivar flux_file: csv file containing station beam energy in eV (column 0)
+         and flux (column 1)
+
+     :ivar detector_name: name of the MCA to calibrate
+     :ivar num_bins: number of channels on the MCA to calibrate
+     :ivar max_energy_kev: maximum channel energy of the MCA in keV
+
+     :ivar hexrd_h5_material_file: path to a HEXRD materials.h5 file containing an
+         entry for the material properties.
+     :ivar hexrd_h5_material_name: Name of the material entry in
+         `hexrd_h5_material_file`, defaults to `'CeO2'`.
+     :ivar lattice_parameter_angstrom: lattice spacing in angstrom to use for
+         the cubic CeO2 crystal, defaults to `5.41153`.
+
+     :ivar tth_max: detector rotation about hutch x axis, defaults to `90`.
+     :ivar hkl_tth_tol: minimum resolvable difference in 2&theta between two
+         unique HKL peaks, defaults to `0.15`.
+
+     :ivar fit_include_bin_ranges: list of MCA channel index ranges whose data
+         will be included in the calibration routine
+     :ivar fit_hkls: list of unique HKL indices to fit peaks for in the
+         calibration routine
+
+     :ivar tth_initial_guess: initial guess for 2&theta
+     :ivar slope_initial_guess: initial guess for detector channel energy
+         correction linear slope, defaults to `1.0`.
+     :ivar intercept_initial_guess: initial guess for detector channel energy
+         correction y-intercept, defaults to `0.0`.
+
+     :ivar tth_calibrated: calibrated value for 2&theta, defaults to `None`
+     :ivar slope_calibrated: calibrated value for detector channel energy
+         correction linear slope, defaults to `None`
+     :ivar intercept_calibrated: calibrated value for detector channel energy
+         correction y-intercept, defaults to `None`
+
+     :ivar max_iter: maximum number of iterations of the calibration routine,
+         defaults to `10`.
+     :ivar tune_tth_tol: stop iteratively tuning 2&theta when an iteration
+         produces a change in the tuned value of 2&theta that is smaller than this
+         value, defaults to `1e-8`.
+     '''
+
+     spec_file: FilePath
+     scan_number: conint(gt=0)
+     scan_step_index: Optional[conint(ge=0)]
+
+     flux_file: FilePath
+
+     detector_name: constr(strip_whitespace=True, min_length=1)
+     num_bins: conint(gt=0)
+     max_energy_kev: confloat(gt=0)
+
+     hexrd_h5_material_file: FilePath
+     hexrd_h5_material_name: constr(strip_whitespace=True, min_length=1) = 'CeO2'
+     lattice_parameter_angstrom: confloat(gt=0) = 5.41153
+
+     tth_max: confloat(gt=0, allow_inf_nan=False) = 90.0
+     hkl_tth_tol: confloat(gt=0, allow_inf_nan=False) = 0.15
+
+     fit_include_bin_ranges: conlist(min_items=1,
+                                     item_type=conlist(item_type=conint(ge=0),
+                                                       min_items=2,
+                                                       max_items=2))
+     fit_hkls: conlist(item_type=conint(ge=0), min_items=1)
+
+     tth_initial_guess: confloat(gt=0, le=tth_max, allow_inf_nan=False)
+     slope_initial_guess: float = 1.0
+     intercept_initial_guess: float = 0.0
+     tth_calibrated: Optional[confloat(gt=0, allow_inf_nan=False)]
+     slope_calibrated: Optional[confloat(allow_inf_nan=False)]
+     intercept_calibrated: Optional[confloat(allow_inf_nan=False)]
+
+     max_iter: conint(gt=0) = 10
+     tune_tth_tol: confloat(ge=0) = 1e-8
+
+     @validator('fit_include_bin_ranges', each_item=True)
+     def validate_include_bin_range(cls, value, values):
+         '''Ensure no bin ranges are outside the boundary of the detector'''
+
+         num_bins = values.get('num_bins')
+         value[1] = min(value[1], num_bins)
+         return(value)
+
+     def mca_data(self):
+         '''Get the 1D array of MCA data to use for calibration.
+
+         :return: MCA data
+         :rtype: np.ndarray
+         '''
+
+         from CHAP.common.utils.scanparsers import SMBMCAScanParser as ScanParser
+         scanparser = ScanParser(self.spec_file, self.scan_number)
+         if self.scan_step_index is None:
+             data = scanparser.get_all_detector_data(self.detector_name)
+             if scanparser.spec_scan_npts > 1:
+                 data = np.average(data, axis=1)
+             else:
+                 data = data[0]
+         else:
+             data = scanparser.get_detector_data(self.detector_name, self.scan_step_index)
+
+         return(np.array(data))
+
+     def mca_mask(self):
+         '''Get a boolean mask array to use on MCA data before fitting.
+
+         :return: boolean mask array
+         :rtype: numpy.ndarray
+         '''
+
+         mask = np.asarray([False]*self.num_bins)
+         bin_indices = np.arange(self.num_bins)
+         for min_, max_ in self.fit_include_bin_ranges:
+             _mask = np.logical_and(bin_indices > min_, bin_indices < max_)
+             mask = np.logical_or(mask, _mask)
+
+         return(mask)
+
+     def flux_correction_interpolation_function(self):
+         '''Get an interpolation function to correct MCA data for relative energy
+         flux of the incident beam.
+
+         :return: energy flux correction interpolation function
+         :rtype: scipy.interpolate._polyint._Interpolator1D
+         '''
+
+         flux = np.loadtxt(self.flux_file)
+         energies = flux[:,0]/1.e3
+         relative_intensities = flux[:,1]/np.max(flux[:,1])
+         interpolation_function = interp1d(energies, relative_intensities)
+         return(interpolation_function)
+
+     def material(self):
+         '''Get CeO2 as a `CHAP.common.utils.Material` object.
+
+         :return: CeO2 material
+         :rtype: CHAP.common.utils.Material
+         '''
+
+         from CHAP.common.utils import Material
+         material = Material(material_name=self.hexrd_h5_material_name,
+                             material_file=self.hexrd_h5_material_file,
+                             lattice_parameters_angstroms=self.lattice_parameter_angstrom)
+         # The following kwargs will be needed if we allow the material to be
+         # built using xrayutilities (for now, we only allow hexrd to make the
+         # material):
+         #     sgnum=225,
+         #     atoms=['Ce4p', 'O2mdot'],
+         #     pos=[(0.,0.,0.), (0.25,0.75,0.75)],
+         #     enrgy=50000.) # Why do we need to specify an energy to get HKLs when using xrayutilities?
+         return(material)
+
+     def unique_ds(self):
+         '''Get a list of unique HKLs and their lattice spacings
+
+         :return: unique HKLs and their lattice spacings in angstroms
+         :rtype: np.ndarray, np.ndarray
+         '''
+
+         unique_hkls, unique_ds = self.material().get_unique_ds(tth_tol=self.hkl_tth_tol, tth_max=self.tth_max)
+
+         return(unique_hkls, unique_ds)
+
+     def fit_ds(self):
+         '''Get a list of HKLs and their lattice spacings that will be fit in the
+         calibration routine
+
+         :return: HKLs to fit and their lattice spacings in angstroms
+         :rtype: np.ndarray, np.ndarray
+         '''
+
+         unique_hkls, unique_ds = self.unique_ds()
+
+         fit_hkls = np.array([unique_hkls[i] for i in self.fit_hkls])
+         fit_ds = np.array([unique_ds[i] for i in self.fit_hkls])
+
+         return(fit_hkls, fit_ds)
+
+     def dict(self):
+         '''Return a representation of this configuration in a dictionary that is
+         suitable for dumping to a YAML file (one that converts all instances of
+         fields with type `PosixPath` to `str`).
+
+         :return: dictionary representation of the configuration.
+         :rtype: dict
+         '''
+
+         d = super().dict()
+         for k,v in d.items():
+             if isinstance(v, PosixPath):
+                 d[k] = str(v)
+         return(d)
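
To make the mask and flux-correction helpers above concrete, here is a self-contained numpy/scipy sketch of the same computations with invented numbers standing in for `num_bins`, `fit_include_bin_ranges`, and the contents of `flux_file`; it is an illustration under those assumptions, not CHAP code.

```python
import numpy as np
from scipy.interpolate import interp1d

num_bins = 2048                                      # hypothetical detector size
fit_include_bin_ranges = [[600, 900], [1200, 1500]]  # hypothetical include ranges

# Boolean mask over MCA channels, as in MCACeriaCalibrationConfig.mca_mask:
# a channel is kept if it falls strictly inside any include range.
bin_indices = np.arange(num_bins)
mask = np.zeros(num_bins, dtype=bool)
for min_, max_ in fit_include_bin_ranges:
    mask |= (bin_indices > min_) & (bin_indices < max_)

# Flux correction, as in flux_correction_interpolation_function: column 0 is
# beam energy in eV (converted to keV), column 1 is flux normalized to its max.
flux_table = np.array([[8000.0, 0.9e6],
                       [12000.0, 1.1e6],
                       [16000.0, 1.0e6]])             # fabricated flux_file contents
energies = flux_table[:, 0] / 1.e3
relative_intensities = flux_table[:, 1] / np.max(flux_table[:, 1])
flux_correct = interp1d(energies, relative_intensities)

print(mask.sum())          # number of channels included in the fit
print(flux_correct(10.0))  # relative flux at 10 keV
```
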
CHAP/edd/processor.py ADDED
@@ -0,0 +1,321 @@
+ #!/usr/bin/env python
+ #-*- coding: utf-8 -*-
+ #pylint: disable=
+ '''
+ File : processor.py
+ Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
+ Description: Module for Processors used only by EDD experiments
+ '''
+
+ # system modules
+ import json
+
+ # local modules
+ from CHAP.processor import Processor
+ from CHAP.common import StrainAnalysisProcessor
+
+ class MCACeriaCalibrationProcessor(Processor):
+     '''A Processor using a CeO2 scan to obtain tuned values for the Bragg
+     diffraction angle and linear correction parameters for MCA channel energies
+     for an EDD experimental setup.
+     '''
+
+     def _process(self, data):
+         '''Return tuned values for 2&theta and linear correction parameters for
+         the MCA channel energies.
+
+         :param data: input configuration for the raw data & tuning procedure
+         :type data: list[dict[str,object]]
+         :return: original configuration dictionary with tuned values added
+         :rtype: dict[str,float]
+         '''
+
+         calibration_config = self.get_config(data)
+
+         tth, slope, intercept = self.calibrate(calibration_config)
+
+         calibration_config.tth_calibrated = tth
+         calibration_config.slope_calibrated = slope
+         calibration_config.intercept_calibrated = intercept
+
+         return(calibration_config.dict())
+
+     def get_config(self, data):
+         '''Get an instance of the configuration object needed by this
+         `Processor` from a returned value of `Reader.read`
+
+         :param data: Result of `Reader.read` where at least one item has the
+             value `'MCACeriaCalibrationConfig'` for the `'schema'` key.
+         :type data: list[dict[str,object]]
+         :raises Exception: If a valid config object cannot be constructed from
+             `data`.
+         :return: a valid instance of a configuration object with field values
+             taken from `data`.
+         :rtype: MCACeriaCalibrationConfig
+         '''
+
+         from CHAP.edd.models import MCACeriaCalibrationConfig
+
+         calibration_config = False
+         if isinstance(data, list):
+             for item in data:
+                 if isinstance(item, dict):
+                     if item.get('schema') == 'MCACeriaCalibrationConfig':
+                         calibration_config = item.get('data')
+                         break
+
+         if not calibration_config:
+             raise(ValueError('No MCA ceria calibration configuration found in input data'))
+
+         return(MCACeriaCalibrationConfig(**calibration_config))
+
+     def calibrate(self, calibration_config):
+         '''Iteratively calibrate 2&theta by fitting selected peaks of an MCA
+         spectrum until the computed strain is sufficiently small. Use the fitted
+         peak locations to determine linear correction parameters for the MCA's
+         channel energies.
+
+         :param calibration_config: object configuring the CeO2 calibration
+             procedure
+         :type calibration_config: MCACeriaCalibrationConfig
+         :return: calibrated values of 2&theta and linear correction parameters
+             for MCA channel energies : tth, slope, intercept
+         :rtype: float, float, float
+         '''
+
+         from CHAP.common.utils import Fit, FitMultipeak
+         import numpy as np
+         from scipy.constants import physical_constants
+
+         hc = (physical_constants['Planck constant in eV/Hz'][0]
+               * physical_constants['speed of light in vacuum'][0]
+               * 1e7) # We'll work in keV and A, not eV and m.
+
+         # Collect raw MCA data of interest
+         mca_data = calibration_config.mca_data()
+         mca_bin_energies = (np.arange(0, calibration_config.num_bins)
+                             * (calibration_config.max_energy_kev
+                                / calibration_config.num_bins))
+
+         # Mask out the corrected MCA data for fitting
+         mca_mask = calibration_config.mca_mask()
+         fit_mca_energies = mca_bin_energies[mca_mask]
+         fit_mca_intensities = mca_data[mca_mask]
+
+         # Correct raw MCA data for variable flux at different energies
+         flux_correct = calibration_config.flux_correction_interpolation_function()
+         mca_intensity_weights = flux_correct(fit_mca_energies)
+         fit_mca_intensities = fit_mca_intensities / mca_intensity_weights
+
+         # Get the HKLs and lattice spacings that will be used for fitting
+         tth = calibration_config.tth_initial_guess
+         fit_hkls, fit_ds = calibration_config.fit_ds()
+         c_1 = fit_hkls[:,0]**2 + fit_hkls[:,1]**2 + fit_hkls[:,2]**2
+
+         for iter_i in range(calibration_config.max_iter):
+
+             ### Perform the uniform fit first ###
+
+             # Get expected peak energy locations for this iteration's starting
+             # value of tth
+             fit_lambda = 2.0 * fit_ds * np.sin(0.5*np.radians(tth))
+             fit_E0 = hc / fit_lambda
+
+             # Run the uniform fit
+             best_fit, residual, best_values, best_errors, redchi, success = \
+                 FitMultipeak.fit_multipeak(
+                     fit_mca_intensities,
+                     fit_E0,
+                     x=fit_mca_energies,
+                     fit_type='uniform')
+
+             # Extract values of interest from the best values for the uniform fit
+             # parameters
+             uniform_fit_centers = [best_values[f'peak{i+1}_center'] for i in range(len(calibration_config.fit_hkls))]
+             # uniform_a = best_values['scale_factor']
+             # uniform_strain = np.log(
+             #     (uniform_a
+             #      / calibration_config.lattice_parameter_angstrom))
+             # uniform_tth = tth * (1.0 + uniform_strain)
+             # uniform_rel_rms_error = (np.linalg.norm(residual)
+             #                          / np.linalg.norm(fit_mca_intensities))
+
+             ### Next, perform the unconstrained fit ###
+
+             # Use the peak locations found in the uniform fit as the initial
+             # guesses for peak locations in the unconstrained fit
+             best_fit, residual, best_values, best_errors, redchi, success = \
+                 FitMultipeak.fit_multipeak(
+                     fit_mca_intensities,
+                     uniform_fit_centers,
+                     x=fit_mca_energies,
+                     fit_type='unconstrained')
+
+             # Extract values of interest from the best values for the
+             # unconstrained fit parameters
+             unconstrained_fit_centers = np.array(
+                 [best_values[f'peak{i+1}_center'] for i in range(len(calibration_config.fit_hkls))])
+             unconstrained_a = (0.5 * hc * np.sqrt(c_1)
+                                / (unconstrained_fit_centers
+                                   * abs(np.sin(0.5*np.radians(tth)))))
+             unconstrained_strains = np.log(
+                 (unconstrained_a
+                  / calibration_config.lattice_parameter_angstrom))
+             unconstrained_strain = np.mean(unconstrained_strains)
+             unconstrained_tth = tth * (1.0 + unconstrained_strain)
+             unconstrained_rel_rms_error = (np.linalg.norm(residual)
+                                            / np.linalg.norm(fit_mca_intensities))
+
+             # Update tth for the next iteration of tuning
+             prev_tth = tth
+             tth = unconstrained_tth
+
+             # Stop tuning tth at this iteration if differences are small enough
+             if abs(tth - prev_tth) < calibration_config.tune_tth_tol:
+                 break
+
+         # Fit line to expected / computed peak locations from the last
+         # unconstrained fit.
+         fit = Fit.fit_data(
+             fit_E0,
+             'linear',
+             x=unconstrained_fit_centers,
+             nan_policy='omit')
+         slope = fit.best_values['slope']
+         intercept = fit.best_values['intercept']
+
+         return(float(tth), float(slope), float(intercept))
+
+ class MCADataProcessor(Processor):
+     '''A Processor to return data from an MCA, restructured to incorporate the
+     shape & metadata associated with a map configuration to which the MCA data
+     belongs, and linearly transformed according to the results of a ceria
+     calibration.
+     '''
+
+     def _process(self, data):
+         '''Process configurations for a map and MCA detector(s), and return the
+         calibrated MCA data collected over the map.
+
+         :param data: input map configuration and results of ceria calibration
+         :type data: list[dict[str,object]]
+         :return: calibrated and flux-corrected MCA data
+         :rtype: nexusformat.nexus.NXentry
+         '''
+
+         map_config, calibration_config = self.get_configs(data)
+         nxroot = self.get_nxroot(map_config, calibration_config)
+
+         return(nxroot)
+
+     def get_configs(self, data):
+         '''Get instances of the configuration objects needed by this
+         `Processor` from a returned value of `Reader.read`
+
+         :param data: Result of `Reader.read` where at least one item has the
+             value `'MapConfig'` for the `'schema'` key, and at least one item has
+             the value `'MCACeriaCalibrationConfig'` for the `'schema'` key.
+         :type data: list[dict[str,object]]
+         :raises Exception: If valid config objects cannot be constructed from
+             `data`.
+         :return: valid instances of the configuration objects with field values
+             taken from `data`.
+         :rtype: tuple[MapConfig, MCACeriaCalibrationConfig]
+         '''
+
+         from CHAP.common.models import MapConfig
+         from CHAP.edd.models import MCACeriaCalibrationConfig
+
+         map_config = False
+         calibration_config = False
+         if isinstance(data, list):
+             for item in data:
+                 if isinstance(item, dict):
+                     schema = item.get('schema')
+                     if schema == 'MapConfig':
+                         map_config = item.get('data')
+                     elif schema == 'MCACeriaCalibrationConfig':
+                         calibration_config = item.get('data')
+
+         if not map_config:
+             raise(ValueError('No map configuration found in input data'))
+         if not calibration_config:
+             raise(ValueError('No MCA ceria calibration configuration found in input data'))
+
+         return(MapConfig(**map_config), MCACeriaCalibrationConfig(**calibration_config))
+
+     def get_nxroot(self, map_config, calibration_config):
+         '''Get a map of the MCA data collected by the scans in `map_config`. The
+         MCA data will be calibrated and flux-corrected according to the
+         parameters included in `calibration_config`. The data will be returned
+         along with relevant metadata in the form of a NeXus structure.
+
+         :param map_config: the map configuration
+         :type map_config: MapConfig
+         :param calibration_config: the calibration configuration
+         :type calibration_config: MCACeriaCalibrationConfig
+         :return: a map of the calibrated and flux-corrected MCA data
+         :rtype: nexusformat.nexus.NXroot
+         '''
+
+         from CHAP.common import MapProcessor
+
+         from nexusformat.nexus import (NXdata,
+                                        NXdetector,
+                                        NXentry,
+                                        NXinstrument,
+                                        NXroot)
+         import numpy as np
+
+         nxroot = NXroot()
+
+         nxroot[map_config.title] = MapProcessor.get_nxentry(map_config)
+         nxentry = nxroot[map_config.title]
+
+         nxentry.instrument = NXinstrument()
+         nxentry.instrument.detector = NXdetector()
+         nxentry.instrument.detector.calibration_configuration = json.dumps(calibration_config.dict())
+
+         nxentry.instrument.detector.data = NXdata()
+         nxdata = nxentry.instrument.detector.data
+         nxdata.raw = np.empty((*map_config.shape, calibration_config.num_bins))
+         nxdata.raw.attrs['units'] = 'counts'
+         nxdata.channel_energy = (calibration_config.slope_calibrated
+                                  * np.arange(0, calibration_config.num_bins)
+                                  * (calibration_config.max_energy_kev
+                                     / calibration_config.num_bins)
+                                  + calibration_config.intercept_calibrated)
+         nxdata.channel_energy.attrs['units'] = 'keV'
+
+         for scans in map_config.spec_scans:
+             for scan_number in scans.scan_numbers:
+                 scanparser = scans.get_scanparser(scan_number)
+                 for scan_step_index in range(scanparser.spec_scan_npts):
+                     map_index = scans.get_index(
+                         scan_number,
+                         scan_step_index,
+                         map_config)
+                     nxdata.raw[map_index] = scanparser.get_detector_data(
+                         calibration_config.detector_name,
+                         scan_step_index)
+
+         nxentry.data.makelink(
+             nxdata.raw,
+             name=calibration_config.detector_name)
+         nxentry.data.makelink(
+             nxdata.channel_energy,
+             name=f'{calibration_config.detector_name}_channel_energy')
+         if isinstance(nxentry.data.attrs['axes'], str):
+             nxentry.data.attrs['axes'] = [
+                 nxentry.data.attrs['axes'],
+                 f'{calibration_config.detector_name}_channel_energy']
+         else:
+             nxentry.data.attrs['axes'] += [f'{calibration_config.detector_name}_channel_energy']
+         nxentry.data.attrs['signal'] = calibration_config.detector_name
+
+         return(nxroot)
+
+ if __name__ == '__main__':
+     from CHAP.processor import main
+     main()
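
The core arithmetic of `calibrate` is the energy-dispersive Bragg relation E = hc / (2 d sin θ) plus a mean log strain ln(a_fit / a_0) that rescales 2θ on each iteration. The sketch below reproduces one such update in isolation, with fabricated peak centers standing in for `FitMultipeak` output; it is an illustration under those assumptions, not the pipeline code itself.

```python
import numpy as np
from scipy.constants import physical_constants

# hc in keV*angstrom, same conversion as in MCACeriaCalibrationProcessor.calibrate
hc = (physical_constants['Planck constant in eV/Hz'][0]
      * physical_constants['speed of light in vacuum'][0] * 1e7)

a_0 = 5.41153                                  # CeO2 lattice parameter (angstrom)
fit_hkls = np.array([[1, 1, 1], [2, 0, 0], [2, 2, 0]])
c_1 = (fit_hkls**2).sum(axis=1)                # h^2 + k^2 + l^2 per reflection
fit_ds = a_0 / np.sqrt(c_1)                    # cubic lattice spacings d_hkl

tth = 7.5                                      # current guess for 2*theta, degrees
# Expected peak energies from Bragg's law: E = hc / (2 d sin(theta))
expected_E0 = hc / (2.0 * fit_ds * np.sin(0.5 * np.radians(tth)))

# Pretend the unconstrained peak fit returned these centers (keV); invented values.
fitted_centers = expected_E0 * np.array([1.001, 0.999, 1.002])

# Back out a lattice parameter per peak, take the mean log strain, update tth.
fitted_a = (0.5 * hc * np.sqrt(c_1)
            / (fitted_centers * abs(np.sin(0.5 * np.radians(tth)))))
strain = np.mean(np.log(fitted_a / a_0))
tth_next = tth * (1.0 + strain)

print(expected_E0)   # expected peak energies for the current tth
print(tth_next)      # tth after one tuning step
```
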
CHAP/edd/reader.py ADDED
@@ -0,0 +1,5 @@
+ #!/usr/bin/env python
+
+ if __name__ == '__main__':
+     from CHAP.reader import main
+     main()
CHAP/edd/writer.py ADDED
@@ -0,0 +1,5 @@
+ #!/usr/bin/env python
+
+ if __name__ == '__main__':
+     from CHAP.writer import main
+     main()
CHAP/inference/__init__.py ADDED
@@ -0,0 +1,3 @@
+ # from CHAP.inference.reader import
+ from CHAP.inference.processor import TFaaSImageProcessor
+ # from CHAP.inference.writer import