ChessAnalysisPipeline 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.


This version of ChessAnalysisPipeline might be problematic. Click here for more details.

Files changed (43)
  1. CHAP/TaskManager.py +214 -0
  2. CHAP/common/models/__init__.py +0 -2
  3. CHAP/common/models/integration.py +392 -249
  4. CHAP/common/models/map.py +350 -198
  5. CHAP/common/processor.py +229 -191
  6. CHAP/common/reader.py +52 -39
  7. CHAP/common/utils/__init__.py +0 -37
  8. CHAP/common/utils/fit.py +1197 -991
  9. CHAP/common/utils/general.py +629 -372
  10. CHAP/common/utils/material.py +158 -121
  11. CHAP/common/utils/scanparsers.py +735 -339
  12. CHAP/common/writer.py +31 -25
  13. CHAP/edd/models.py +65 -51
  14. CHAP/edd/processor.py +136 -113
  15. CHAP/edd/reader.py +1 -1
  16. CHAP/edd/writer.py +1 -1
  17. CHAP/inference/processor.py +35 -28
  18. CHAP/inference/reader.py +1 -1
  19. CHAP/inference/writer.py +1 -1
  20. CHAP/pipeline.py +14 -28
  21. CHAP/processor.py +44 -75
  22. CHAP/reader.py +49 -40
  23. CHAP/runner.py +73 -32
  24. CHAP/saxswaxs/processor.py +1 -1
  25. CHAP/saxswaxs/reader.py +1 -1
  26. CHAP/saxswaxs/writer.py +1 -1
  27. CHAP/server.py +130 -0
  28. CHAP/sin2psi/processor.py +1 -1
  29. CHAP/sin2psi/reader.py +1 -1
  30. CHAP/sin2psi/writer.py +1 -1
  31. CHAP/tomo/__init__.py +1 -4
  32. CHAP/tomo/models.py +53 -31
  33. CHAP/tomo/processor.py +1326 -902
  34. CHAP/tomo/reader.py +4 -2
  35. CHAP/tomo/writer.py +4 -2
  36. CHAP/writer.py +47 -41
  37. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/METADATA +1 -1
  38. ChessAnalysisPipeline-0.0.6.dist-info/RECORD +52 -0
  39. ChessAnalysisPipeline-0.0.4.dist-info/RECORD +0 -50
  40. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/LICENSE +0 -0
  41. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/WHEEL +0 -0
  42. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/entry_points.txt +0 -0
  43. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/top_level.txt +0 -0
CHAP/common/writer.py CHANGED
@@ -1,67 +1,70 @@
1
1
  #!/usr/bin/env python
2
- '''
2
+ """
3
3
  File : writer.py
4
4
  Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
5
5
  Description: Module for Writers used in multiple experiment-specific workflows.
6
- '''
6
+ """
7
7
 
8
8
  # system modules
9
- import argparse
10
- import json
11
- import logging
12
9
  import os
13
- import sys
14
10
 
15
11
  # local modules
16
12
  from CHAP import Writer
17
13
 
14
+
18
15
  class ExtractArchiveWriter(Writer):
16
+ """Writer for tar files from binary data"""
19
17
  def _write(self, data, filename):
20
- '''Take a .tar archive represented as bytes in `data` and write the
21
- extracted archive to files.
18
+ """Take a .tar archive represented as bytes in `data` and
19
+ write the extracted archive to files.
22
20
 
23
21
  :param data: the archive data
24
22
  :type data: bytes
25
- :param filename: the name of a directory to which the archive files will
26
- be written
23
+ :param filename: the name of a directory to which the archive
24
+ files will be written
27
25
  :type filename: str
28
26
  :return: the original `data`
29
27
  :rtype: bytes
30
- '''
28
+ """
31
29
 
32
30
  from io import BytesIO
33
31
  import tarfile
34
32
 
35
- tar = tarfile.open(fileobj=BytesIO(data))
36
- tar.extractall(path=filename)
33
+ with tarfile.open(fileobj=BytesIO(data)) as tar:
34
+ tar.extractall(path=filename)
35
+
36
+ return data
37
37
 
38
- return(data)
39
38
 
40
39
  class NexusWriter(Writer):
40
+ """Writer for NeXus files from `NXobject`-s"""
41
41
  def _write(self, data, filename, force_overwrite=False):
42
- '''Write `data` to a NeXus file
42
+ """Write `data` to a NeXus file
43
43
 
44
44
  :param data: the data to write to `filename`.
45
45
  :type data: nexusformat.nexus.NXobject
46
46
  :param filename: name of the file to write to.
47
47
  :param force_overwrite: flag to allow data in `filename` to be
48
- overwritten, if it already exists.
48
+ overwritten, if it already exists.
49
49
  :return: the original input data
50
- '''
50
+ """
51
51
 
52
52
  from nexusformat.nexus import NXobject
53
-
53
+
54
54
  if not isinstance(data, NXobject):
55
- raise(TypeError(f'Cannot write object of type {type(data).__name__} to a NeXus file.'))
55
+ raise TypeError('Cannot write object of type '
56
+ f'{type(data).__name__} to a NeXus file.')
56
57
 
57
58
  mode = 'w' if force_overwrite else 'w-'
58
59
  data.save(filename, mode=mode)
59
60
 
60
- return(data)
61
+ return data
62
+
61
63
 
62
64
  class YAMLWriter(Writer):
65
+ """Writer for YAML files from `dict`-s"""
63
66
  def _write(self, data, filename, force_overwrite=False):
64
- '''If `data` is a `dict`, write it to `filename`.
67
+ """If `data` is a `dict`, write it to `filename`.
65
68
 
66
69
  :param data: the dictionary to write to `filename`.
67
70
  :type data: dict
@@ -75,21 +78,24 @@ class YAMLWriter(Writer):
75
78
  `force_overwrite` is `False`.
76
79
  :return: the original input data
77
80
  :rtype: dict
78
- '''
81
+ """
79
82
 
80
83
  import yaml
81
84
 
82
85
  if not isinstance(data, (dict, list)):
83
- raise(TypeError(f'{self.__name__}.write: input data must be a dict or list.'))
86
+ raise TypeError(
87
+ f'{self.__name__}.write: input data must be a dict or list.')
84
88
 
85
89
  if not force_overwrite:
86
90
  if os.path.isfile(filename):
87
- raise(RuntimeError(f'{self.__name__}: {filename} already exists.'))
91
+ raise RuntimeError(
92
+ f'{self.__name__}: {filename} already exists.')
88
93
 
89
94
  with open(filename, 'w') as outf:
90
95
  yaml.dump(data, outf, sort_keys=False)
91
96
 
92
- return(data)
97
+ return data
98
+
93
99
 
94
100
  if __name__ == '__main__':
95
101
  from CHAP.writer import main
CHAP/edd/models.py CHANGED
@@ -1,3 +1,4 @@
1
+ # third party modules
1
2
  import numpy as np
2
3
  from pathlib import PosixPath
3
4
  from pydantic import (BaseModel,
@@ -12,14 +13,15 @@ from typing import Optional
12
13
 
13
14
 
14
15
  class MCACeriaCalibrationConfig(BaseModel):
15
- '''Class representing metadata required to perform a Ceria calibration for an
16
+ """
17
+ Class representing metadata required to perform a Ceria calibration for an
16
18
  MCA detector.
17
19
 
18
20
  :ivar spec_file: Path to the SPEC file containing the CeO2 scan
19
21
  :ivar scan_number: Number of the CeO2 scan in `spec_file`
20
22
  :ivar scan_step_index: Index of the scan step to use for calibration,
21
- optional. If not specified, the calibration routine will be performed on
22
- the average of all MCA spectra for the scan.
23
+ optional. If not specified, the calibration routine will be performed
24
+ on the average of all MCA spectra for the scan.
23
25
 
24
26
  :ivar flux_file: csv file containing station beam energy in eV (column 0)
25
27
  and flux (column 1)
@@ -28,8 +30,8 @@ class MCACeriaCalibrationConfig(BaseModel):
28
30
  :ivar num_bins: number of channels on the MCA to calibrate
29
31
  :ivar max_energy_kev: maximum channel energy of the MCA in keV
30
32
 
31
- :ivar hexrd_h5_material_file: path to a HEXRD materials.h5 file containing an
32
- entry for the material properties.
33
+ :ivar hexrd_h5_material_file: path to a HEXRD materials.h5 file containing
34
+ an entry for the material properties.
33
35
  :ivar hexrd_h5_material_name: Name of the material entry in
34
36
  `hexrd_h5_material_file`, defaults to `'CeO2'`.
35
37
  :ivar lattice_parameter_angstrom: lattice spacing in angstrom to use for
@@ -59,9 +61,9 @@ class MCACeriaCalibrationConfig(BaseModel):
59
61
  :ivar max_iter: maximum number of iterations of the calibration routine,
60
62
  defaults to `10`.
61
63
  :ivar tune_tth_tol: stop iteratively tuning 2&theta when an iteration
62
- produces a change in the tuned value of 2&theta that is smaller than this
63
- value, defaults to `1e-8`.
64
- '''
64
+ produces a change in the tuned value of 2&theta that is smaller than
65
+ this value, defaults to `1e-8`.
66
+ """
65
67
 
66
68
  spec_file: FilePath
67
69
  scan_number: conint(gt=0)
@@ -74,7 +76,8 @@ class MCACeriaCalibrationConfig(BaseModel):
74
76
  max_energy_kev: confloat(gt=0)
75
77
 
76
78
  hexrd_h5_material_file: FilePath
77
- hexrd_h5_material_name: constr(strip_whitespace=True, min_length=1) = 'CeO2'
79
+ hexrd_h5_material_name: constr(
80
+ strip_whitespace=True, min_length=1) = 'CeO2'
78
81
  lattice_parameter_angstrom: confloat(gt=0) = 5.41153
79
82
 
80
83
  tth_max: confloat(gt=0, allow_inf_nan=False) = 90.0
@@ -98,20 +101,22 @@ class MCACeriaCalibrationConfig(BaseModel):
98
101
 
99
102
  @validator('fit_include_bin_ranges', each_item=True)
100
103
  def validate_include_bin_range(cls, value, values):
101
- '''Ensure no bin ranges are outside the boundary of the detector'''
104
+ """Ensure no bin ranges are outside the boundary of the detector"""
102
105
 
103
106
  num_bins = values.get('num_bins')
104
107
  value[1] = min(value[1], num_bins)
105
- return(value)
108
+ return value
106
109
 
107
110
  def mca_data(self):
108
- '''Get the 1D array of MCA data to use for calibration.
109
-
111
+ """Get the 1D array of MCA data to use for calibration.
112
+
110
113
  :return: MCA data
111
114
  :rtype: np.ndarray
112
- '''
115
+ """
116
+ # local modules
117
+ from CHAP.common.utils.scanparsers \
118
+ import SMBMCAScanParser as ScanParser
113
119
 
114
- from CHAP.common.utils.scanparsers import SMBMCAScanParser as ScanParser
115
120
  scanparser = ScanParser(self.spec_file, self.scan_number)
116
121
  if self.scan_step_index is None:
117
122
  data = scanparser.get_all_detector_data(self.detector_name)
@@ -120,96 +125,105 @@ class MCACeriaCalibrationConfig(BaseModel):
120
125
  else:
121
126
  data = data[0]
122
127
  else:
123
- data = scanparser.get_detector_data(self.detector_name, self.scan_step_index)
128
+ data = scanparser.get_detector_data(
129
+ self.detector_name, self.scan_step_index)
124
130
 
125
- return(np.array(data))
131
+ return np.array(data)
126
132
 
127
133
  def mca_mask(self):
128
- '''Get a boolean mask array to use on MCA data before fitting.
134
+ """Get a boolean mask array to use on MCA data before fitting.
129
135
 
130
136
  :return: boolean mask array
131
137
  :rtype: numpy.ndarray
132
- '''
138
+ """
133
139
 
134
140
  mask = np.asarray([False]*self.num_bins)
135
141
  bin_indices = np.arange(self.num_bins)
136
142
  for min_, max_ in self.fit_include_bin_ranges:
137
- _mask = np.logical_and(bin_indices > min_, bin_indices < max_)
143
+ _mask = np.logical_and(bin_indices > min_, bin_indices < max_)
138
144
  mask = np.logical_or(mask, _mask)
139
145
 
140
- return(mask)
146
+ return mask
141
147
 
142
148
  def flux_correction_interpolation_function(self):
143
- '''Get an interpolation function to correct MCA data for relative energy
149
+ """
150
+ Get an interpolation function to correct MCA data for relative energy
144
151
  flux of the incident beam.
145
152
 
146
153
  :return: energy flux correction interpolation function
147
154
  :rtype: scipy.interpolate._polyint._Interpolator1D
148
- '''
155
+ """
149
156
 
150
157
  flux = np.loadtxt(self.flux_file)
151
158
  energies = flux[:,0]/1.e3
152
159
  relative_intensities = flux[:,1]/np.max(flux[:,1])
153
160
  interpolation_function = interp1d(energies, relative_intensities)
154
- return(interpolation_function)
161
+ return interpolation_function
155
162
 
156
163
  def material(self):
157
- '''Get CeO2 as a `CHAP.common.utils.Material` object.
164
+ """Get CeO2 as a `CHAP.common.utils.material.Material` object.
158
165
 
159
166
  :return: CeO2 material
160
- :rtype: CHAP.common.utils.Material
161
- '''
162
-
163
- from CHAP.common.utils import Material
164
- material = Material(material_name=self.hexrd_h5_material_name,
165
- material_file=self.hexrd_h5_material_file,
166
- lattice_parameters_angstroms=self.lattice_parameter_angstrom)
167
+ :rtype: CHAP.common.utils.material.Material
168
+ """
169
+ # local modules
170
+ from CHAP.common.utils.material import Material
171
+
172
+ material = Material(
173
+ material_name=self.hexrd_h5_material_name,
174
+ material_file=self.hexrd_h5_material_file,
175
+ lattice_parameters_angstroms=self.lattice_parameter_angstrom)
167
176
  # The following kwargs will be needed if we allow the material to be
168
177
  # built using xrayutilities (for now, we only allow hexrd to make the
169
178
  # material):
170
- # sgnum=225,
171
- # atoms=['Ce4p', 'O2mdot'],
172
- # pos=[(0.,0.,0.), (0.25,0.75,0.75)],
173
- # enrgy=50000.) # Why do we need to specify an energy to get HKLs when using xrayutilities?
174
- return(material)
179
+ # sgnum=225,
180
+ # atoms=['Ce4p', 'O2mdot'],
181
+ # pos=[(0.,0.,0.), (0.25,0.75,0.75)],
182
+ # enrgy=50000.)
183
+ # Why do we need to specify an energy to get HKLs when using
184
+ # xrayutilities?
185
+ return material
175
186
 
176
187
  def unique_ds(self):
177
- '''Get a list of unique HKLs and their lattice spacings
178
-
188
+ """Get a list of unique HKLs and their lattice spacings
189
+
179
190
  :return: unique HKLs and their lattice spacings in angstroms
180
191
  :rtype: np.ndarray, np.ndarray
181
- '''
182
-
183
- unique_hkls, unique_ds = self.material().get_unique_ds(tth_tol=self.hkl_tth_tol, tth_max=self.tth_max)
192
+ """
184
193
 
185
- return(unique_hkls, unique_ds)
194
+ unique_hkls, unique_ds = self.material().get_ds_unique(
195
+ tth_tol=self.hkl_tth_tol, tth_max=self.tth_max)
196
+
197
+ return unique_hkls, unique_ds
186
198
 
187
199
  def fit_ds(self):
188
- '''Get a list of HKLs and their lattice spacings that will be fit in the
200
+ """
201
+ Get a list of HKLs and their lattice spacings that will be fit in the
189
202
  calibration routine
190
-
203
+
191
204
  :return: HKLs to fit and their lattice spacings in angstroms
192
205
  :rtype: np.ndarray, np.ndarray
193
- '''
194
-
206
+ """
207
+
195
208
  unique_hkls, unique_ds = self.unique_ds()
196
209
 
197
210
  fit_hkls = np.array([unique_hkls[i] for i in self.fit_hkls])
198
211
  fit_ds = np.array([unique_ds[i] for i in self.fit_hkls])
199
212
 
200
- return(fit_hkls, fit_ds)
213
+ return fit_hkls, fit_ds
201
214
 
202
215
  def dict(self):
203
- '''Return a representation of this configuration in a dictionary that is
216
+ """
217
+ Return a representation of this configuration in a dictionary that is
204
218
  suitable for dumping to a YAML file (one that converts all instances of
205
219
  fields with type `PosixPath` to `str`).
206
220
 
207
221
  :return: dictionary representation of the configuration.
208
222
  :rtype: dict
209
- '''
223
+ """
210
224
 
211
225
  d = super().dict()
212
226
  for k,v in d.items():
213
227
  if isinstance(v, PosixPath):
214
228
  d[k] = str(v)
215
- return(d)
229
+ return d