ChessAnalysisPipeline 0.0.4__py3-none-any.whl → 0.0.6__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This version of ChessAnalysisPipeline has been flagged as potentially problematic.

Files changed (43)
  1. CHAP/TaskManager.py +214 -0
  2. CHAP/common/models/__init__.py +0 -2
  3. CHAP/common/models/integration.py +392 -249
  4. CHAP/common/models/map.py +350 -198
  5. CHAP/common/processor.py +229 -191
  6. CHAP/common/reader.py +52 -39
  7. CHAP/common/utils/__init__.py +0 -37
  8. CHAP/common/utils/fit.py +1197 -991
  9. CHAP/common/utils/general.py +629 -372
  10. CHAP/common/utils/material.py +158 -121
  11. CHAP/common/utils/scanparsers.py +735 -339
  12. CHAP/common/writer.py +31 -25
  13. CHAP/edd/models.py +65 -51
  14. CHAP/edd/processor.py +136 -113
  15. CHAP/edd/reader.py +1 -1
  16. CHAP/edd/writer.py +1 -1
  17. CHAP/inference/processor.py +35 -28
  18. CHAP/inference/reader.py +1 -1
  19. CHAP/inference/writer.py +1 -1
  20. CHAP/pipeline.py +14 -28
  21. CHAP/processor.py +44 -75
  22. CHAP/reader.py +49 -40
  23. CHAP/runner.py +73 -32
  24. CHAP/saxswaxs/processor.py +1 -1
  25. CHAP/saxswaxs/reader.py +1 -1
  26. CHAP/saxswaxs/writer.py +1 -1
  27. CHAP/server.py +130 -0
  28. CHAP/sin2psi/processor.py +1 -1
  29. CHAP/sin2psi/reader.py +1 -1
  30. CHAP/sin2psi/writer.py +1 -1
  31. CHAP/tomo/__init__.py +1 -4
  32. CHAP/tomo/models.py +53 -31
  33. CHAP/tomo/processor.py +1326 -902
  34. CHAP/tomo/reader.py +4 -2
  35. CHAP/tomo/writer.py +4 -2
  36. CHAP/writer.py +47 -41
  37. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/METADATA +1 -1
  38. ChessAnalysisPipeline-0.0.6.dist-info/RECORD +52 -0
  39. ChessAnalysisPipeline-0.0.4.dist-info/RECORD +0 -50
  40. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/LICENSE +0 -0
  41. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/WHEEL +0 -0
  42. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/entry_points.txt +0 -0
  43. {ChessAnalysisPipeline-0.0.4.dist-info → ChessAnalysisPipeline-0.0.6.dist-info}/top_level.txt +0 -0
CHAP/edd/processor.py CHANGED
@@ -1,34 +1,39 @@
  #!/usr/bin/env python
  #-*- coding: utf-8 -*-
  #pylint: disable=
- '''
+ """
  File : processor.py
  Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
  Description: Module for Processors used only by EDD experiments
- '''
+ """

  # system modules
- import json
+ from json import dumps
+
+ # third party modules
+ import numpy as np

  # local modules
  from CHAP.processor import Processor
- from CHAP.common import StrainAnalysisProcessor
+

  class MCACeriaCalibrationProcessor(Processor):
- '''A Processor using a CeO2 scan to obtain tuned values for the bragg
- diffraction angle and linear correction parameters for MCA channel energies
- for an EDD experimental setup.
- '''
+ """A Processor using a CeO2 scan to obtain tuned values for the
+ bragg diffraction angle and linear correction parameters for MCA
+ channel energies for an EDD experimental setup.
+ """

  def _process(self, data):
- '''Return tuned values for 2&theta and linear correction parameters for
- the MCA channel energies.
+ """Return tuned values for 2&theta and linear correction
+ parameters for the MCA channel energies.

- :param data: input configuration for the raw data & tuning procedure
+ :param data: input configuration for the raw data & tuning
+ procedure
  :type data: list[dict[str,object]]
- :return: original configuration dictionary with tuned values added
+ :return: original configuration dictionary with tuned values
+ added
  :rtype: dict[str,float]
- '''
+ """

  calibration_config = self.get_config(data)

@@ -38,22 +43,23 @@ class MCACeriaCalibrationProcessor(Processor):
  calibration_config.slope_calibrated = slope
  calibration_config.intercept_calibrated = intercept

- return(calibration_config.dict())
+ return calibration_config.dict()

  def get_config(self, data):
- '''Get an instance of the configuration object needed by this
+ """Get an instance of the configuration object needed by this
  `Processor` from a returned value of `Reader.read`

- :param data: Result of `Reader.read` where at least one item has the
- value `'MCACeriaCalibrationConfig'` for the `'schema'` key.
+ :param data: Result of `Reader.read` where at least one item
+ has the value `'MCACeriaCalibrationConfig'` for the
+ `'schema'` key.
  :type data: list[dict[str,object]]
- :raises Exception: If a valid config object cannot be constructed from
- `data`.
- :return: a valid instance of a configuration object with field values
- taken from `data`.
+ :raises Exception: If a valid config object cannot be
+ constructed from `data`.
+ :return: a valid instance of a configuration object with field
+ values taken from `data`.
  :rtype: MCACeriaCalibrationConfig
- '''
-
+ """
+ # local modules
  from CHAP.edd.models import MCACeriaCalibrationConfig

  calibration_config = False
@@ -65,37 +71,39 @@ class MCACeriaCalibrationProcessor(Processor):
  break

  if not calibration_config:
- raise(ValueError('No MCA ceria calibration configuration found in input data'))
+ raise ValueError(
+ 'No MCA ceria calibration configuration found in input data')

- return(MCACeriaCalibrationConfig(**calibration_config))
+ return MCACeriaCalibrationConfig(**calibration_config)

  def calibrate(self, calibration_config):
- '''Iteratively calibrate 2&theta by fitting selected peaks of an MCA
- spectrum until the computed strain is sufficiently small. Use the fitted
- peak locations to determine linear correction parameters for the MCA's
- channel energies.
+ """Iteratively calibrate 2&theta by fitting selected peaks of
+ an MCA spectrum until the computed strain is sufficiently
+ small. Use the fitted peak locations to determine linear
+ correction parameters for the MCA's channel energies.

- :param calibration_config: object configuring the CeO2 calibration
- procedure
+ :param calibration_config: object configuring the CeO2
+ calibration procedure
  :type calibration_config: MCACeriaCalibrationConfig
- :return: calibrated values of 2&theta and linear correction parameters
- for MCA channel energies : tth, slope, intercept
+ :return: calibrated values of 2&theta and linear correction
+ parameters for MCA channel energies : tth, slope,
+ intercept
  :rtype: float, float, float
- '''
-
- from CHAP.common.utils import Fit, FitMultipeak
- import numpy as np
+ """
+ # third party modules
  from scipy.constants import physical_constants

- hc = (physical_constants['Planck constant in eV/Hz'][0]
- * physical_constants['speed of light in vacuum'][0]
- * 1e7) # We'll work in keV and A, not eV and m.
+ # local modules
+ from CHAP.common.utils.fit import Fit, FitMultipeak
+
+ # We'll work in keV and A, not eV and m.
+ hc = 1e7 * physical_constants['Planck constant in eV/Hz'][0] \
+ * physical_constants['speed of light in vacuum'][0]

  # Collect raw MCA data of interest
  mca_data = calibration_config.mca_data()
- mca_bin_energies = (np.arange(0, calibration_config.num_bins)
- * (calibration_config.max_energy_kev
- / calibration_config.num_bins))
+ mca_bin_energies = np.arange(0, calibration_config.num_bins) \
+ * (calibration_config.max_energy_kev/calibration_config.num_bins)

  # Mask out the corrected MCA data for fitting
  mca_mask = calibration_config.mca_mask()
@@ -103,22 +111,24 @@ class MCACeriaCalibrationProcessor(Processor):
  fit_mca_intensities = mca_data[mca_mask]

  # Correct raw MCA data for variable flux at different energies
- flux_correct = calibration_config.flux_correction_interpolation_function()
+ flux_correct = \
+ calibration_config.flux_correction_interpolation_function()
  mca_intensity_weights = flux_correct(fit_mca_energies)
- fit_mca_intensities = fit_mca_intensities / mca_intensity_weights
+ fit_mca_intensities = fit_mca_intensities/mca_intensity_weights

- # Get the HKLs and lattice spacings that will be used for fitting
+ # Get the HKLs and lattice spacings that will be used for
+ # fitting
  tth = calibration_config.tth_initial_guess
  fit_hkls, fit_ds = calibration_config.fit_ds()
  c_1 = fit_hkls[:,0]**2 + fit_hkls[:,1]**2 + fit_hkls[:,2]**2

  for iter_i in range(calibration_config.max_iter):

- ### Perform the uniform fit first ###
+ # Perform the uniform fit first

- # Get expected peak energy locations for this iteration's starting
- # value of tth
- fit_lambda = 2.0 * fit_ds * np.sin(0.5*np.radians(tth))
+ # Get expected peak energy locations for this iteration's
+ # starting value of tth
+ fit_lambda = 2.0*fit_ds*np.sin(0.5*np.radians(tth))
  fit_E0 = hc / fit_lambda

  # Run the uniform fit
@@ -127,51 +137,55 @@ class MCACeriaCalibrationProcessor(Processor):
  fit_mca_intensities,
  fit_E0,
  x=fit_mca_energies,
- fit_type='uniform')
+ fit_type='uniform',
+ plot=False)

- # Extract values of interest from the best values for the uniform fit
- # parameters
- uniform_fit_centers = [best_values[f'peak{i+1}_center'] for i in range(len(calibration_config.fit_hkls))]
+ # Extract values of interest from the best values for the
+ # uniform fit parameters
+ uniform_fit_centers = [
+ best_values[f'peak{i+1}_center']
+ for i in range(len(calibration_config.fit_hkls))]
  # uniform_a = best_values['scale_factor']
  # uniform_strain = np.log(
- # (uniform_a
+ # (uniform_a
  # / calibration_config.lattice_parameter_angstrom))
  # uniform_tth = tth * (1.0 + uniform_strain)
  # uniform_rel_rms_error = (np.linalg.norm(residual)
  # / np.linalg.norm(fit_mca_intensities))

- ### Next, perform the unconstrained fit ###
+ # Next, perform the unconstrained fit

- # Use the peak locations found in the uniform fit as the initial
- # guesses for peak locations in the unconstrained fit
+ # Use the peak locations found in the uniform fit as the
+ # initial guesses for peak locations in the unconstrained
+ # fit
  best_fit, residual, best_values, best_errors, redchi, success = \
  FitMultipeak.fit_multipeak(
  fit_mca_intensities,
  uniform_fit_centers,
  x=fit_mca_energies,
- fit_type='unconstrained')
+ fit_type='unconstrained',
+ plot=False)

  # Extract values of interest from the best values for the
  # unconstrained fit parameters
  unconstrained_fit_centers = np.array(
- [best_values[f'peak{i+1}_center'] for i in range(len(calibration_config.fit_hkls))])
- unconstrained_a = (0.5 * hc * np.sqrt(c_1)
- / (unconstrained_fit_centers
- * abs(np.sin(0.5*np.radians(tth)))))
+ [best_values[f'peak{i+1}_center']
+ for i in range(len(calibration_config.fit_hkls))])
+ unconstrained_a = 0.5*hc*np.sqrt(c_1) \
+ / (unconstrained_fit_centers*abs(np.sin(0.5*np.radians(tth))))
  unconstrained_strains = np.log(
- (unconstrained_a
- / calibration_config.lattice_parameter_angstrom))
+ unconstrained_a/calibration_config.lattice_parameter_angstrom)
  unconstrained_strain = np.mean(unconstrained_strains)
- unconstrained_tth = tth * (1.0 + unconstrained_strain)
- unconstrained_rel_rms_error = (np.linalg.norm(residual)
- / np.linalg.norm(fit_mca_intensities))
-
+ unconstrained_tth = tth * (1.0+unconstrained_strain)
+ unconstrained_rel_rms_error = (
+ np.linalg.norm(residual)/np.linalg.norm(fit_mca_intensities))

  # Update tth for the next iteration of tuning
  prev_tth = tth
  tth = unconstrained_tth

- # Stop tuning tth at this iteration if differences are small enough
+ # Stop tuning tth at this iteration if differences are
+ # small enough
  if abs(tth - prev_tth) < calibration_config.tune_tth_tol:
  break

@@ -185,46 +199,49 @@ class MCACeriaCalibrationProcessor(Processor):
  slope = fit.best_values['slope']
  intercept = fit.best_values['intercept']

- return(float(tth), float(slope), float(intercept))
+ return float(tth), float(slope), float(intercept)
+

  class MCADataProcessor(Processor):
- '''A Processor to return data from an MCA, restuctured to incorporate the
- shape & metadata associated with a map configuration to which the MCA data
- belongs, and linearly transformed according to the results of a ceria
- calibration.
- '''
+ """A Processor to return data from an MCA, restuctured to
+ incorporate the shape & metadata associated with a map
+ configuration to which the MCA data belongs, and linearly
+ transformed according to the results of a ceria calibration.
+ """

  def _process(self, data):
- '''Process configurations for a map and MCA detector(s), and return the
- calibrated MCA data collected over the map.
+ """Process configurations for a map and MCA detector(s), and
+ return the calibrated MCA data collected over the map.

- :param data: input map configuration and results of ceria calibration
+ :param data: input map configuration and results of ceria
+ calibration
  :type data: list[dict[str,object]]
  :return: calibrated and flux-corrected MCA data
  :rtype: nexusformat.nexus.NXentry
- '''
+ """

  map_config, calibration_config = self.get_configs(data)
  nxroot = self.get_nxroot(map_config, calibration_config)

- return(nxroot)
+ return nxroot

  def get_configs(self, data):
- '''Get instances of the configuration objects needed by this
+ """Get instances of the configuration objects needed by this
  `Processor` from a returned value of `Reader.read`

- :param data: Result of `Reader.read` where at least one item has the
- value `'MapConfig'` for the `'schema'` key, and at least one item has
- the value `'MCACeriaCalibrationConfig'` for the `'schema'` key.
+ :param data: Result of `Reader.read` where at least one item
+ has the value `'MapConfig'` for the `'schema'` key, and at
+ least one item has the value `'MCACeriaCalibrationConfig'`
+ for the `'schema'` key.
  :type data: list[dict[str,object]]
- :raises Exception: If valid config objects cannot be constructed from
- `data`.
- :return: valid instances of the configuration objects with field values
- taken from `data`.
+ :raises Exception: If valid config objects cannot be
+ constructed from `data`.
+ :return: valid instances of the configuration objects with
+ field values taken from `data`.
  :rtype: tuple[MapConfig, MCACeriaCalibrationConfig]
- '''
-
- from CHAP.common.models import MapConfig
+ """
+ # local modules
+ from CHAP.common.models.map import MapConfig
  from CHAP.edd.models import MCACeriaCalibrationConfig

  map_config = False
@@ -239,17 +256,20 @@ class MCADataProcessor(Processor):
  calibration_config = item.get('data')

  if not map_config:
- raise(ValueError('No map configuration found in input data'))
+ raise ValueError('No map configuration found in input data')
  if not calibration_config:
- raise(ValueError('No MCA ceria calibration configuration found in input data'))
+ raise ValueError('No MCA ceria calibration configuration found in '
+ 'input data')

- return(MapConfig(**map_config), MCACeriaCalibrationConfig(**calibration_config))
+ return (MapConfig(**map_config),
+ MCACeriaCalibrationConfig(**calibration_config))

  def get_nxroot(self, map_config, calibration_config):
- '''Get a map of the MCA data collected by the scans in `map_config`. The
- MCA data will be calibrated and flux-corrected according to the
- parameters included in `calibration_config`. The data will be returned
- along with relevant metadata in the form of a NeXus structure.
+ """Get a map of the MCA data collected by the scans in
+ `map_config`. The MCA data will be calibrated and
+ flux-corrected according to the parameters included in
+ `calibration_config`. The data will be returned along with
+ relevant metadata in the form of a NeXus structure.

  :param map_config: the map configuration
  :type map_config: MapConfig
@@ -257,16 +277,15 @@ class MCADataProcessor(Processor):
  :type calibration_config: MCACeriaCalibrationConfig
  :return: a map of the calibrated and flux-corrected MCA data
  :rtype: nexusformat.nexus.NXroot
- '''
-
- from CHAP.common import MapProcessor
-
+ """
+ # third party modules
  from nexusformat.nexus import (NXdata,
  NXdetector,
- NXentry,
  NXinstrument,
  NXroot)
- import numpy as np
+
+ # local modules
+ from CHAP.common import MapProcessor

  nxroot = NXroot()

@@ -275,17 +294,17 @@ class MCADataProcessor(Processor):

  nxentry.instrument = NXinstrument()
  nxentry.instrument.detector = NXdetector()
- nxentry.instrument.detector.calibration_configuration = json.dumps(calibration_config.dict())
+ nxentry.instrument.detector.calibration_configuration = dumps(
+ calibration_config.dict())

  nxentry.instrument.detector.data = NXdata()
  nxdata = nxentry.instrument.detector.data
  nxdata.raw = np.empty((*map_config.shape, calibration_config.num_bins))
  nxdata.raw.attrs['units'] = 'counts'
- nxdata.channel_energy = (calibration_config.slope_calibrated
- * np.arange(0, calibration_config.num_bins)
- * (calibration_config.max_energy_kev
- / calibration_config.num_bins)
- + calibration_config.intercept_calibrated)
+ nxdata.channel_energy = calibration_config.slope_calibrated \
+ * np.arange(0, calibration_config.num_bins) \
+ * (calibration_config.max_energy_kev/calibration_config.num_bins) \
+ + calibration_config.intercept_calibrated
  nxdata.channel_energy.attrs['units'] = 'keV'

  for scans in map_config.spec_scans:
@@ -311,11 +330,15 @@ class MCADataProcessor(Processor):
  nxentry.data.attrs['axes'],
  f'{calibration_config.detector_name}_channel_energy']
  else:
- nxentry.data.attrs['axes'] += [f'{calibration_config.detector_name}_channel_energy']
+ nxentry.data.attrs['axes'] += [
+ f'{calibration_config.detector_name}_channel_energy']
  nxentry.data.attrs['signal'] = calibration_config.detector_name

- return(nxroot)
+ return nxroot
+

  if __name__ == '__main__':
+ # local modules
  from CHAP.processor import main
+
  main()
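Note on the calibration math above: `calibrate` and `get_nxroot` share one linear channel-energy model. Uncalibrated bin energies are spaced max_energy_kev/num_bins apart, expected peak energies follow the Bragg relation E = hc / (2 d sin(theta)), and the fitted slope and intercept map bin energies to calibrated ones. The sketch below restates those two relations on their own; the function names and the example 2-theta and d-spacings are illustrative placeholders, not part of the package.

# Standalone sketch of the energy relations used above (illustrative only).
import numpy as np
from scipy.constants import physical_constants

# hc in keV*Angstrom, same unit convention as calibrate()
hc = 1e7 * physical_constants['Planck constant in eV/Hz'][0] \
    * physical_constants['speed of light in vacuum'][0]

def channel_energies(num_bins, max_energy_kev, slope=1.0, intercept=0.0):
    """Linear channel-energy model: calibrated energy for each MCA bin."""
    bin_energies = np.arange(0, num_bins) * (max_energy_kev / num_bins)
    return slope * bin_energies + intercept

def bragg_peak_energies(fit_ds, tth):
    """Expected peak energies (keV) for lattice spacings fit_ds (A) at 2-theta tth (deg)."""
    fit_lambda = 2.0 * np.asarray(fit_ds) * np.sin(0.5 * np.radians(tth))
    return hc / fit_lambda

# Example with made-up numbers: three d-spacings at a trial 2-theta of 7.5 degrees
print(bragg_peak_energies([3.12, 2.71, 1.91], 7.5))
print(channel_energies(4096, 200.0, slope=1.002, intercept=-0.03)[:5])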
CHAP/edd/reader.py CHANGED
@@ -1,4 +1,4 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python

  if __name__ == '__main__':
  from CHAP.reader import main
CHAP/edd/writer.py CHANGED
@@ -1,4 +1,4 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python

  if __name__ == '__main__':
  from CHAP.writer import main
CHAP/inference/processor.py CHANGED
@@ -1,11 +1,11 @@
- #!/usr/bin/env python
- #-*- coding: utf-8 -*-
- #pylint: disable=
- '''
- File : processor.py
- Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
- Description: Processor module
- '''
+ #!/usr/bin/env python
+ #-*- coding: utf-8 -*-
+ #pylint: disable=
+ """
+ File : processor.py
+ Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
+ Description: Processor module
+ """

  # system modules
  from time import time
@@ -13,14 +13,12 @@ from time import time
  # local modules
  from CHAP import Processor

+
  class TFaaSImageProcessor(Processor):
- '''
- A Processor to get predictions from TFaaS inference server.
- '''
+ """A Processor to get predictions from TFaaS inference server."""
+
  def process(self, data, url, model, verbose=False):
- '''
- process data API
- '''
+ """process data API"""

  t0 = time()
  self.logger.info(f'Executing "process" with url {url} model {model}')
@@ -29,40 +27,49 @@ class TFaaSImageProcessor(Processor):

  self.logger.info(f'Finished "process" in {time()-t0:.3f} seconds\n')

- return(data)
+ return data

  def _process(self, data, url, model, verbose):
- '''Print and return the input data.
+ """Print and return the input data.

- :param data: Input image data, either file name or actual image data
+ :param data: Input image data, either file name or actual
+ image data
  :type data: object
  :return: `data`
  :rtype: object
- '''
+ """
+ # system modules
+ from pathlib import Path

+ # local modules
  from MLaaS.tfaas_client import predictImage
- from pathlib import Path

  self.logger.info(f'input data {type(data)}')
  if isinstance(data, str) and Path(data).is_file():
- imgFile = data
- data = predictImage(url, imgFile, model, verbose)
+ img_file = data
+ data = predictImage(url, img_file, model, verbose)
  else:
+ # third party modules
+ from requests import Session
+
  rdict = data[0]
- import requests
  img = rdict['data']
- session = requests.Session()
+ session = Session()
  rurl = url + '/predict/image'
- payload = dict(model=model)
- files = dict(image=img)
- self.logger.info(f'HTTP request {rurl} with image file and {payload} payload')
- req = session.post(rurl, files=files, data=payload )
+ payload = {'model': model}
+ files = {'image': img}
+ self.logger.info(
+ f'HTTP request {rurl} with image file and {payload} payload')
+ req = session.post(rurl, files=files, data=payload)
  data = req.content
  data = data.decode('utf-8').replace('\n', '')
  self.logger.info(f'HTTP response {data}')

- return(data)
+ return data
+

  if __name__ == '__main__':
+ # local modules
  from CHAP.processor import main
+
  main()
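For context on the refactored `_process` branch above: when the input is not a file path, the processor posts the image bytes to the TFaaS endpoint `{url}/predict/image`, with the model name as form data and the image as a multipart file, then decodes the text response. A minimal standalone sketch of that request follows; the server URL, model name, and file name are placeholders, and only the call pattern mirrors the code shown in the diff.

# Minimal sketch of the TFaaS request made in the else-branch above
# (URL, model name, and image path are placeholders).
from requests import Session

def predict_image_bytes(url, model, img):
    """POST image bytes to a TFaaS-style /predict/image endpoint."""
    session = Session()
    rurl = url + '/predict/image'
    payload = {'model': model}
    files = {'image': img}
    req = session.post(rurl, files=files, data=payload)
    # decode the prediction text and drop embedded newlines, as _process does
    return req.content.decode('utf-8').replace('\n', '')

if __name__ == '__main__':
    with open('example.png', 'rb') as fh:
        print(predict_image_bytes('http://localhost:8083', 'mnist', fh.read()))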
CHAP/inference/reader.py CHANGED
@@ -1,4 +1,4 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python

  if __name__ == '__main__':
  from CHAP.reader import main
CHAP/inference/writer.py CHANGED
@@ -1,4 +1,4 @@
- #!/usr/bin/env python
+ #!/usr/bin/env python

  if __name__ == '__main__':
  from CHAP.writer import main
CHAP/pipeline.py CHANGED
@@ -4,21 +4,19 @@
  """
  File : pipeline.py
  Author : Valentin Kuznetsov <vkuznet AT gmail dot com>
- Description:
+ Description:
  """

  # system modules
  import logging
  from time import time

+
  class Pipeline():
- """
- Pipeline represent generic Pipeline class
- """
+ """Pipeline represent generic Pipeline class"""
  def __init__(self, items=None, kwds=None):
- """
- Pipeline class constructor
-
+ """Pipeline class constructor
+
  :param items: list of objects
  :param kwds: list of method args for individual objects
  """
@@ -31,12 +29,10 @@ class Pipeline():
  self.logger.propagate = False

  def execute(self):
- """
- execute API
- """
+ """execute API"""

  t0 = time()
- self.logger.info(f'Executing "execute"\n')
+ self.logger.info('Executing "execute"\n')

  data = None
  for item, kwargs in zip(self.items, self.kwds):
@@ -52,33 +48,23 @@ class Pipeline():

  self.logger.info(f'Executed "execute" in {time()-t0:.3f} seconds')

+
  class PipelineObject():
- """
- PipelineObject represent generic Pipeline class
- """
- def __init__(self, reader, writer, processor, fitter):
- """
- PipelineObject class constructor
- """
+ """PipelineObject represent generic Pipeline class"""
+ def __init__(self, reader, writer, processor):
+ """PipelineObject class constructor"""
  self.reader = reader
  self.writer = writer
  self.processor = processor

  def read(self, filename):
- """
- read object API
- """
+ """read object API"""
  return self.reader.read(filename)

  def write(self, data, filename):
- """
- write object API
- """
+ """write object API"""
  return self.writer.write(data, filename)

  def process(self, data):
- """
- process object API
- """
+ """process object API"""
  return self.processor.process(data)
-
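The `Pipeline.execute` hunks above show only the edges of the loop: `data` starts as `None` and each pipeline object is paired with its own keyword arguments through `zip(self.items, self.kwds)`. The toy example below imitates that pairing to show how output chains from one stage to the next; the stage classes and the exact call made inside the loop are assumptions for illustration, since the loop body itself is elided from the diff.

# Toy illustration of the items/kwds pairing in Pipeline.execute
# (stage classes and the in-loop call are assumed, not taken from CHAP).
class Scale:
    """Toy stage: multiplies each value by a factor."""
    def process(self, data, factor=1):
        return [x * factor for x in (data or [1, 2, 3])]

class Report:
    """Toy stage: prints the data and passes it through."""
    def process(self, data, prefix=''):
        print(f'{prefix}{data}')
        return data

def execute(items, kwds):
    data = None
    for item, kwargs in zip(items, kwds):
        # each stage receives the previous stage's output plus its own kwargs
        data = item.process(data, **kwargs)
    return data

execute([Scale(), Report()], [{'factor': 3}, {'prefix': 'result: '}])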