ChessAnalysisPipeline 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of ChessAnalysisPipeline might be problematic.

CHAP/giwaxs/processor.py ADDED
@@ -0,0 +1,520 @@
+#!/usr/bin/env python
+#-*- coding: utf-8 -*-
+#pylint: disable=
+"""
+File       : processor.py
+Author     : Rolf Verberg
+Description: Module for Processors used only by GIWAXS experiments
+"""
+# System modules
+from copy import deepcopy
+from json import dumps
+import os
+
+# Third party modules
+import numpy as np
+
+# Local modules
+from CHAP.processor import Processor
+from CHAP.common.models.map import MapConfig
+
+
+class GiwaxsConversionProcessor(Processor):
+    """Processor to convert GIWAXS images to a rectilinear q grid."""
+    def process(
+            self, data, config, save_figures=False, inputdir='.',
+            outputdir='.', interactive=False):
+
+        # Third party modules
+        from json import loads
+        from nexusformat.nexus import (
+            NXentry,
+            NXroot,
+        )
+        # Local modules
+        from CHAP.common.models.map import MapConfig
+
+        # Load the detector data
+        try:
+            nxentry = self.get_data(data, 'MapProcessor')
+            if not isinstance(nxentry, NXentry):
+                raise RuntimeError(
+                    'No valid NXentry data in MapProcessor pipeline data')
+        except:
+            try:
+                try:
+                    nxroot = self.get_data(data, 'NexusReader')
+                except:
+                    nxroot = self.get_data(data, 'NexusWriter')
+                if not isinstance(nxroot, NXroot):
+                    raise RuntimeError(
+                        'No valid NXroot data in NexusWriter pipeline data')
+                nxentry = nxroot[nxroot.default]
+                if not isinstance(nxentry, NXentry):
+                    raise RuntimeError(
+                        'No valid NXentry data in NexusWriter pipeline data')
+            except:
+                raise RuntimeError(
+                    'No valid detector data in input pipeline data')
+
+        # Load the validated GIWAXS conversion configuration
+        try:
+            giwaxs_config = self.get_config(
+                data, 'giwaxs.models.GiwaxsConversionConfig')
+        except Exception as data_exc:
+            self.logger.info('No valid conversion config in input pipeline '
+                             'data, using config parameter instead.')
+            try:
+                # Local modules
+                from CHAP.giwaxs.models import GiwaxsConversionConfig
+
+                giwaxs_config = GiwaxsConversionConfig(**config)
+            except Exception as dict_exc:
+                raise RuntimeError('Invalid conversion config') from dict_exc
+
+        return self.convert_q_rect(
+            nxentry, giwaxs_config, save_figures=save_figures,
+            interactive=interactive, outputdir=outputdir)
+
+    def convert_q_rect(
+            self, nxentry, config, save_figures=False, interactive=False,
+            outputdir='.'):
+        """Return NXroot containing the converted GIWAXS images.
+
+        :param nxentry: The GIWAXS map with the raw detector data.
+        :param config: The GIWAXS conversion configuration.
+        :type config: CHAP.giwaxs.models.GiwaxsConversionConfig
+        :param save_figures: Save .pngs of plots for checking inputs &
+            outputs of this Processor, defaults to `False`.
+        :type save_figures: bool, optional
+        :param interactive: Allows for user interactions, defaults to
+            `False`.
+        :type interactive: bool, optional
+        :param outputdir: Directory to which any output figures will
+            be saved, defaults to `'.'`.
+        :type outputdir: str, optional
+        :return: NXroot containing the converted GIWAXS images.
+        :rtype: nexusformat.nexus.NXroot
+        """
+        # Third party modules
+        from json import loads
+        if interactive or save_figures:
+            import matplotlib.pyplot as plt
+        from nexusformat.nexus import (
+            NXdata,
+            NXfield,
+            NXprocess,
+            NXroot,
+        )
+
+        # Local modules
+        from CHAP.common import MapProcessor
+
+        if nxentry.detector_names.size > 1 or len(config.detectors) > 1:
+            raise RuntimeError('More than one detector not yet implemented')
+        detector = config.detectors[0]
+        if str(nxentry.detector_names[0]) != detector.prefix:
+            raise RuntimeError(
+                f'Inconsistent detector names ({nxentry.detector_names[0]} vs '
+                f'{detector.prefix})')
+        if not isinstance(nxentry.data.attrs['axes'], str):
+            raise RuntimeError(
+                'More than one independent dimension not yet implemented')
+
+        # Create the NXroot object
+        nxroot = NXroot()
+        nxroot[nxentry.nxname] = nxentry
+        nxprocess = NXprocess()
+        nxroot[f'{nxentry.nxname}_conversion'] = nxprocess
+        nxprocess.conversion_config = dumps(config.dict())
+
+        # Collect the raw giwaxs images
+        if config.scan_step_indices is None:
+            thetas = nxentry.data[nxentry.data.attrs['axes']]
+            giwaxs_data = nxentry.data.detector_data[0]
+        else:
+            thetas = nxentry.data[nxentry.data.attrs['axes']][
+                config.scan_step_indices]
+            giwaxs_data = nxentry.data.detector_data[0][
+                config.scan_step_indices]
+        self.logger.debug(f'giwaxs_data.shape: {giwaxs_data.shape}')
+        effective_map_shape = giwaxs_data.shape[:-2]
+        self.logger.debug(f'effective_map_shape: {effective_map_shape}')
+        image_dims = giwaxs_data.shape[1:]
+        self.logger.debug(f'image_dims: {image_dims}')
+
+        # Get the components of q parallel and perpendicular to the
+        # detector
+        q_par, q_perp = self._calc_q_coords(
+            giwaxs_data, thetas, detector.poni_file)
+
+        # Get the range of the perpendicular component of q and that
+        # of the parallel one at near grazing incidence as well as
+        # the corresponding rectangular grid with the same dimensions
+        # as the detector grid and converted to this grid from the
+        # (q_par, q_perp)-grid
+        # RV: For now use the same q-coords for all thetas, based on
+        # the range for the first theta
+        q_perp_min_index = np.argmin(np.abs(q_perp[:,0]))
+        q_par_rect = np.linspace(
+            q_par[q_perp_min_index,:].min(),
+            q_par[q_perp_min_index,:].max(), image_dims[1])
+        q_perp_rect = np.linspace(
+            q_perp.min(), q_perp.max(), image_dims[0])
+        giwaxs_data_rect = []
+        # q_par_rect = []
+        # q_perp_rect = []
+        for i, theta in enumerate(thetas):
+            # q_perp_min_index = np.argmin(np.abs(q_perp[i,:,0]))
+            # q_par_rect.append(np.linspace(
+            #     q_par[i,q_perp_min_index,:].min(),
+            #     q_par[i,q_perp_min_index,:].max(), image_dims[1]))
+            # q_perp_rect.append(np.linspace(
+            #     q_perp[i].min(), q_perp[i].max(), image_dims[0]))
+            # giwaxs_data_rect.append(
+            #     GiwaxsConversionProcessor.curved_to_rect(
+            #         giwaxs_data[i], q_par[i], q_perp[i], q_par_rect[i],
+            #         q_perp_rect[i]))
+            giwaxs_data_rect.append(
+                GiwaxsConversionProcessor.curved_to_rect(
+                    giwaxs_data[i], q_par, q_perp, q_par_rect,
+                    q_perp_rect))
+
+            if interactive or save_figures:
+                vmax = giwaxs_data[i].max()/10
+                fig, ax = plt.subplots(1, 2, figsize=(10, 5))
+                ax[1].imshow(
+                    giwaxs_data_rect[i],
+                    vmin=0, vmax=vmax,
+                    origin='lower',
+                    extent=(q_par_rect.min(), q_par_rect.max(),
+                            q_perp_rect.min(), q_perp_rect.max()))
+                ax[1].set_aspect('equal')
+                ax[1].set_title('Transformed Image')
+                ax[1].set_xlabel('q$_\\parallel$ [\u212b$^{-1}$]')
+                ax[1].set_ylabel('q$_\\perp$ [\u212b$^{-1}$]')
+                im = ax[0].imshow(giwaxs_data[i], vmin=0, vmax=vmax)
+                ax[0].set_aspect('equal')
+                lhs = ax[0].get_position().extents
+                rhs = ax[1].get_position().extents
+                ax[0].set_position(
+                    (lhs[0], rhs[1], rhs[2] - rhs[0], rhs[3] - rhs[1]))
+                ax[0].set_title('Raw Image')
+                ax[0].set_xlabel('column index')
+                ax[0].set_ylabel('row index')
+                fig.subplots_adjust(right=0.85)
+                cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
+                fig.colorbar(im, cax=cbar_ax)
+                if interactive:
+                    plt.show()
+                if save_figures:
+                    index = (i if config.scan_step_indices is None
+                             else config.scan_step_indices[i])
+                    fig.savefig(os.path.join(outputdir, f'converted_{index}'))
+                plt.close()
+
+        # Create the NXdata object with the converted images
+        if False and len(thetas) == 1:  # RV: single-theta branch, disabled
+            nxprocess.data = NXdata(
+                NXfield(np.asarray(giwaxs_data_rect[0]), 'converted'),
+                (NXfield(
+                     q_perp_rect[0], 'q_perp_rect',
+                     attrs={'units': '\u212b$^{-1}$'}),
+                 NXfield(
+                     q_par_rect[0], 'q_par_rect',
+                     attrs={'units': '\u212b$^{-1}$'})))
+            nxprocess.data.theta = NXfield(
+                thetas[0], 'thetas', attrs={'units': 'rad'})
+            if config.save_raw_data:
+                nxprocess.data.raw = NXfield(giwaxs_data[0])
+        else:
+            nxprocess.data = NXdata(
+                NXfield(np.asarray(giwaxs_data_rect), 'converted'),
+                (NXfield(
+                     thetas, 'thetas', attrs={'units': 'rad'}),
+                 NXfield(
+                     q_perp_rect, 'q_perp_rect',
+                     attrs={'units': '\u212b$^{-1}$'}),
+                 NXfield(
+                     q_par_rect, 'q_par_rect',
+                     attrs={'units': '\u212b$^{-1}$'})))
+            if config.save_raw_data:
+                nxprocess.data.raw = NXfield(giwaxs_data)
+        nxprocess.default = 'data'
+
+        return nxroot
+
+    @staticmethod
+    def curved_to_rect(
+            data_curved, q_par, q_perp, q_par_rect, q_perp_rect,
+            return_maps=False, normalize=True):
+        """
+        data_rect = curved_to_rect(...):
+            distributes counts from a curvilinear grid (data_curved),
+            e.g. x-ray data collected in angular space, into a
+            rectilinear grid (reciprocal space).
+
+        data_rect, norm, xmap, ymap, xwid, ywid =
+                curved_to_rect(..., return_maps=True):
+            distributes counts from a curvilinear grid (data_curved),
+            e.g. x-ray data collected in angular space, into a
+            rectilinear grid (reciprocal space).
+
+        q_par, q_perp, and data_curved are M x N following the normal
+        convention where the first & second index correspond to
+        the vertical (y) and horizontal (x) locations of the
+        scattering pattern.
+        q_par, q_perp represent the q coordinates of the center of
+        pixels whose intensities are stored in data_curved.
+        Reiterating the convention above, q_par and q_perp vary
+        primarily along the 2nd and 1st index, respectively.
+        rect_qpar and rect_qperp are evenly-spaced, monotonically
+        increasing arrays determining the new grid.
+
+        data_rect : the new matrix with intensity from data_curved
+            distributed into a regular grid defined by
+            rect_qpar, rect_qperp.
+        norm : a matrix with the same shape as data_rect representing
+            the area of the pixel in the original angular units.
+            It should be used to normalize the resulting array as
+            norm_z = data_rect / norm.
+
+        Algorithm:
+        Step 1 : Compute xmap, ymap, which contain the values of
+                 q_par and q_perp, but represented in pixel units of
+                 the target coordinates rect_qpar, rect_qperp.
+                 In other words, xmap(i,j) = 3.4 means that
+                 q_par(i,j) lands 2/5 of the q-distance between
+                 rect_qpar(3) and rect_qpar(4). Intensity in
+                 qpar(i,j) should thus be distributed in a 3:2 ratio
+                 between neighboring mini-columns of pixels 3 and 4.
+        Step 2 : Use the procedure described by Barna et al
+                 (RSI v.70, p. 2927, 1999) to distribute intensity
+                 from each source pixel i,j into each of 9
+                 destination pixels around the xmap(i,j) and
+                 ymap(i,j). Keep track of how many source "pixels"
+                 are placed into each bin in the variable "norm".
+                 Note also that if xmap(i,j)-floor(xmap(i,j)) > 0.5,
+                 the "center" pixel of the 9 destination pixels is
+                 floor(xmap+0.5).
+        (Outside this function): The normalized intensity in each
+            new pixel can be obtained as I = data_rect / norm, but
+            with the caveat that zero values of "norm" should be
+            changed to ones first, norm[norm == 0] = 1.0.
+
+        Example Usage:
+        1. Compute the values of q_par and q_perp for each pixel in
+           the image z (according to scattering geometry).
+        2. Set or determine a good target grid, e.g.:
+               min_qpar, max_qpar = q_par.min(), q_par.max()
+               min_qperp, max_qperp = q_perp.min(), q_perp.max()
+               q_par_rect, q_par_step = np.linspace(min_qpar,
+                   max_qpar, image_dim[1], retstep=True)
+               q_perp_rect, q_perp_step = np.linspace(min_qperp,
+                   max_qperp, image_dim[0], retstep=True)
+        3. data_rect = curved_to_rect(data_curved, q_par, q_perp,
+               q_par_rect, q_perp_rect)
+        4. plt.imshow(data_rect, extent=[
+               q_par_rect[0], q_par_rect[-1],
+               q_perp_rect[-1], q_perp_rect[0]])
+           plt.xlabel('q$_\\parallel$ [\u212b$^{-1}$]')
+           plt.ylabel('q$_\\perp$ [\u212b$^{-1}$]')
+        """
+        out_width, out_height = q_par_rect.size, q_perp_rect.size
+
+        # Check correct dimensionality
+        dims = data_curved.shape
+        assert q_par.shape == dims and q_perp.shape == dims
+
+        data_rect = np.zeros((out_height, out_width))
+        norm = np.zeros_like(data_rect)
+
+        rect_width = q_par_rect[1] - q_par_rect[0]
+        rect_height = q_perp_rect[1] - q_perp_rect[0]
+        rect_qpar_shift = q_par_rect - rect_width/2.0
+        rect_qperp_shift = q_perp_rect - rect_height/2.0
+
+        # Precompute source pixels that are outside the target area
+        out_of_bounds = (
+            (q_par < rect_qpar_shift[0])
+            | (q_par > rect_qpar_shift[-1] + rect_width)
+            | (q_perp < rect_qperp_shift[0])
+            | (q_perp > rect_qperp_shift[-1] + rect_height))
+
+        # Vectorize the search for where q_par[i,j] and q_perp[i,j]
+        # fall on the grid formed by q_par_rect and q_perp_rect
+        #
+        # 1. Expand rect_qpar_shift (a vector) such that
+        #    rect_qpar_shift_cube[i, j, :] is identical to
+        #    rect_qpar_shift, and is a rising sequence of values of
+        #    qpar
+        # 2. Expand q_par such that qpar_cube[i, j, :] all correspond
+        #    to the value q_par[i, j].
+        # - Note that tile was found first and used in one case but
+        #   not the other. broadcast_to should likely be used
+        #   for both. But in both cases, it seemed to be easiest or
+        #   only possible if the extra dimensions were leading dims,
+        #   not trailing. That is the reason for the use of transpose
+        #   in qpar_cube.
+        rect_qpar_shift_cube = np.tile(rect_qpar_shift, q_par.shape + (1,))
+        qpar_cube = np.transpose(np.broadcast_to(
+            q_par, ((len(rect_qpar_shift),) + q_par.shape)), (1,2,0))
+        rect_qperp_shift_cube = np.tile(rect_qperp_shift, q_perp.shape + (1,))
+        qperp_cube = np.transpose(np.broadcast_to(
+            q_perp, ((len(rect_qperp_shift),) + q_perp.shape)), (1,2,0))
+
+        # We want the index of the highest rect_qpar_shift that is
+        # still below qpar, whereas the argmax operation yields the
+        # first rect_qpar_shift that is above qpar. We subtract 1 to
+        # take care of this and then clip any negative indices
+        # to 0.
+        highpx_x = np.argmax(qpar_cube < rect_qpar_shift_cube, axis=2) - 1
+        highpx_y = np.argmax(qperp_cube < rect_qperp_shift_cube, axis=2) - 1
+        highpx_x[highpx_x < 0] = 0
+        highpx_y[highpx_y < 0] = 0
+
+        # Compute xmap and ymap
+        xmap = np.where(
+            out_of_bounds, np.nan,
+            highpx_x - 0.5 + (q_par - rect_qpar_shift[highpx_x]) / rect_width)
+        ymap = np.where(
+            out_of_bounds, np.nan,
+            highpx_y - 0.5
+            + (q_perp - rect_qperp_shift[highpx_y]) / rect_height)
+
+        # Optionally, print out-of-bounds pixels
+        if np.any(out_of_bounds):
+            print(f'Warning: Found {out_of_bounds.sum()} source pixels that '
+                  'are outside the target bounding box')
+            # out_of_bounds_indices = np.transpose(np.where(out_of_bounds))
+            # for i, j in out_of_bounds_indices:
+            #     print(f'pixel {i}, {j} is out of bounds...skip')
+
+        x1 = np.floor(xmap + 0.5).astype(int)
+        y1 = np.floor(ymap + 0.5).astype(int)
+
+        # Compute the effective size of each source pixel (for
+        # comparison with target pixels)
+        xwid = np.abs(np.diff(xmap, axis=1))
+        ywid = np.abs(np.diff(ymap, axis=0))
+
+        # Prepend xwid, ywid with their first column, row
+        # (respectively) to match shape with xmap, ymap
+        xwid = np.insert(xwid, 0, xwid[:,0], axis=1)
+        ywid = np.insert(ywid, 0, ywid[0,:], axis=0)
+
+        # Compute mapping of source pixel to up to 9 closest pixels,
+        # after Barna (1999)
+        col = np.zeros((3,)+xmap.shape)
+        row = np.zeros((3,)+xmap.shape)
+        col[0,:,:] = np.where(
+            0.5 - (xmap - x1 + 0.5)/xwid > 0.0,
+            0.5 - (xmap - x1 + 0.5)/xwid, 0.0)
+        col[2,:,:] = np.where(
+            0.5 + (xmap - x1 - 0.5)/xwid > 0.0,
+            0.5 + (xmap - x1 - 0.5)/xwid, 0.0)
+        col[1,:,:] = 1.0 - col[0,:,:] - col[2,:,:]
+        row[0,:,:] = np.where(
+            0.5 - (ymap - y1 + 0.5)/ywid > 0.0,
+            0.5 - (ymap - y1 + 0.5)/ywid, 0.0)
+        row[2,:,:] = np.where(
+            0.5 + (ymap - y1 - 0.5)/ywid > 0.0,
+            0.5 + (ymap - y1 - 0.5)/ywid, 0.0)
+        row[1,:,:] = 1.0 - row[0,:,:] - row[2,:,:]
+
+        for k in (-1, 0, 1):
+            for m in (-1, 0, 1):
+                source_indices = (x1+k > -1) & (x1+k < out_width) & (y1+m > -1) & (y1+m < out_height)
+                x1_sub = x1[source_indices]+k
+                y1_sub = y1[source_indices]+m
+
+                np.add.at(data_rect, (y1_sub, x1_sub),
+                          data_curved[source_indices] * col[k+1, source_indices] * row[m+1, source_indices])
+                np.add.at(norm, (y1_sub, x1_sub),
+                          col[k+1, source_indices] * row[m+1, source_indices])
+                # The following fails because although
+                # [y1_sub, x1_sub] can refer to the same location more
+                # than once, the "+=" operation acts on the original
+                # value and does not have knowledge of incremental
+                # changes.
+                # data_rect[y1_sub, x1_sub] += (data_curved[source_indices]
+                #     * col[k+1, source_indices] * row[m+1, source_indices])
+                # norm[y1_sub, x1_sub] += \
+                #     col[k+1, source_indices] * row[m+1, source_indices]
+
+        if normalize:
+            norm[norm == 0] = 1.0
+            data_rect /= norm
+
+        if return_maps:
+            return data_rect, norm, xmap, ymap, xwid, ywid
+        else:
+            return data_rect
+
+    def _calc_q_coords(self, images, thetas, poni_file):
+        """Return arrays representing the perpendicular and
+        parallel components of q relative to the detector surface
+        for each pixel in an image for each theta.
+        """
+        # Third party modules
+        from pyFAI import load
+
+        # Load the PONI file info:
+        # PONI coordinates relative to the left bottom detector corner
+        # viewed along the beam with the "1" and "2" directions along
+        # the detector rows and columns, respectively
+        poni = load(poni_file)
+        assert poni.get_shape() == images.shape[1:]
+        image_dim = poni.get_shape()
+        sample_to_detector = poni.dist*1000  # Sample to detector (mm)
+        pixel_size = round(poni.pixel1*1000, 3)  # Pixel size (mm)
+        poni1 = poni.poni1*1000  # Point of normal incidence 1 (mm)
+        poni2 = poni.poni2*1000  # Point of normal incidence 2 (mm)
+        rot1 = poni.rot1  # Rotational angle 1 (rad)
+        rot2 = poni.rot2  # Rotational angle 2 (rad)
+        xray_wavevector = 2.e-10*np.pi / poni.wavelength
+
+        # Pixel locations relative to where the incident beam
+        # intersects the detector in the GIWAXS coordinates frame
+        pixel_vert_position = (poni1 + sample_to_detector*np.tan(rot2)
+                               - pixel_size*np.arange(image_dim[0]))
+        pixel_hor_position = (pixel_size*np.arange(image_dim[1])
+                              - poni2 + sample_to_detector*np.tan(rot1))
+
+        # Deflection angles relative to the incident beam at each
+        # pixel location
+        delta = np.tile(
+            np.arctan(pixel_vert_position/sample_to_detector),
+            (image_dim[1],1)).T
+        nu = np.tile(
+            np.arctan(pixel_hor_position/sample_to_detector),
+            (image_dim[0],1))
+        sign_nu = 2*(nu >= 0) - 1
+
+        # Calculate q_par, q_perp
+        # RV: For now use the same q-coords for all thetas, based on
+        # the range for the first theta
+        # q_par = []
+        # q_perp = []
+        # for theta in thetas:
+        #     alpha = np.deg2rad(theta)
+        #     beta = delta - alpha
+        #     cosnu = np.cos(nu)
+        #     cosb = np.cos(beta)
+        #     cosa = np.cos(alpha)
+        #     sina = np.sin(alpha)
+        #     q_par.append(sign_nu * xray_wavevector * np.sqrt(
+        #         cosa*cosa + cosb*cosb - 2*cosa*cosb*cosnu))
+        #     q_perp.append(xray_wavevector*(sina + np.sin(beta)))
+        alpha = np.deg2rad(thetas[0])
+        beta = delta - alpha
+        cosnu = np.cos(nu)
+        cosb = np.cos(beta)
+        cosa = np.cos(alpha)
+        sina = np.sin(alpha)
+        q_par = sign_nu * xray_wavevector * np.sqrt(
+            cosa*cosa + cosb*cosb - 2*cosa*cosb*cosnu)
+        q_perp = xray_wavevector*(sina + np.sin(beta))
+
+        return q_par, q_perp
+
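
For orientation, the sketch below exercises the new GiwaxsConversionProcessor.curved_to_rect static method on synthetic data; it is not part of the package diff. It assumes this wheel (and numpy) is installed so that CHAP.giwaxs.processor is importable. The detector size, wavelength, and deflection-angle ranges are made up for illustration, and the q expressions simply mimic the grazing-incidence formulas in _calc_q_coords with the incidence angle set to zero.

# Minimal usage sketch for curved_to_rect (illustrative, not from the wheel).
import numpy as np

from CHAP.giwaxs.processor import GiwaxsConversionProcessor

# Synthetic detector geometry: 80 x 100 pixels with horizontal (nu) and
# vertical (delta) deflection angles in radians.
rows, cols = 80, 100
nu, delta = np.meshgrid(
    np.linspace(-0.3, 0.3, cols), np.linspace(-0.05, 0.55, rows))

# Curvilinear q coordinates of each pixel center, mimicking the formulas
# in _calc_q_coords with the incidence angle alpha set to zero and a
# wavevector for an (assumed) 1 Angstrom wavelength.
k = 2.0 * np.pi
q_par = np.where(nu >= 0, 1, -1) * k * np.sqrt(
    1.0 + np.cos(delta)**2 - 2.0 * np.cos(delta) * np.cos(nu))
q_perp = k * np.sin(delta)

# A synthetic "curved" image and a target rectilinear grid spanning the
# full q range with the same pixel counts as the detector image.
data_curved = np.random.poisson(100.0, size=(rows, cols)).astype(float)
q_par_rect = np.linspace(q_par.min(), q_par.max(), cols)
q_perp_rect = np.linspace(q_perp.min(), q_perp.max(), rows)

# Redistribute the counts onto the rectilinear grid (already divided by
# the per-pixel weights, since normalize defaults to True).
data_rect = GiwaxsConversionProcessor.curved_to_rect(
    data_curved, q_par, q_perp, q_par_rect, q_perp_rect)
print(data_rect.shape)  # (80, 100)

Passing return_maps=True additionally returns the norm array and the xmap/ymap/xwid/ywid intermediates described in the docstring above.
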
CHAP/giwaxs/reader.py ADDED
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+if __name__ == '__main__':
+    from CHAP.reader import main
+    main()
CHAP/giwaxs/writer.py ADDED
@@ -0,0 +1,5 @@
+#!/usr/bin/env python
+
+if __name__ == '__main__':
+    from CHAP.writer import main
+    main()
CHAP/pipeline.py CHANGED
@@ -42,9 +42,10 @@ class Pipeline():
             if hasattr(item, 'execute'):
                 self.logger.info(f'Calling "execute" on {item}')
                 data = item.execute(data=data, **kwargs)
-
         self.logger.info(f'Executed "execute" in {time()-t0:.3f} seconds')
 
+        return data
+
 
 class PipelineData(dict):
     """Wrapper for all results of PipelineItem.execute"""
@@ -80,21 +81,21 @@ class PipelineItem():
         return [d['data'] for d in data]
 
     def get_config(self, data, schema, remove=True, **kwargs):
-        """Look through `data` for an item whose value for the
+        """Look through `data` for an item whose value for the first
         `'schema'` key matches `schema`. Convert the value for that
         item's `'data'` key into the configuration `BaseModel`
         identified by `schema` and return it.
 
-        :param data: input data from a previous `PipelineItem`
-        :type data: list[PipelineData]
-        :param schema: name of the `BaseModel` class to match in
-            `data` & return
+        :param data: Input data from a previous `PipelineItem`
+        :type data: list[PipelineData]
+        :param schema: Name of the `BaseModel` class to match in
+            `data` & return.
         :type schema: str
-        :param remove: if there is a matching entry in `data`, remove
+        :param remove: If there is a matching entry in `data`, remove
             it from the list, defaults to `True`.
         :type remove: bool, optional
-        :raises ValueError: if there's no match for `schema` in `data`
-        :return: matching configuration model
+        :raises ValueError: If there's no match for `schema` in `data`.
+        :return: The first matching configuration model.
         :rtype: BaseModel
         """
 
@@ -107,6 +108,7 @@ class PipelineItem():
                 matching_config = d.get('data')
                 if remove:
                     data.pop(i)
+                break
 
         if not matching_config:
             raise ValueError(f'No configuration for {schema} found')
@@ -123,6 +125,40 @@ class PipelineItem():
 
         return model_config
 
+    def get_data(self, data, name, remove=True):
+        """Look through `data` for an item whose value for the first
+        `'name'` key matches `name` and return it.
+
+        :param data: Input data from a previous `PipelineItem`
+        :type data: list[PipelineData]
+        :param name: Name of the data item to match in `data` & return.
+        :type name: str
+        :param remove: If there is a matching entry in `data`, remove
+            it from the list, defaults to `True`.
+        :type remove: bool, optional
+        :raises ValueError: If there's no match for `name` in `data`.
+        :return: The first matching data item.
+        """
+
+        self.logger.debug(f'Getting {name} data item')
+        t0 = time()
+
+        matching_data = False
+        for i, d in enumerate(data):
+            if d.get('name') == name:
+                matching_data = d.get('data')
+                if remove:
+                    data.pop(i)
+                break
+
+        if not matching_data:
+            raise ValueError(f'No match for {name} data item found')
+
+        self.logger.debug(
+            f'Got {name} data in {time()-t0:.3f} seconds')
+
+        return matching_data
+
     def execute(self, schema=None, **kwargs):
         """Run the appropriate method of the object and return the
         result.
@@ -223,6 +259,8 @@ class MultiplePipelineItem(PipelineItem):
                 raise OSError('input directory is not accessible for '
                               f'reading ({inputdir})')
             args['inputdir'] = inputdir
+            # FIX: Right now this can bomb if MultiplePipelineItem
+            # is called simultaneously from multiple nodes in MPI
             if 'outputdir' in item_args:
                 outputdir = os.path.normpath(os.path.join(
                     args['outputdir'], item_args.pop('outputdir')))
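
As a usage note, the new PipelineItem.get_data mirrors get_config but matches pipeline items on their 'name' key rather than a 'schema' key, and by default removes the first match from the list. The sketch below is not part of this diff; it assumes the wheel is installed. The class name ExampleProcessor is hypothetical, while the 'MapProcessor' name and the 'giwaxs.models.GiwaxsConversionConfig' schema string are the ones used by GiwaxsConversionProcessor above.

# Illustrative sketch of the get_data/get_config lookups (not from the wheel).
from CHAP.processor import Processor


class ExampleProcessor(Processor):
    """Toy processor that pulls one named data item and one validated
    configuration model out of the pipeline data list."""

    def process(self, data, **kwargs):
        # First matching item whose 'name' is 'MapProcessor'; the entry
        # is popped from `data` because remove defaults to True.
        nxentry = self.get_data(data, 'MapProcessor')

        # First matching item whose 'schema' is the GIWAXS conversion
        # config; raises ValueError if no such entry is present.
        config = self.get_config(
            data, 'giwaxs.models.GiwaxsConversionConfig')

        self.logger.info(
            f'Got {nxentry.nxname} and a {type(config).__name__}')
        return nxentry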