ChessAnalysisPipeline-0.0.17.dev3-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. CHAP/TaskManager.py +216 -0
  2. CHAP/__init__.py +27 -0
  3. CHAP/common/__init__.py +57 -0
  4. CHAP/common/models/__init__.py +8 -0
  5. CHAP/common/models/common.py +124 -0
  6. CHAP/common/models/integration.py +659 -0
  7. CHAP/common/models/map.py +1291 -0
  8. CHAP/common/processor.py +2869 -0
  9. CHAP/common/reader.py +658 -0
  10. CHAP/common/utils.py +110 -0
  11. CHAP/common/writer.py +730 -0
  12. CHAP/edd/__init__.py +23 -0
  13. CHAP/edd/models.py +876 -0
  14. CHAP/edd/processor.py +3069 -0
  15. CHAP/edd/reader.py +1023 -0
  16. CHAP/edd/select_material_params_gui.py +348 -0
  17. CHAP/edd/utils.py +1572 -0
  18. CHAP/edd/writer.py +26 -0
  19. CHAP/foxden/__init__.py +19 -0
  20. CHAP/foxden/models.py +71 -0
  21. CHAP/foxden/processor.py +124 -0
  22. CHAP/foxden/reader.py +224 -0
  23. CHAP/foxden/utils.py +80 -0
  24. CHAP/foxden/writer.py +168 -0
  25. CHAP/giwaxs/__init__.py +11 -0
  26. CHAP/giwaxs/models.py +491 -0
  27. CHAP/giwaxs/processor.py +776 -0
  28. CHAP/giwaxs/reader.py +8 -0
  29. CHAP/giwaxs/writer.py +8 -0
  30. CHAP/inference/__init__.py +7 -0
  31. CHAP/inference/processor.py +69 -0
  32. CHAP/inference/reader.py +8 -0
  33. CHAP/inference/writer.py +8 -0
  34. CHAP/models.py +227 -0
  35. CHAP/pipeline.py +479 -0
  36. CHAP/processor.py +125 -0
  37. CHAP/reader.py +124 -0
  38. CHAP/runner.py +277 -0
  39. CHAP/saxswaxs/__init__.py +7 -0
  40. CHAP/saxswaxs/processor.py +8 -0
  41. CHAP/saxswaxs/reader.py +8 -0
  42. CHAP/saxswaxs/writer.py +8 -0
  43. CHAP/server.py +125 -0
  44. CHAP/sin2psi/__init__.py +7 -0
  45. CHAP/sin2psi/processor.py +8 -0
  46. CHAP/sin2psi/reader.py +8 -0
  47. CHAP/sin2psi/writer.py +8 -0
  48. CHAP/tomo/__init__.py +15 -0
  49. CHAP/tomo/models.py +210 -0
  50. CHAP/tomo/processor.py +3862 -0
  51. CHAP/tomo/reader.py +9 -0
  52. CHAP/tomo/writer.py +59 -0
  53. CHAP/utils/__init__.py +6 -0
  54. CHAP/utils/converters.py +188 -0
  55. CHAP/utils/fit.py +2947 -0
  56. CHAP/utils/general.py +2655 -0
  57. CHAP/utils/material.py +274 -0
  58. CHAP/utils/models.py +595 -0
  59. CHAP/utils/parfile.py +224 -0
  60. CHAP/writer.py +122 -0
  61. MLaaS/__init__.py +0 -0
  62. MLaaS/ktrain.py +205 -0
  63. MLaaS/mnist_img.py +83 -0
  64. MLaaS/tfaas_client.py +371 -0
  65. chessanalysispipeline-0.0.17.dev3.dist-info/LICENSE +60 -0
  66. chessanalysispipeline-0.0.17.dev3.dist-info/METADATA +29 -0
  67. chessanalysispipeline-0.0.17.dev3.dist-info/RECORD +70 -0
  68. chessanalysispipeline-0.0.17.dev3.dist-info/WHEEL +5 -0
  69. chessanalysispipeline-0.0.17.dev3.dist-info/entry_points.txt +2 -0
  70. chessanalysispipeline-0.0.17.dev3.dist-info/top_level.txt +2 -0
CHAP/giwaxs/processor.py
@@ -0,0 +1,776 @@
+ #!/usr/bin/env python
+ #-*- coding: utf-8 -*-
+ """
+ File : processor.py
+ Author : Rolf Verberg
+ Description: Module for Processors used only by GIWAXS experiments
+ """
+ # System modules
+ from json import loads
+ import os
+
+ # Third party modules
+ import numpy as np
+
+ # Local modules
+ from CHAP.processor import Processor
+
+
+ class GiwaxsConversionProcessor(Processor):
+     """A processor for converting GIWAXS images from curved to
+     rectangular coordinates.
+     """
+     def process(
+             self, data, config, save_figures=False, inputdir='.',
+             outputdir='.', interactive=False):
+         """Process the GIWAXS input images & configuration and return
+         a map of the images in rectangular coordinates as a
+         `nexusformat.nexus.NXroot` object.
+
+         :param data: Results of `common.MapProcessor` containing the
+             map of GIWAXS input images.
+         :type data: list[PipelineData]
+         :param config: Initialization parameters for an instance of
+             giwaxs.models.GiwaxsConversionConfig.
+         :type config: dict
+         :param save_figures: Save .pngs of plots for checking inputs &
+             outputs of this Processor, defaults to `False`.
+         :type save_figures: bool, optional
+         :param inputdir: Input directory, used only if files in the
+             input configuration are not absolute paths,
+             defaults to `'.'`.
+         :type inputdir: str, optional
+         :param outputdir: Directory to which any output figures will
+             be saved, defaults to `'.'`.
+         :type outputdir: str, optional
+         :param interactive: Allows for user interactions, defaults to
+             `False`.
+         :type interactive: bool, optional
+         :return: Converted GIWAXS images.
+         :rtype: nexusformat.nexus.NXroot
+         """
+         # Third party modules
+         from nexusformat.nexus import (
+             NXentry,
+             NXroot,
+         )
+
+         # Load the detector data
+         try:
+             nxobject = self.get_data(data)
+             if isinstance(nxobject, NXroot):
+                 nxroot = nxobject
+             elif isinstance(nxobject, NXentry):
+                 nxroot = NXroot()
+                 nxroot[nxobject.nxname] = nxobject
+                 nxobject.set_default()
+             else:
+                 raise ValueError(
+                     f'Invalid nxobject in data pipeline ({type(nxobject)})')
+         except Exception as exc:
+             raise RuntimeError(
+                 'No valid detector data in input pipeline data') from exc
+
+         # Load the validated GIWAXS conversion configuration
+         giwaxs_config = self.get_config(
+             data=data, config=config, inputdir=inputdir,
+             schema='giwaxs.models.GiwaxsConversionConfig')
+
+         return self.convert_q_rect(
+             nxroot, giwaxs_config, save_figures=save_figures,
+             interactive=interactive, outputdir=outputdir)
+
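The try/except block in process() above only promotes a bare NXentry to a full NXroot tree before any further processing. A minimal standalone sketch of that promotion with nexusformat (synthetic names, illustrative only, not part of the package):

    # Sketch: wrap an orphan NXentry in a new NXroot and mark it as the
    # default entry, mirroring the elif branch of process() above.
    from nexusformat.nexus import NXentry, NXroot

    nxentry = NXentry(name='entry')      # e.g. what an upstream reader returns
    nxroot = NXroot()
    nxroot[nxentry.nxname] = nxentry     # attach the entry to the new root
    nxroot['entry'].set_default()        # record it as the default entry
    print(nxroot.tree)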
+     def convert_q_rect(
+             self, nxroot, config, save_figures=False, interactive=False,
+             outputdir='.'):
+         """Return NXroot containing the converted GIWAXS images.
+
+         :param nxroot: GIWAXS map with the raw detector data.
+         :type nxroot: nexusformat.nexus.NXroot
+         :param config: GIWAXS conversion configuration.
+         :type config: CHAP.giwaxs.models.GiwaxsConversionConfig
+         :param save_figures: Save .pngs of plots for checking inputs &
+             outputs of this Processor, defaults to `False`.
+         :type save_figures: bool, optional
+         :param interactive: Allows for user interactions, defaults to
+             `False`.
+         :type interactive: bool, optional
+         :param outputdir: Directory to which any output figures will
+             be saved, defaults to `'.'`.
+         :type outputdir: str, optional
+         :return: Converted GIWAXS images.
+         :rtype: nexusformat.nexus.NXroot
+         """
+         # Third party modules
+         if interactive or save_figures:
+             import matplotlib.pyplot as plt
+         from nexusformat.nexus import (
+             NXdata,
+             NXfield,
+             NXprocess,
+         )
+
+         # Add the NXprocess object to the NXroot
+         nxprocess = NXprocess()
+         try:
+             nxroot[f'{nxroot.default}_converted'] = nxprocess
+         except Exception:
+             # Local imports
+             from CHAP.utils.general import nxcopy
+
+             # Copy nxroot if nxroot is read as read-only
+             nxroot = nxcopy(nxroot)
+             nxroot[f'{nxroot.default}_converted'] = nxprocess
+         nxprocess.conversion_config = config.model_dump_json()
+
+         # Validate the azimuthal integrators and independent dimensions
+         nxentry = nxroot[nxroot.default]
+         nxdata = nxentry[nxentry.default]
+         ais = config.azimuthal_integrators
+         if len(ais) > 1:
+             raise RuntimeError(
+                 'More than one azimuthal integrator not yet implemented')
+         if ais[0].get_id() not in nxdata:
+             raise RuntimeError('Unable to find detector data for '
+                                f'{ais[0].get_id()} in {nxentry.tree}')
+         if not isinstance(nxdata.attrs['axes'], str):
+             raise RuntimeError(
+                 'More than one independent dimension not yet implemented')
+
+         # Collect the raw giwaxs images
+         if config.scan_step_indices is None:
+             thetas = nxdata[nxdata.attrs['axes']]
+             giwaxs_data = nxdata[ais[0].get_id()]
+         else:
+             thetas = nxdata[nxdata.attrs['axes']][config.scan_step_indices]
+             giwaxs_data = nxdata[ais[0].get_id()][config.scan_step_indices]
+         self.logger.debug(f'giwaxs_data.shape: {giwaxs_data.shape}')
+         effective_map_shape = giwaxs_data.shape[:-2]
+         self.logger.debug(f'effective_map_shape: {effective_map_shape}')
+         image_dims = giwaxs_data.shape[1:]
+         self.logger.debug(f'image_dims: {image_dims}')
+
+         # Get the components of q parallel and perpendicular to the
+         # detector
+         q_par, q_perp = self._calc_q_coords(giwaxs_data, thetas, ais[0].ai)
+
+         # Get the range of the perpendicular component of q and that
+         # of the parallel one at near grazing incidence as well as
+         # the corresponding rectangular grid with the same dimensions
+         # as the detector grid and converted to this grid from the
+         # (q_par, q_perp)-grid
+         # RV: For now use the same q-coords for all thetas, based on
+         # the range for the first theta
+         q_perp_min_index = np.argmin(np.abs(q_perp[:,0]))
+         q_par_rect = np.linspace(
+             q_par[q_perp_min_index,:].min(),
+             q_par[q_perp_min_index,:].max(), image_dims[1])
+         q_perp_rect = np.linspace(
+             q_perp.min(), q_perp.max(), image_dims[0])
+         giwaxs_data_rect = []
+         # q_par_rect = []
+         # q_perp_rect = []
+         for i in range(len(thetas)):
+             # q_perp_min_index = np.argmin(np.abs(q_perp[i,:,0]))
+             # q_par_rect.append(np.linspace(
+             #     q_par[i,q_perp_min_index,:].min(),
+             #     q_par[i,q_perp_min_index,:].max(), image_dims[1]))
+             # q_perp_rect.append(np.linspace(
+             #     q_perp[i].min(), q_perp[i].max(), image_dims[0]))
+             # giwaxs_data_rect.append(
+             #     GiwaxsConversionProcessor.curved_to_rect(
+             #         giwaxs_data[i], q_par[i], q_perp[i], q_par_rect[i],
+             #         q_perp_rect[i]))
+             giwaxs_data_rect.append(
+                 GiwaxsConversionProcessor.curved_to_rect(
+                     giwaxs_data[i], q_par, q_perp, q_par_rect,
+                     q_perp_rect))
+
+             if interactive or save_figures:
+                 vmax = giwaxs_data[i].max()/10
+                 fig, ax = plt.subplots(1,2, figsize=(10, 5))
+                 ax[1].imshow(
+                     giwaxs_data_rect[i],
+                     vmin=0, vmax=vmax,
+                     origin='lower',
+                     extent=(q_par_rect.min(), q_par_rect.max(),
+                             q_perp_rect.min(), q_perp_rect.max()))
+                 ax[1].set_aspect('equal')
+                 ax[1].set_title('Transformed Image')
+                 ax[1].set_xlabel(r'q$_\parallel$'+' [\u212b$^{-1}$]')
+                 ax[1].set_ylabel(r'q$_\perp$'+' [\u212b$^{-1}$]')
+                 im = ax[0].imshow(giwaxs_data[i], vmin=0, vmax=vmax)
+                 ax[0].set_aspect('equal')
+                 lhs = ax[0].get_position().extents
+                 rhs = ax[1].get_position().extents
+                 ax[0].set_position(
+                     (lhs[0], rhs[1], rhs[2] - rhs[0], rhs[3] - rhs[1]))
+                 ax[0].set_title('Raw Image')
+                 ax[0].set_xlabel('column index')
+                 ax[0].set_ylabel('row index')
+                 fig.subplots_adjust(right=0.85)
+                 cbar_ax = fig.add_axes([0.9, 0.15, 0.025, 0.7])
+                 fig.colorbar(im, cax=cbar_ax)
+                 if interactive:
+                     plt.show()
+                 if save_figures:
+                     if config.scan_step_indices is None:
+                         fig.savefig(os.path.join(outputdir, 'converted'))
+                     else:
+                         fig.savefig(os.path.join(
+                             outputdir,
+                             f'converted_{config.scan_step_indices[i]}'))
+                 plt.close()
+
+         # Create the NXdata object with the converted images
+         if False: #RV len(thetas) == 1:
+             nxprocess.data = NXdata(
+                 NXfield(np.asarray(giwaxs_data_rect[0]), 'converted'),
+                 (NXfield(
+                     q_perp_rect[0], 'q_perp_rect',
+                     attrs={'units': '\u212b$^{-1}$'}),
+                  NXfield(
+                     q_par_rect[0], 'q_par_rect',
+                     attrs={'units': '\u212b$^{-1}$'})))
+             nxprocess.data.theta = NXfield(
+                 thetas[0], 'thetas', attrs={'units': 'rad'})
+         else:
+             nxprocess.data = NXdata(
+                 NXfield(np.asarray(giwaxs_data_rect), 'converted'),
+                 (NXfield(
+                     thetas, 'thetas', attrs={'units': 'rad'}),
+                  NXfield(
+                     q_perp_rect, 'q_perp_rect',
+                     attrs={'units': '\u212b$^{-1}$'}),
+                  NXfield(
+                     q_par_rect, 'q_par_rect',
+                     attrs={'units': '\u212b$^{-1}$'})))
+         nxprocess.default = 'data'
+
+         return nxroot
+
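The NXdata group written above has a fixed layout: a 3-D 'converted' signal with (thetas, q_perp_rect, q_par_rect) axes. A standalone sketch of that structure with synthetic arrays (shapes and values here are made up, illustrative only):

    # Synthetic illustration of the converted-data NXdata layout built above.
    import numpy as np
    from nexusformat.nexus import NXdata, NXfield

    converted = np.zeros((3, 256, 256))        # (theta, q_perp, q_par)
    thetas = np.array([0.1, 0.2, 0.3])
    q_perp_rect = np.linspace(-0.5, 2.5, 256)
    q_par_rect = np.linspace(-2.0, 2.0, 256)

    nxdata = NXdata(
        NXfield(converted, 'converted'),
        (NXfield(thetas, 'thetas', attrs={'units': 'rad'}),
         NXfield(q_perp_rect, 'q_perp_rect', attrs={'units': '\u212b$^{-1}$'}),
         NXfield(q_par_rect, 'q_par_rect', attrs={'units': '\u212b$^{-1}$'})))
    print(nxdata.tree)   # shows the converted signal with its three axes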
+     @staticmethod
+     def curved_to_rect(
+             data_curved, q_par, q_perp, q_par_rect, q_perp_rect,
+             return_maps=False, normalize=True):
+         """
+         data_rect = curved_to_rect(...):
+             distributes counts from a curvilinear grid (data_curved),
+             e.g. x-ray data collected in angular space, into a
+             rectilinear grid (reciprocal space).
+
+         data_rect, norm, xmap, ymap, xwid, ywid =
+                 curved_to_rect(..., return_maps=True):
+             as above, but additionally returns the normalization
+             matrix, the pixel-mapping arrays (xmap, ymap), and the
+             effective source-pixel widths (xwid, ywid).
+
+         q_par, q_perp, and data_curved are M x N following the normal
+         convention where the first & second index correspond to
+         the vertical (y) and horizontal (x) locations of the
+         scattering pattern.
+         q_par, q_perp represent the q coordinates of the center of
+         pixels whose intensities are stored in data_curved.
+         Reiterating the convention above, q_par and q_perp vary
+         primarily along the 2nd and 1st index, respectively.
+         q_par_rect and q_perp_rect are evenly-spaced, monotonically
+         increasing arrays determining the new grid.
+
+         data_rect : the new matrix with intensity from data_curved
+             distributed into a regular grid defined by
+             q_par_rect, q_perp_rect.
+         norm : a matrix with the same shape as data_rect representing
+             the area of the pixel in the original angular units.
+             It should be used to normalize the resulting array as
+             norm_z = data_rect / norm.
+
+         Algorithm:
+         Step 1 : Compute xmap, ymap, which contain the values of
+             q_par and q_perp, but represented in pixel units of
+             the target coordinates q_par_rect, q_perp_rect.
+             In other words, xmap(i,j) = 3.4 means that
+             q_par(i,j) lands 2/5 of the q-distance between
+             q_par_rect(3) and q_par_rect(4). Intensity in
+             qpar(i,j) should thus be distributed in a 3:2 ratio
+             among the neighboring mini-columns of pixels 3 and 4.
+         Step 2 : Use the procedure described by Barna et al
+             (RSI v.70, p. 2927, 1999) to distribute intensity
+             from each source pixel i,j into each of 9
+             destination pixels around the xmap(i,j) and
+             ymap(i,j). Keep track of how many source "pixels"
+             are placed into each bin in the variable "norm".
+             Note also that if xmap(i,j)-floor(xmap(i,j)) > 0.5,
+             the "center" pixel of the 9 destination pixels is
+             floor(xmap+0.5).
+         (Outside this function, when normalize=False): The normalized
+             intensity in each new pixel can be obtained as
+             I = data_rect / norm, but with the caveat that zero
+             values of "norm" should be changed to ones first,
+             norm[norm == 0] = 1.0.
+
+         Example Usage:
+         1. Compute the values of q_par and q_perp for each pixel in
+            the image z (according to scattering geometry).
+         2. Set or determine a good target grid, e.g.:
+                min_qpar, max_qpar = q_par.min(), q_par.max()
+                min_qperp, max_qperp = q_perp.min(), q_perp.max()
+                q_par_rect, q_par_step = np.linspace(min_qpar,
+                    max_qpar, image_dim[1], retstep=True)
+                q_perp_rect, q_perp_step = np.linspace(min_qperp,
+                    max_qperp, image_dim[0], retstep=True)
+         3. data_rect = curved_to_rect(data_curved, q_par, q_perp,
+                q_par_rect, q_perp_rect)
+         4. plt.imshow(data_rect, extent = [
+                q_par_rect[0], q_par_rect[-1],
+                q_perp_rect[-1], q_perp_rect[0]])
+            plt.xlabel(r'q$_\parallel$'' [\u212b$^{-1}$]')
+            plt.ylabel(r'q$_\perp$'' [\u212b$^{-1}$]')
+         """
+         out_width, out_height = q_par_rect.size, q_perp_rect.size
+
+         # Check correct dimensionality
+         dims = data_curved.shape
+         assert q_par.shape == dims and q_perp.shape == dims
+
+         data_rect = np.zeros((out_height, out_width))
+         norm = np.zeros_like(data_rect)
+
+         rect_width = q_par_rect[1] - q_par_rect[0]
+         rect_height = q_perp_rect[1] - q_perp_rect[0]
+         q_par_rect_shift = q_par_rect - rect_width/2.0
+         q_perp_rect_shift = q_perp_rect - rect_height/2.0
+
+         # Precompute source pixels that are outside the target area
+         out_of_bounds = (
+             (q_par < q_par_rect_shift[0])
+             | (q_par > q_par_rect_shift[-1] + rect_width)
+             | (q_perp < q_perp_rect_shift[0])
+             | (q_perp > q_perp_rect_shift[-1] + rect_height))
+
+         # Vectorize the search for where q_par[i, j] and q_perp[i, j]
+         # fall on the grid formed by q_par_rect and q_perp_rect
+         #
+         # 1. Expand q_par_rect_shift (a vector) such that
+         #    q_par_rect_shift_cube[i, j, :] is identical to
+         #    q_par_rect_shift, and is a rising sequence of values of
+         #    qpar
+         # 2. Expand q_par such that qpar_cube[i, j, :] all correspond
+         #    to the value q_par[i, j].
+         # - Note that I found tile first and used that in one case but
+         #   not the other. I think broadcast_to should likely be used
+         #   for both. But in both cases, it seemed to be easiest or
+         #   only possible if the extra dimensions were leading dims,
+         #   not trailing. That is the reason for the use of transpose
+         #   in qpar_cube.
+         q_par_rect_shift_cube = np.tile(q_par_rect_shift, q_par.shape + (1,))
+         qpar_cube = np.transpose(np.broadcast_to(
+             q_par, ((len(q_par_rect_shift),) + q_par.shape)), (1,2,0))
+         q_perp_rect_shift_cube = np.tile(
+             q_perp_rect_shift, q_perp.shape + (1,))
+         qperp_cube = np.transpose(np.broadcast_to(
+             q_perp, ((len(q_perp_rect_shift),) + q_perp.shape)), (1,2,0))
+
+         # We want the index of the highest q_par_rect_shift that is
+         # still below qpar, whereas the argmax operation yields the
+         # first q_par_rect_shift that is above qpar. We subtract 1 to
+         # take care of this and then reset any negative indices to 0.
+         highpx_x = np.argmax(qpar_cube < q_par_rect_shift_cube, axis=2) - 1
+         highpx_y = np.argmax(qperp_cube < q_perp_rect_shift_cube, axis=2) - 1
+         highpx_x[highpx_x < 0] = 0
+         highpx_y[highpx_y < 0] = 0
+
+         # Compute xmap and ymap
+         xmap = np.where(
+             out_of_bounds, np.nan,
+             highpx_x - 0.5 + (q_par - q_par_rect_shift[highpx_x]) / rect_width)
+         ymap = np.where(
+             out_of_bounds, np.nan,
+             highpx_y - 0.5
+             + (q_perp - q_perp_rect_shift[highpx_y]) / rect_height)
+
+         # Optionally, print out-of-bounds pixels
+         if np.any(out_of_bounds):
+             print(f'Warning: Found {out_of_bounds.sum()} source pixels that '
+                   'are outside the target bounding box')
+             # out_of_bounds_indices = np.transpose(np.where(out_of_bounds))
+             # for i, j in out_of_bounds_indices:
+             #     print(f'pixel {i}, {j} is out of bounds...skip')
+
+         x1 = np.floor(xmap + 0.5).astype(int)
+         y1 = np.floor(ymap + 0.5).astype(int)
+
+         # Compute the effective size of each source pixel (for
+         # comparison with target pixels)
+         xwid = np.abs(np.diff(xmap, axis=1))
+         ywid = np.abs(np.diff(ymap, axis=0))
+
+         # Prepend xwid, ywid with their first column, row
+         # (respectively) to match shape with xmap, ymap
+         xwid = np.insert(xwid, 0, xwid[:,0], axis=1)
+         ywid = np.insert(ywid, 0, ywid[0,:], axis=0)
+
+         # Compute mapping of source pixel to up to 9 closest pixels,
+         # after Barna (1999)
+         col = np.zeros((3,)+xmap.shape)
+         row = np.zeros((3,)+xmap.shape)
+         col[0,:,:] = np.where(
+             0.5 - (xmap - x1 + 0.5)/xwid > 0.0,
+             0.5 - (xmap - x1 + 0.5)/xwid, 0.0)
+         col[2,:,:] = np.where(
+             0.5 + (xmap - x1 - 0.5)/xwid > 0.0,
+             0.5 + (xmap - x1 - 0.5)/xwid, 0.0)
+         col[1,:,:] = 1.0 - col[0,:,:] - col[2,:,:]
+         row[0,:,:] = np.where(
+             0.5 - (ymap - y1 + 0.5)/ywid > 0.0,
+             0.5 - (ymap - y1 + 0.5)/ywid, 0.0)
+         row[2,:,:] = np.where(
+             0.5 + (ymap - y1 - 0.5)/ywid > 0.0,
+             0.5 + (ymap - y1 - 0.5)/ywid, 0.0)
+         row[1,:,:] = 1.0 - row[0,:,:] - row[2,:,:]
+
+         for k in (-1, 0, 1):
+             for m in (-1, 0, 1):
+                 source_indices = ((x1+k > -1) & (x1+k < out_width) &
+                                   (y1+m > -1) & (y1+m < out_height))
+                 x1_sub = x1[source_indices]+k
+                 y1_sub = y1[source_indices]+m
+
+                 np.add.at(data_rect, (y1_sub, x1_sub),
+                           data_curved[source_indices] *
+                           col[k+1, source_indices] *
+                           row[m+1, source_indices])
+                 np.add.at(norm, (y1_sub, x1_sub),
+                           col[k+1, source_indices] * row[m+1, source_indices])
+                 # The following fails because although
+                 # [y1_sub, x1_sub] can refer to the same location more
+                 # than once, the "+=" operation acts on the original
+                 # value and does not have knowledge of incremental
+                 # changes.
+                 # data_rect[y1_sub, x1_sub] += (data_curved[source_indices]
+                 #     * col[k+1, source_indices] * row[m+1, source_indices])
+                 # norm[y1_sub, x1_sub] += \
+                 #     col[k+1, source_indices] * row[m+1, source_indices]
+
+         if normalize:
+             norm[norm == 0] = 1.0
+             data_rect /= norm
+
+         if return_maps:
+             return data_rect, norm, xmap, ymap, xwid, ywid
+         return data_rect
+
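A quick way to sanity-check the redistribution is that, with normalize=False, the Barna weights for each source pixel sum to one, so total counts are conserved apart from weight clipped at the grid edges. A standalone sketch on a synthetic, slightly sheared pixel grid (illustrative only, not part of the package):

    # Standalone sanity check of curved_to_rect on synthetic geometry.
    import numpy as np
    from CHAP.giwaxs.processor import GiwaxsConversionProcessor

    ny, nx = 64, 96
    rows, cols = np.mgrid[0:ny, 0:nx]
    # Fake curvilinear pixel-center coordinates (monotonic in each index)
    q_par = 0.01*cols + 0.001*rows     # varies mainly along the 2nd index
    q_perp = 0.01*rows + 0.0005*cols   # varies mainly along the 1st index
    data_curved = np.random.default_rng(0).poisson(
        100.0, (ny, nx)).astype(float)

    q_par_rect = np.linspace(q_par.min(), q_par.max(), nx)
    q_perp_rect = np.linspace(q_perp.min(), q_perp.max(), ny)

    data_rect, norm, xmap, ymap, xwid, ywid = \
        GiwaxsConversionProcessor.curved_to_rect(
            data_curved, q_par, q_perp, q_par_rect, q_perp_rect,
            return_maps=True, normalize=False)

    # Totals agree up to the weight that spills over the grid edges
    print(data_curved.sum(), data_rect.sum())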
+     def _calc_q_coords(self, images, thetas, ai):
+         """Return arrays representing the parallel and perpendicular
+         components of q relative to the detector surface for each
+         pixel in an image (for now evaluated at the first theta only).
+
+         :param images: GIWAXS images.
+         :type images: numpy.ndarray
+         :param thetas: Image theta values.
+         :type thetas: numpy.ndarray
+         :param ai: Azimuthal integrator.
+         :type ai: pyFAI.azimuthalIntegrator.AzimuthalIntegrator
+         :return: Parallel and perpendicular components of q relative
+             to the detector surface.
+         :rtype: numpy.ndarray, numpy.ndarray
+         """
+         # Load the PONI file info:
+         # PONI coordinates relative to the left bottom detector corner
+         # viewed along the beam with the "1" and "2" directions along
+         # the detector rows and columns, respectively
+         assert ai.get_shape() == images.shape[1:]
+         image_dim = ai.get_shape()
+         sample_to_detector = ai.dist*1000  # Sample to detector (mm)
+         pixel_size = round(ai.pixel1*1000, 3)  # Pixel size (mm)
+         poni1 = ai.poni1*1000  # Point of normal incidence 1 (mm)
+         poni2 = ai.poni2*1000  # Point of normal incidence 2 (mm)
+         rot1 = ai.rot1  # Rotational angle 1 (rad)
+         rot2 = ai.rot2  # Rotational angle 2 (rad)
+         xray_wavevector = 2.e-10*np.pi / ai.wavelength
+
+         # Pixel locations relative to where the incident beam
+         # intersects the detector in the GIWAXS coordinates frame
+         pixel_vert_position = (poni1 + sample_to_detector*np.tan(rot2)
+                                - pixel_size*np.arange(image_dim[0]))
+         pixel_hor_position = (pixel_size*np.arange(image_dim[1])
+                               - poni2 + sample_to_detector*np.tan(rot1))
+
+         # Deflection angles relative to the incident beam at each
+         # pixel location
+         delta = np.tile(
+             np.arctan(pixel_vert_position/sample_to_detector),
+             (image_dim[1],1)).T
+         nu = np.tile(
+             np.arctan(pixel_hor_position/sample_to_detector),
+             (image_dim[0],1))
+         sign_nu = 2*(nu>=0)-1
+
+         # Calculate q_par, q_perp
+         # RV: For now use the same q-coords for all thetas, based on
+         # the range for the first theta
+         # q_par = []
+         # q_perp = []
+         # for theta in thetas:
+         #     alpha = np.deg2rad(theta)
+         #     beta = delta - alpha
+         #     cosnu = np.cos(nu)
+         #     cosb = np.cos(beta)
+         #     cosa = np.cos(alpha)
+         #     sina = np.sin(alpha)
+         #     q_par.append(sign_nu * xray_wavevector * np.sqrt(
+         #         cosa*cosa + cosb*cosb - 2*cosa*cosb*cosnu))
+         #     q_perp.append(xray_wavevector*(sina + np.sin(beta)))
+         alpha = np.deg2rad(thetas[0])
+         beta = delta - alpha
+         cosnu = np.cos(nu)
+         cosb = np.cos(beta)
+         cosa = np.cos(alpha)
+         sina = np.sin(alpha)
+         q_par = sign_nu * xray_wavevector * np.sqrt(
+             cosa*cosa + cosb*cosb - 2*cosa*cosb*cosnu)
+         q_perp = xray_wavevector*(sina + np.sin(beta))
+
+         return q_par, q_perp
+
+
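Written out in LaTeX notation, with alpha the incidence angle (from thetas), delta and nu the per-pixel vertical and horizontal deflection angles, and beta = delta - alpha, the mapping evaluated by _calc_q_coords above is

    k = \frac{2\pi}{\lambda}, \qquad
    q_\parallel = \operatorname{sign}(\nu)\, k\,
        \sqrt{\cos^2\alpha + \cos^2\beta - 2\cos\alpha\,\cos\beta\,\cos\nu},
    \qquad
    q_\perp = k\,(\sin\alpha + \sin\beta)

which is exactly what the q_par and q_perp expressions above compute, with sign_nu playing the role of sign(nu).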
+ class PyfaiIntegrationProcessor(Processor):
+     """A processor for azimuthally integrating images."""
+     def process(self, data, config, inputdir='.'):
+         """Process the input images & configuration and return a map of
+         the azimuthally integrated images.
+
+         :param data: Results of `common.MapProcessor` or other suitable
+             preprocessor of the raw detector data containing the map of
+             input images.
+         :type data: list[PipelineData]
+         :param config: Initialization parameters for an instance of
+             giwaxs.models.PyfaiIntegrationConfig.
+         :type config: dict
+         :param inputdir: Input directory, used only if files in the
+             input configuration are not absolute paths,
+             defaults to `'.'`.
+         :type inputdir: str, optional
+         :return: Integrated images.
+         :rtype: nexusformat.nexus.NXroot
+         """
+         # Third party modules
+         import fabio
+         from nexusformat.nexus import (
+             NXdata,
+             NXentry,
+             NXfield,
+             NXprocess,
+             NXroot,
+             nxsetconfig,
+         )
+         from pyFAI.gui.utils.units import Unit
+
+         # Local imports
+         from CHAP.utils.general import nxcopy
+
+         nxsetconfig(memory=100000)
+
+         # Load the detector data
+         try:
+             nxobject = self.get_data(data)
+             if isinstance(nxobject, NXroot):
+                 nxroot = nxobject
+             elif isinstance(nxobject, NXentry):
+                 nxroot = NXroot()
+                 nxroot[nxobject.nxname] = nxobject
+                 nxobject.set_default()
+             else:
+                 raise ValueError(
+                     f'Invalid nxobject in data pipeline ({type(nxobject)})')
+         except Exception as exc:
+             raise RuntimeError(
+                 'No valid detector data in input pipeline data') from exc
+
+         # Load the validated integration configuration
+         config = self.get_config(
+             data=data, config=config, inputdir=inputdir,
+             schema='giwaxs.models.PyfaiIntegrationConfig')
+
+         # Validate the azimuthal integrator configuration and check
+         # against the input data (availability and shape)
+         data = {}
+         independent_dims = {}
+         try:
+             nxprocess_converted = nxroot[f'{nxroot.default}_converted']
+             conversion_config = loads(
+                 str(nxprocess_converted.conversion_config))
+             converted_ais = conversion_config['azimuthal_integrators']
+             if len(converted_ais) > 1:
+                 raise RuntimeError(
+                     'More than one detector not yet implemented')
+             if config.azimuthal_integrators is None:
+                 # Local modules
+                 from CHAP.giwaxs.models import AzimuthalIntegratorConfig
+
+                 config.azimuthal_integrators = [AzimuthalIntegratorConfig(
+                     **converted_ais[0])]
+             else:
+                 converted_ids = [ai['id'] for ai in converted_ais]
+                 skipped_detectors = []
+                 ais = []
+                 for ai in config.azimuthal_integrators:
+                     if ai.get_id() in converted_ids:
+                         ais.append(ai)
+                     else:
+                         skipped_detectors.append(ai.get_id())
+                 if skipped_detectors:
+                     self.logger.warning(
+                         f'Skipping detector(s) {skipped_detectors} '
+                         '(no converted data)')
+                 if not ais:
+                     raise RuntimeError(
+                         'No matching azimuthal integrators found')
+                 config.azimuthal_integrators = ais
+             nxdata = nxprocess_converted.data
+             axes = nxdata.attrs['axes']
+             if len(nxdata.attrs['axes']) != 3:
+                 raise RuntimeError('More than one independent dimension '
+                                    'not yet implemented')
+             axes = axes[0]
+             independent_dims[config.azimuthal_integrators[0].get_id()] = \
+                 nxcopy(nxdata[axes])
+             data[config.azimuthal_integrators[0].get_id()] = np.flip(
+                 nxdata.converted.nxdata, axis=1)
+         except Exception as exc:
+             experiment_type = loads(
+                 str(nxroot[nxroot.default].map_config))['experiment_type']
+             if experiment_type == 'GIWAXS':
+                 self.logger.warning(
+                     'No converted data found, use raw data for integration')
+             nxentry = nxroot[nxroot.default]
+             detector_ids = [
+                 #str(id, 'utf-8') for id in nxentry.detector_ids.nxdata]
+                 str(id) for id in nxentry.detector_ids.nxdata]
+             if len(detector_ids) > 1:
+                 raise RuntimeError(
+                     'More than one detector not yet implemented') from exc
+             if config.azimuthal_integrators is None:
+                 raise ValueError(
+                     'Missing azimuthal_integrators parameter in '
+                     f'PyfaiIntegrationProcessor.config ({config})') from exc
+             nxdata = nxentry[nxentry.default]
+             skipped_detectors = []
+             ais = []
+             for ai in config.azimuthal_integrators:
+                 if ai.get_id() in nxdata:
+                     if nxdata[ai.get_id()].ndim != 3:
+                         raise RuntimeError(
+                             'Inconsistent raw data dimension '
+                             f'{nxdata[ai.get_id()].ndim}') from exc
+                     ais.append(ai)
+                 else:
+                     skipped_detectors.append(ai.get_id())
+             if skipped_detectors:
+                 self.logger.warning('Skipping detector(s) '
+                                     f'{skipped_detectors} (no raw data)')
+             if not ais:
+                 raise RuntimeError(
+                     'No matching raw detector data found') from exc
+             config.azimuthal_integrators = ais
+             if 'unstructured_axes' in nxdata.attrs:
+                 axes = nxdata.attrs['unstructured_axes']
+                 independent_dims[ais[0].get_id()] = [
+                     nxcopy(nxdata[a]) for a in axes]
+             elif 'axes' in nxdata.attrs:
+                 axes = nxdata.attrs['axes']
+                 independent_dims[ais[0].get_id()] = nxcopy(nxdata[axes])
+             else:
+                 self.logger.warning('Unable to find independent_dimensions')
+             data[ais[0].get_id()] = nxdata[ais[0].get_id()]
+
+         # Select the images to integrate
+         if False and config.scan_step_indices is not None:
+             #FIX
+             independent_dims = independent_dims[config.scan_step_indices]
+             data = data[config.scan_step_indices]
+         self.logger.debug(
+             f'data shape(s): {[(k, v.shape) for k, v in data.items()]}')
+         if config.sum_axes:
+             data = {k:np.sum(v.nxdata, axis=0)[None,:,:]
+                     for k, v in data.items()}
+             self.logger.debug('data shape(s) after summing: '
+                               f'{[(k, v.shape) for k, v in data.items()]}')
+
+         # Read the mask(s)
+         masks = {}
+         for ai in config.azimuthal_integrators:
+             self.logger.debug(f'Reading {ai.mask_file}')
+             try:
+                 with fabio.open(ai.mask_file) as f:
+                     mask = f.data
+                 self.logger.debug(
+                     f'mask shape for {ai.get_id()}: {mask.shape}')
+                 masks[ai.get_id()] = mask
+             except Exception:
+                 self.logger.debug(f'No mask file found for {ai.get_id()}')
+         if not masks:
+             masks = None
+
+         # Perform integration(s)
+         ais = {ai.get_id(): ai.ai for ai in config.azimuthal_integrators}
+         for integration in config.integrations:
+
+             # Add an NXprocess object to the NXroot
+             nxprocess = NXprocess()
+             try:
+                 nxroot[f'{nxroot.default}_{integration.name}'] = nxprocess
+             except Exception:
+                 # Copy nxroot if nxroot is read as read-only
+                 nxroot = nxcopy(nxroot)
+                 nxroot[f'{nxroot.default}_{integration.name}'] = nxprocess
+             nxprocess.integration_config = integration.model_dump_json()
+             nxprocess.azimuthal_integrators = [
+                 ai.model_dump_json() for ai in config.azimuthal_integrators]
+
+             # Integrate the data
+             results = integration.integrate(ais, data, masks)
+
+             # Create the NXdata object with the integrated data
+             intensities = results['intensities']
+             if config.sum_axes:
+                 coords = []
+             elif isinstance(axes, str):
+                 coords = [v for k, v in independent_dims.items() if k in ais]
+             else:
+                 coords = [i for k, v in independent_dims.items()
+                           for i in v if k in ais]
+             if ('azimuthal' in results
+                     and results['azimuthal']['unit'] == 'chi_deg'):
+                 chi = results['azimuthal']['coords']
+                 if integration.right_handed:
+                     chi = -np.flip(chi)
+                     intensities = np.flip(intensities, (len(coords)))
+                 coords.append(NXfield(chi, 'chi', attrs={'units': 'deg'}))
+             if results['radial']['unit'] == 'q_A^-1':
+                 unit = Unit.INV_ANGSTROM.symbol
+                 coords.append(
+                     NXfield(
+                         results['radial']['coords'], 'q',
+                         attrs={'units': unit}))
+             else:
+                 coords.append(
+                     NXfield(
+                         results['radial']['coords'], 'r'))#,
+                         # attrs={'units': '\u212b'}))
+                 self.logger.warning(
+                     f'Unknown radial unit: {results["radial"]["unit"]}')
+             nxdata = NXdata(NXfield(intensities, 'integrated'), tuple(coords))
+             if not isinstance(axes, str):
+                 nxdata.attrs['unstructured_axes'] = nxdata.attrs['axes'][:-1]
+                 del nxdata.attrs['axes']
+             nxprocess.data = nxdata
+             nxprocess.default = 'data'
+
+         return nxroot
+
+
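The integration.integrate() call above presumably wraps pyFAI's azimuthal integration (the 'q_A^-1' and 'chi_deg' units and the Unit import are pyFAI's). For orientation, a rough standalone equivalent of a single-image integration with the public pyFAI API, where the PONI file, image, and point counts are placeholders and this is not the CHAP PyfaiIntegrationConfig interface itself:

    # Standalone pyFAI integration sketch (placeholder inputs).
    import numpy as np
    import pyFAI

    ai = pyFAI.load('detector.poni')          # placeholder PONI file
    image = np.zeros(ai.get_shape())          # placeholder detector image

    # 1-D azimuthal integration: intensity vs q in 1/Angstrom
    result1d = ai.integrate1d(image, npt=1000, unit='q_A^-1')
    q, intensity = result1d.radial, result1d.intensity

    # 2-D "cake" integration: intensity vs (q, chi in degrees)
    result2d = ai.integrate2d(image, npt_rad=1000, npt_azim=360,
                              unit='q_A^-1')
    cake, q2, chi = result2d.intensity, result2d.radial, result2d.azimuthal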
+ if __name__ == '__main__':
+     # Local modules
+     from CHAP.processor import main
+
+     main()