ChessAnalysisPipeline 0.0.17.dev3__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. CHAP/TaskManager.py +216 -0
  2. CHAP/__init__.py +27 -0
  3. CHAP/common/__init__.py +57 -0
  4. CHAP/common/models/__init__.py +8 -0
  5. CHAP/common/models/common.py +124 -0
  6. CHAP/common/models/integration.py +659 -0
  7. CHAP/common/models/map.py +1291 -0
  8. CHAP/common/processor.py +2869 -0
  9. CHAP/common/reader.py +658 -0
  10. CHAP/common/utils.py +110 -0
  11. CHAP/common/writer.py +730 -0
  12. CHAP/edd/__init__.py +23 -0
  13. CHAP/edd/models.py +876 -0
  14. CHAP/edd/processor.py +3069 -0
  15. CHAP/edd/reader.py +1023 -0
  16. CHAP/edd/select_material_params_gui.py +348 -0
  17. CHAP/edd/utils.py +1572 -0
  18. CHAP/edd/writer.py +26 -0
  19. CHAP/foxden/__init__.py +19 -0
  20. CHAP/foxden/models.py +71 -0
  21. CHAP/foxden/processor.py +124 -0
  22. CHAP/foxden/reader.py +224 -0
  23. CHAP/foxden/utils.py +80 -0
  24. CHAP/foxden/writer.py +168 -0
  25. CHAP/giwaxs/__init__.py +11 -0
  26. CHAP/giwaxs/models.py +491 -0
  27. CHAP/giwaxs/processor.py +776 -0
  28. CHAP/giwaxs/reader.py +8 -0
  29. CHAP/giwaxs/writer.py +8 -0
  30. CHAP/inference/__init__.py +7 -0
  31. CHAP/inference/processor.py +69 -0
  32. CHAP/inference/reader.py +8 -0
  33. CHAP/inference/writer.py +8 -0
  34. CHAP/models.py +227 -0
  35. CHAP/pipeline.py +479 -0
  36. CHAP/processor.py +125 -0
  37. CHAP/reader.py +124 -0
  38. CHAP/runner.py +277 -0
  39. CHAP/saxswaxs/__init__.py +7 -0
  40. CHAP/saxswaxs/processor.py +8 -0
  41. CHAP/saxswaxs/reader.py +8 -0
  42. CHAP/saxswaxs/writer.py +8 -0
  43. CHAP/server.py +125 -0
  44. CHAP/sin2psi/__init__.py +7 -0
  45. CHAP/sin2psi/processor.py +8 -0
  46. CHAP/sin2psi/reader.py +8 -0
  47. CHAP/sin2psi/writer.py +8 -0
  48. CHAP/tomo/__init__.py +15 -0
  49. CHAP/tomo/models.py +210 -0
  50. CHAP/tomo/processor.py +3862 -0
  51. CHAP/tomo/reader.py +9 -0
  52. CHAP/tomo/writer.py +59 -0
  53. CHAP/utils/__init__.py +6 -0
  54. CHAP/utils/converters.py +188 -0
  55. CHAP/utils/fit.py +2947 -0
  56. CHAP/utils/general.py +2655 -0
  57. CHAP/utils/material.py +274 -0
  58. CHAP/utils/models.py +595 -0
  59. CHAP/utils/parfile.py +224 -0
  60. CHAP/writer.py +122 -0
  61. MLaaS/__init__.py +0 -0
  62. MLaaS/ktrain.py +205 -0
  63. MLaaS/mnist_img.py +83 -0
  64. MLaaS/tfaas_client.py +371 -0
  65. chessanalysispipeline-0.0.17.dev3.dist-info/LICENSE +60 -0
  66. chessanalysispipeline-0.0.17.dev3.dist-info/METADATA +29 -0
  67. chessanalysispipeline-0.0.17.dev3.dist-info/RECORD +70 -0
  68. chessanalysispipeline-0.0.17.dev3.dist-info/WHEEL +5 -0
  69. chessanalysispipeline-0.0.17.dev3.dist-info/entry_points.txt +2 -0
  70. chessanalysispipeline-0.0.17.dev3.dist-info/top_level.txt +2 -0
CHAP/edd/reader.py ADDED
@@ -0,0 +1,1023 @@
#!/usr/bin/env python
"""EDD specific readers."""

# System modules
import os
from typing import Optional

# Third party modules
# pylint: disable=import-error
from chess_scanparsers import SMBMCAScanParser as ScanParser
# pylint: enable=import-error
import numpy as np
from pydantic import (
    conint,
    conlist,
    constr,
    field_validator,
)

# Local modules
from CHAP.reader import Reader
from CHAP.common.models.map import DetectorConfig


class EddMapReader(Reader):
    """Reader for taking an EDD-style .par file and returning a
    `MapConfig` representing one of the datasets in the file.
    Independent dimensions are determined automatically, and a
    specific set of extra scalar datasets to include is hard-coded
    in. The raw data is read if detector_names are specified.

    :ivar scan_numbers: List of scan numbers to use.
    :type scan_numbers: Union(int, list[int], str), optional
    :ivar dataset_id: Dataset ID value in the .par file to return as a
        map, defaults to `1`.
    :type dataset_id: int, optional
    """
    scan_numbers: Optional[
        conlist(item_type=conint(gt=0), min_length=1)] = None
    dataset_id: Optional[conint(ge=1)] = 1

    @field_validator('scan_numbers', mode='before')
    @classmethod
    def validate_scan_numbers(cls, scan_numbers):
        """Validate the specified list of scan numbers.

        :param scan_numbers: List of scan numbers.
        :type scan_numbers: Union(int, list[int], str)
        :raises ValueError: If a specified scan number is not found in
            the SPEC file.
        :return: List of scan numbers.
        :rtype: list[int]
        """
        if isinstance(scan_numbers, int):
            scan_numbers = [scan_numbers]
        elif isinstance(scan_numbers, str):
            # Local modules
            from CHAP.utils.general import string_to_list

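            # Example (an assumption about the SPEC-style range
            # notation parsed by string_to_list):
            #     '1-3, 5' -> [1, 2, 3, 5]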
            scan_numbers = string_to_list(scan_numbers)
        return scan_numbers

    def read(self):
        """Return a validated `MapConfig` object representing an EDD
        dataset, dumped to a dictionary.

        :returns: Map configuration.
        :rtype: dict
        """
        # Local modules
        from CHAP.common.models.map import MapConfig
        from CHAP.utils.general import (
            list_to_string,
        )
        from CHAP.utils.parfile import ParFile

        parfile = ParFile(self.filename, scan_numbers=self.scan_numbers)
        self.logger.debug(f'spec_file: {parfile.spec_file}')

        attrs = {}

        # Get list of scan numbers for the dataset
        try:
            dataset_ids = parfile.get_values('dataset_id')
            dataset_rows = np.argwhere(np.where(
                np.asarray(dataset_ids) == self.dataset_id, 1, 0)).flatten()
        except (TypeError, ValueError):
            dataset_rows = np.arange(len(parfile.scan_numbers))
            attrs['dataset_id'] = 1
        scan_nos = [parfile.data[i][parfile.scann_i] for i in dataset_rows
                    if parfile.data[i][parfile.scann_i] in
                    parfile.good_scan_numbers()]
        if not scan_nos:
            raise RuntimeError('Unable to find scans with dataset_id '
                               f'matching {self.dataset_id}')
        self.logger.debug(f'Scan numbers: {list_to_string(scan_nos)}')
        spec_scans = [
            {'spec_file': parfile.spec_file, 'scan_numbers': scan_nos}]

        # Get scan type for this dataset
        try:
            scan_types = parfile.get_values('scan_type', scan_numbers=scan_nos)
            if any(st != scan_types[0] for st in scan_types):
                raise RuntimeError(
                    'Only one scan type per dataset is supported.')
            scan_type = scan_types[0]
        except ValueError as e:
            # Third party modules
            # pylint: disable=import-error
            from chess_scanparsers import SMBScanParser

            scanparser = SMBScanParser(parfile.spec_file, scan_nos[0])
            if scanparser.spec_macro == 'tseries':
                scan_type = 0
            else:
                raise RuntimeError('Old style par files not supported for '
                                   'spec_macro != tseries') from e
        attrs['scan_type'] = scan_type
        self.logger.debug(f'Scan type: {scan_type}')

        # Based on scan type, get independent_dimensions for the map.
        # Start by adding labx, laby, labz, and omega. Any "extra"
        # dimensions will be squeezed out of the map later.
        independent_dimensions = [
            {'label': 'labx', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labx'},
            {'label': 'laby', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'laby'},
            {'label': 'labz', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labz'},
            {'label': 'ometotal', 'units': 'degrees',
             'data_type': 'smb_par', 'name': 'ometotal'},
        ]
        scalar_data = []
        if scan_type != 0:
            self.logger.warning(
                'Assuming all fly axes parameters are identical for all scans')
            attrs['fly_axis_labels'] = []
            axes_labels = {
                1: 'fly_labx', 2: 'fly_laby', 3: 'fly_labz', 4: 'fly_ometotal'}
            axes_units = {1: 'mm', 2: 'mm', 3: 'mm', 4: 'degrees'}
            axes_added = []
            scanparser = ScanParser(parfile.spec_file, scan_nos[0])
            def add_fly_axis(fly_axis_index):
                """Add the fly axis info."""
                if fly_axis_index in axes_added:
                    return
                fly_axis_key = scanparser.pars[f'fly_axis{fly_axis_index}']
                independent_dimensions.append({
                    'label': axes_labels[fly_axis_key],
                    'data_type': 'spec_motor',
                    'units': axes_units[fly_axis_key],
                    'name': scanparser.spec_scan_motor_mnes[fly_axis_index],
                })
                axes_added.append(fly_axis_index)
                attrs['fly_axis_labels'].append(axes_labels[fly_axis_key])
            add_fly_axis(0)
            if scan_type in (2, 3, 5):
                add_fly_axis(1)
            if scan_type == 5:
                scalar_data.append({
                    'label': 'bin_axis', 'units': 'n/a',
                    'data_type': 'smb_par', 'name': 'bin_axis',
                })
                attrs['bin_axis_label'] = axes_labels[
                    scanparser.pars['bin_axis']].replace('fly_', '')

        # Add in the usual extra scalar data maps for EDD
        scalar_data.append({
            'label': 'SCAN_N', 'units': 'n/a', 'data_type': 'smb_par',
            'name': 'SCAN_N',
        })
        if 'rsgap_size' in parfile.column_names:
            scalar_data.append({
                'label': 'rsgap_size', 'units': 'mm',
                'data_type': 'smb_par', 'name': 'rsgap_size',
            })
        if 'x_effective' in parfile.column_names:
            scalar_data.append({
                'label': 'x_effective', 'units': 'mm',
                'data_type': 'smb_par', 'name': 'x_effective',
            })
        if 'z_effective' in parfile.column_names:
            scalar_data.append({
                'label': 'z_effective', 'units': 'mm',
                'data_type': 'smb_par', 'name': 'z_effective',
            })

        # Construct the initial map config dictionary
        scanparser = ScanParser(parfile.spec_file, scan_nos[0])
        map_config_dict = {
            'title': f'{scanparser.scan_name}_dataset{self.dataset_id}',
            'station': 'id1a3',
            'experiment_type': 'EDD',
            'sample': {'name': scanparser.scan_name},
            'spec_scans': spec_scans,
            'independent_dimensions': independent_dimensions,
            'scalar_data': scalar_data,
            'presample_intensity': {
                'name': 'a3ic1',
                'data_type': 'scan_column'},
            'postsample_intensity': {
                'name': 'diode',
                'data_type': 'scan_column'},
            'dwell_time_actual': {
                'name': 'sec',
                'data_type': 'scan_column'},
            'attrs': attrs,
        }

        # Add lab coordinates to the map's scalar_data only if they
        # are NOT already one of the map's independent_dimensions
        lab_dims = [
            {'label': 'labx', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labx'},
            {'label': 'laby', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'laby'},
            {'label': 'labz', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labz'},
            {'label': 'ometotal', 'units': 'degrees',
             'data_type': 'smb_par', 'name': 'ometotal'},
        ]
        for dim in lab_dims:
            if dim not in independent_dimensions:
                scalar_data.append(dim)

        # Validate the map config and convert its list of
        # scan_numbers to string notation
        map_config = MapConfig(**map_config_dict)
        map_config.spec_scans[0].scan_numbers = list_to_string(
            map_config.spec_scans[0].scan_numbers)

        return map_config.model_dump()
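

def _example_edd_map_reader():
    """Hypothetical usage sketch, not part of the released module:
    drive EddMapReader standalone. It assumes the base Reader exposes
    `filename` as a model field and wires up `self.logger`, as the
    read() method above suggests, and that 'samplename.par' is a
    valid EDD-style .par file.
    """
    reader = EddMapReader(
        filename='samplename.par', dataset_id=1, scan_numbers='1-5')
    # read() returns the validated map configuration as a plain dict
    return reader.read()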


class EddMPIMapReader(Reader):
    """Reader for taking an EDD-style .par file and returning one of
    the datasets in the file as a NeXus NXentry object. Independent
    dimensions are determined automatically, and a specific set of
    extra scalar datasets to include is hard-coded in.

    :ivar dataset_id: Dataset ID value in the .par file to return as a
        map, defaults to `1`.
    :type dataset_id: int, optional
    :ivar detector_ids: Detector IDs for the raw data.
    :type detector_ids: Union(int, list[int], str)
    """
    dataset_id: Optional[conint(ge=1)] = 1
    detector_ids: conlist(item_type=conint(gt=0), min_length=1)

    @field_validator('detector_ids', mode='before')
    @classmethod
    def validate_detector_ids(cls, detector_ids):
        """Validate the specified list of detector IDs.

        :param detector_ids: Detector IDs.
        :type detector_ids: Union(int, list[int], str)
        :return: List of Detector IDs.
        :rtype: list[int]
        """
        if isinstance(detector_ids, int):
            detector_ids = [detector_ids]
        elif isinstance(detector_ids, str):
            # Local modules
            from CHAP.utils.general import string_to_list

            detector_ids = string_to_list(detector_ids)
        return detector_ids

    def read(self):
        """Return a NeXus NXentry object after validating the
        `MapConfig` object representing an EDD dataset.

        :returns: The EDD map including the raw data packaged.
        :rtype: nexusformat.nexus.NXentry
        """
        # Third party modules
        # pylint: disable=no-name-in-module
        from nexusformat.nexus import (
            NXcollection,
            NXdata,
            NXentry,
            NXfield,
            NXsample,
        )
        # pylint: enable=no-name-in-module

        # Local modules
        from CHAP.common.models.map import MapConfig
        from CHAP.utils.parfile import ParFile

        parfile = ParFile(self.filename)
        self.logger.debug(f'spec_file: {parfile.spec_file}')

        # Get list of scan numbers for the dataset
        dataset_ids = np.asarray(parfile.get_values('dataset_id'))
        dataset_rows = np.argwhere(np.where(
            np.asarray(dataset_ids) == self.dataset_id, 1, 0)).flatten()
        scan_nos = [parfile.data[i][parfile.scann_i] for i in dataset_rows
                    if parfile.data[i][parfile.scann_i] in
                    parfile.good_scan_numbers()]
        self.logger.debug(f'Scan numbers: {scan_nos}')
        spec_scans = [
            {'spec_file': parfile.spec_file, 'scan_numbers': scan_nos}]

        # Get scan type for this dataset
        scan_types = parfile.get_values('scan_type', scan_numbers=scan_nos)
        if any(st != scan_types[0] for st in scan_types):
            msg = 'Only one scan type per dataset is supported.'
            self.logger.error(msg)
            raise ValueError(msg)
        scan_type = scan_types[0]
        self.logger.debug(f'Scan type: {scan_type}')

        # Based on scan type, get independent_dimensions for the map.
        # Start by adding labx, laby, labz, and omega. Any "extra"
        # dimensions will be squeezed out of the map later.
        independent_dimensions = [
            {'label': 'labx', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labx'},
            {'label': 'laby', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'laby'},
            {'label': 'labz', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labz'},
            {'label': 'ometotal', 'units': 'degrees',
             'data_type': 'smb_par', 'name': 'ometotal'},
        ]
        scalar_data = []
        attrs = {}
        if scan_type != 0:
            self.logger.warning(
                'Assuming all fly axes parameters are identical for all scans')
            attrs['fly_axis_labels'] = []
            axes_labels = {
                1: 'fly_labx', 2: 'fly_laby', 3: 'fly_labz', 4: 'fly_ometotal'}
            axes_units = {1: 'mm', 2: 'mm', 3: 'mm', 4: 'degrees'}
            axes_added = []
            scanparser = ScanParser(parfile.spec_file, scan_nos[0])
            def add_fly_axis(fly_axis_index):
                """Add the fly axis info."""
                if fly_axis_index in axes_added:
                    return
                fly_axis_key = scanparser.pars[f'fly_axis{fly_axis_index}']
                independent_dimensions.append({
                    'label': axes_labels[fly_axis_key],
                    'data_type': 'spec_motor',
                    'units': axes_units[fly_axis_key],
                    'name': scanparser.spec_scan_motor_mnes[fly_axis_index],
                })
                axes_added.append(fly_axis_index)
                attrs['fly_axis_labels'].append(axes_labels[fly_axis_key])
            add_fly_axis(0)
            if scan_type in (2, 3, 5):
                add_fly_axis(1)
            if scan_type == 5:
                scalar_data.append({
                    'label': 'bin_axis', 'units': 'n/a',
                    'data_type': 'smb_par', 'name': 'bin_axis',
                })
                attrs['bin_axis_label'] = axes_labels[
                    scanparser.pars['bin_axis']].replace('fly_', '')

        # Add in the usual extra scalar data maps for EDD
        scalar_data.extend([
            {'label': 'SCAN_N', 'units': 'n/a', 'data_type': 'smb_par',
             'name': 'SCAN_N'},
            {'label': 'rsgap_size', 'units': 'mm',
             'data_type': 'smb_par', 'name': 'rsgap_size'},
            {'label': 'x_effective', 'units': 'mm',
             'data_type': 'smb_par', 'name': 'x_effective'},
            {'label': 'z_effective', 'units': 'mm',
             'data_type': 'smb_par', 'name': 'z_effective'},
        ])

        # Construct and validate the initial map config dictionary
        scanparser = ScanParser(parfile.spec_file, scan_nos[0])
        map_config_dict = {
            'title': f'{scanparser.scan_name}_dataset{self.dataset_id}',
            'station': 'id1a3',
            'experiment_type': 'EDD',
            'sample': {'name': scanparser.scan_name},
            'spec_scans': spec_scans,
            'independent_dimensions': independent_dimensions,
            'scalar_data': scalar_data,
            'presample_intensity': {
                'name': 'a3ic1',
                'data_type': 'scan_column'},
            'postsample_intensity': {
                'name': 'diode',
                'data_type': 'scan_column'},
            'dwell_time_actual': {
                'name': 'sec',
                'data_type': 'scan_column'},
            'attrs': attrs,
        }
        map_config = MapConfig(**map_config_dict)

        # Squeeze out extraneous independent dimensions (dimensions
        # along which data were taken at only one unique coordinate
        # value)
        while 1 in map_config.shape:
            remove_dim_index = map_config.shape.index(1)
            self.logger.debug(
                'Map dimensions: '
                + str([dim["label"] for dim in independent_dimensions]))
            self.logger.debug(f'Map shape: {map_config.shape}')
            self.logger.debug(
                'Squeezing out independent dimension '
                f'{independent_dimensions[remove_dim_index]["label"]}')
            independent_dimensions.pop(remove_dim_index)
            map_config = MapConfig(**map_config_dict)
        self.logger.debug(
            'Map dimensions: '
            + str([dim["label"] for dim in independent_dimensions]))
        self.logger.debug(f'Map shape: {map_config.shape}')

        # Add lab coordinates to the map's scalar_data only if they
        # are NOT already one of the squeezed map's
        # independent_dimensions.
        lab_dims = [
            {'label': 'labx', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labx'},
            {'label': 'laby', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'laby'},
            {'label': 'labz', 'units': 'mm', 'data_type': 'smb_par',
             'name': 'labz'},
            {'label': 'ometotal', 'units': 'degrees',
             'data_type': 'smb_par', 'name': 'ometotal'},
        ]
        for dim in lab_dims:
            if dim not in independent_dimensions:
                scalar_data.append(dim)

        # Set up NXentry and add misc. CHESS-specific metadata
        nxentry = NXentry(name=map_config.title)
        nxentry.attrs['station'] = map_config.station
        nxentry.map_config = map_config.model_dump_json()
        nxentry.spec_scans = NXcollection()
        for scans in map_config.spec_scans:
            nxentry.spec_scans[scans.scanparsers[0].scan_name] = NXfield(
                value=scans.scan_numbers,
                attrs={'spec_file': str(scans.spec_file)})

        # Add sample metadata
        nxentry[map_config.sample.name] = NXsample(
            **map_config.sample.model_dump())

        # Set up default data group
        nxentry.data = NXdata()
        independent_dimensions = map_config.independent_dimensions
        for dim in independent_dimensions:
            nxentry.data[dim.label] = NXfield(
                units=dim.units,
                attrs={'long_name': f'{dim.label} ({dim.units})',
                       'data_type': dim.data_type,
                       'local_name': dim.name})

        # Read the raw data and independent dimensions
        data = [[] for _ in self.detector_ids]
        dims = [[] for _ in independent_dimensions]
        for scans in map_config.spec_scans:
            for scan_number in scans.scan_numbers:
                scanparser = scans.get_scanparser(scan_number)
                for i, detector_id in enumerate(self.detector_ids):
                    ddata = scanparser.get_detector_data(detector_id)
                    data[i].append(ddata)
                for i, dim in enumerate(independent_dimensions):
                    dims[i].append(dim.get_value(
                        scans, scan_number, scan_step_index=-1, relative=True))

        # Package the raw detector data and the coordinate values
        # into the default data group, then return the NXentry
        for i, detector_id in enumerate(self.detector_ids):
            nxentry.data[f'{detector_id}'] = np.concatenate(data[i])
        for i, dim in enumerate(independent_dimensions):
            nxentry.data[dim.label].nxdata = np.concatenate(
                [np.atleast_1d(d) for d in dims[i]])

        return nxentry
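

def _example_edd_mpi_map_reader():
    """Hypothetical usage sketch, not part of the released module:
    read dataset 1 with the raw data from MCA elements 1-3 packaged
    into the returned NXentry. The filename and detector IDs are
    illustrative assumptions.
    """
    reader = EddMPIMapReader(
        filename='samplename.par', dataset_id=1, detector_ids=[1, 2, 3])
    return reader.read()  # nexusformat.nexus.NXentry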


class ScanToMapReader(Reader):
    """Reader for turning a single SPEC scan into a MapConfig.

    :ivar scan_number: Number of the SPEC scan.
    :type scan_number: int
    """
    scan_number: conint(ge=0)

    def read(self):
        """Return a dictionary representing a valid map configuration
        consisting of the single SPEC scan specified.

        :returns: Map configuration dictionary.
        :rtype: dict
        """
        scanparser = ScanParser(self.filename, self.scan_number)

        if (scanparser.spec_macro in ('tseries', 'loopscan') or
                (scanparser.spec_macro == 'flyscan' and
                 len(scanparser.spec_args) != 5)):
            independent_dimensions = [{
                'label': 'Time', 'units': 'seconds',
                'data_type': 'scan_column', 'name': 'Time',
            }]
        else:
            independent_dimensions = [
                {'label': mne, 'units': 'unknown units',
                 'data_type': 'spec_motor', 'name': mne}
                for mne in scanparser.spec_scan_motor_mnes]

        map_config_dict = {
            'title': f'{scanparser.scan_name}_{self.scan_number:03d}',
            'station': 'id1a3',
            'experiment_type': 'EDD',
            'sample': {'name': scanparser.scan_name},
            'spec_scans': [{
                'spec_file': self.filename,
                'scan_numbers': [self.scan_number]}],
            'independent_dimensions': independent_dimensions,
            'presample_intensity': {
                'name': 'a3ic1',
                'data_type': 'scan_column'},
            'postsample_intensity': {
                'name': 'diode',
                'data_type': 'scan_column'},
            'dwell_time_actual': {
                'name': 'sec',
                'data_type': 'scan_column'},
        }

        return map_config_dict
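

def _example_scan_to_map_reader():
    """Hypothetical usage sketch, not part of the released module:
    wrap a single SPEC scan as a map configuration dictionary. The
    SPEC file name and scan number are illustrative assumptions.
    """
    reader = ScanToMapReader(filename='spec.log', scan_number=1)
    return reader.read()  # map configuration dict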


class SetupNXdataReader(Reader):
    """Reader for converting the SPEC input .txt file for EDD dataset
    collection to an appropriate input argument for
    `CHAP.common.SetupNXdataProcessor`.

    :ivar dataset_id: Dataset ID value in the .txt file to return
        `CHAP.common.SetupNXdataProcessor.process` arguments for.
    :type dataset_id: int
    :ivar detectors: Detector list.
    :type detectors: Union[
        list[dict], CHAP.common.models.map.DetectorConfig]
    """
    dataset_id: conint(ge=1)
    detectors: DetectorConfig

    @field_validator('detectors', mode='before')
    @classmethod
    def validate_detectors(cls, detectors):
        """Validate the specified list of detectors.

        :param detectors: Detectors list.
        :type detectors: list[CHAP.common.models.map.Detector]
        :return: Detectors list.
        :rtype: list[CHAP.common.models.map.Detector]
        """
        if detectors is None:
            detectors = [{'id': i} for i in range(23)]
        return DetectorConfig(detectors=detectors)

    def read(self):
        """Return a dictionary containing the `coords`, `signals`, and
        `attrs` arguments appropriate for use with
        `CHAP.common.SetupNXdataProcessor.process` to set up an
        initial `NXdata` object representing a complete and organized
        structured EDD dataset.

        :returns: The dataset's coordinate names, values, attributes,
            and signal names, shapes, and attributes.
        :rtype: dict
        """
        # Columns in input .txt file:
        # 0: scan number
        # 1: dataset index
        # 2: configuration descriptor
        # 3: labx
        # 4: laby
        # 5: labz
        # 6: omega (reference)
        # 7: omega (offset)
        # 8: dwell time
        # 9: beam width
        # 10: beam height
        # 11: detector slit gap width
        # 12: scan type

        # Following columns used only for scan types 1 and up and
        # specify flyscan/flymesh parameters.
        # 13 + 4n: scan direction axis index
        # 14 + 4n: lower bound
        # 15 + 4n: upper bound
        # 16 + 4n: no. points
        # (For scan types 1, 4: n = 0)
        # (For scan types 2, 3, 5: n = 0 or 1)

        # For scan type 5 only:
        # 21: bin axis

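        # An illustrative (hypothetical) line for a scan type 1
        # flyscan, mapped onto the columns above:
        #   11 1 config0 0.0 1.5 -0.2 90.0 0.0 2.0 0.1 0.5 0.3 1 1 -1.0 1.0 21
        # i.e. scan 11 of dataset 1 flies axis 1 (labx) from -1.0 to
        # 1.0 in 21 points.
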
        # Parse dataset from the input .txt file.
        with open(self.filename, 'r') as f:
            file_lines = f.readlines()
        dataset_lines = []
        for l in file_lines:
            vals = l.split()
            for i, v in enumerate(vals):
                try:
                    vals[i] = int(v)
                except ValueError:
                    try:
                        vals[i] = float(v)
                    except ValueError:
                        pass
            if vals[1] == self.dataset_id:
                dataset_lines.append(vals)

        # Start inferring coords and signals lists for EDD experiments
        self.logger.warning(
            'Assuming the following parameters are identical across the '
            'entire dataset: scan type, configuration descriptor')
        scan_type = dataset_lines[0][12]
        self.logger.debug(f'scan_type = {scan_type}')
        # Set up even the potential "coordinates" (labx, laby, labz,
        # ometotal) as "signals" because we want to force
        # common.SetupNXdataProcessor to set up all EDD datasets as
        # UNstructured with a single actual coordinate
        # (dataset_point_index).
        signals = [
            {'name': 'labx', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'labx',
                       'data_type': 'smb_par'}},
            {'name': 'laby', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'laby',
                       'data_type': 'smb_par'}},
            {'name': 'labz', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'labz',
                       'data_type': 'smb_par'}},
            {'name': 'ometotal', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'degrees', 'local_name': 'ometotal',
                       'data_type': 'smb_par'}},
            {'name': 'presample_intensity', 'shape': [], 'dtype': 'uint64',
             'attrs': {'units': 'counts', 'local_name': 'a3ic1',
                       'data_type': 'scan_column'}},
            {'name': 'postsample_intensity', 'shape': [], 'dtype': 'uint64',
             'attrs': {'units': 'counts', 'local_name': 'diode',
                       'data_type': 'scan_column'}},
            {'name': 'dwell_time_actual', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'seconds', 'local_name': 'sec',
                       'data_type': 'scan_column'}},
            {'name': 'SCAN_N', 'shape': [], 'dtype': 'uint8',
             'attrs': {'units': 'n/a', 'local_name': 'SCAN_N',
                       'data_type': 'smb_par'}},
            {'name': 'rsgap_size', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'rsgap_size',
                       'data_type': 'smb_par'}},
            {'name': 'x_effective', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'x_effective',
                       'data_type': 'smb_par'}},
            {'name': 'z_effective', 'shape': [], 'dtype': 'float64',
             'attrs': {'units': 'mm', 'local_name': 'z_effective',
                       'data_type': 'smb_par'}},
        ]

        # Add each MCA channel to the list of signals
        for d in self.detectors:
            signals.append(
                {'name': d.get_id(), 'attrs': d.attrs, 'dtype': 'uint64',
                 'shape': d.attrs.get('shape', (4096,))})

        # Attributes to attach for use by edd.StrainAnalysisProcessor:
        attrs = {'dataset_id': self.dataset_id,
                 'config_id': dataset_lines[0][2],
                 'scan_type': scan_type,
                 'unstructured_axes': ['labx', 'laby', 'labz', 'ometotal']}

        # Append additional fly_* signals depending on the scan type
        # of the dataset. Also find the number of points per scan.
        if scan_type == 0:
            scan_npts = 1
            fly_axis_values = None
        else:
            self.logger.warning(
                'Assuming scan parameters are identical for all scans.')
            axes_labels = {1: 'fly_labx', 2: 'fly_laby', 3: 'fly_labz',
                           4: 'fly_ometotal'}
            axes_units = {1: 'mm', 2: 'mm', 3: 'mm', 4: 'degrees'}
            signals.append({
                'name': axes_labels[dataset_lines[0][13]],
                'attrs': {'units': axes_units[dataset_lines[0][13]],
                          'relative': True},
                'shape': [], 'dtype': 'float64'
            })
            scan_npts = dataset_lines[0][16]
            fly_axis_labels = [axes_labels[dataset_lines[0][13]]]
            fly_axis_values = {fly_axis_labels[0]:
                               np.round(np.linspace(
                                   dataset_lines[0][14], dataset_lines[0][15],
                                   dataset_lines[0][16]), 3)}
            scan_shape = (len(fly_axis_values[fly_axis_labels[0]]),)
            if scan_type in (2, 3, 5):
                signals.append({
                    'name': axes_labels[dataset_lines[0][17]],
                    'attrs': {'units': axes_units[dataset_lines[0][17]],
                              'relative': True},
                    'shape': [], 'dtype': 'float64'
                })
                scan_npts *= dataset_lines[0][20]
                if scan_type == 5:
                    attrs['bin_axis'] = axes_labels[dataset_lines[0][21]]
                fly_axis_labels.append(axes_labels[dataset_lines[0][17]])
                fly_axis_values[fly_axis_labels[-1]] = np.round(
                    np.linspace(dataset_lines[0][18], dataset_lines[0][19],
                                dataset_lines[0][20]), 3)
                scan_shape = (*scan_shape,
                              len(fly_axis_values[fly_axis_labels[-1]]))
            attrs['fly_axis_labels'] = fly_axis_labels
            attrs['unstructured_axes'].extend(fly_axis_labels)

        # Set up the single unstructured dataset coordinate
        dataset_npts = len(dataset_lines) * scan_npts
        coords = [{'name': 'dataset_point_index',
                   'values': list(range(dataset_npts)),
                   'attrs': {'units': 'n/a'}}]

        # Set up the list of data_points to fill out the known values
        # of the physical "coordinates"
        data_points = []
        for i in range(dataset_npts):
            l = dataset_lines[i // scan_npts]
            data_point = {
                'dataset_point_index': i,
                'labx': l[3], 'laby': l[4], 'labz': l[5],
                'ometotal': l[6] + l[7]}
            if fly_axis_values:
                scan_step_index = i % scan_npts
                scan_steps = np.ndindex(scan_shape[::-1])
                ii = 0
                while ii <= scan_step_index:
                    scan_step = next(scan_steps)
                    ii += 1
                scan_step_indices = scan_step[::-1]
                for iii, (k, v) in enumerate(fly_axis_values.items()):
                    data_point[k] = v[scan_step_indices[iii]]
            data_points.append(data_point)

        return {'coords': coords, 'signals': signals,
                'attrs': attrs, 'data_points': data_points}
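

def _example_setup_nxdata_reader():
    """Hypothetical usage sketch, not part of the released module:
    build the SetupNXdataProcessor arguments for dataset 1 of an EDD
    collection .txt file. The filename and the two-element detector
    list are illustrative assumptions.
    """
    reader = SetupNXdataReader(
        filename='dataset.txt', dataset_id=1,
        detectors=[{'id': 0}, {'id': 1}])
    args = reader.read()
    # args['coords'] holds the single unstructured coordinate
    # (dataset_point_index); args['signals'], args['attrs'], and
    # args['data_points'] complete the SetupNXdataProcessor inputs
    return args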


class SliceNXdataReader(Reader):
    """A reader class to load and slice an NXdata group from a NeXus
    file. This class reads EDD (Energy Dispersive Diffraction) data
    from an NXdata group and slices all of its fields according to
    the provided slicing parameters.

    :ivar scan_number: Number of the SPEC scan.
    :type scan_number: int
    """
    scan_number: conint(ge=0)

    def read(self):
        """Read an NXdata group from a NeXus file and slice the
        fields within it based on the provided scan number.

        :raises ValueError: If no NXdata group is found in the file.
        :return: The root object of the NeXus file with sliced NXdata
            fields.
        :rtype: NXroot
        """
        # Third party modules
        from nexusformat.nexus import NXentry, NXfield

        # Local modules
        from CHAP.common import NexusReader
        from CHAP.utils.general import nxcopy

        reader = NexusReader(**self.model_dump())
        nxroot = nxcopy(reader.read())
        nxdata = None
        for nxname, nxobject in nxroot.items():
            if isinstance(nxobject, NXentry):
                nxdata = nxobject.data
        if nxdata is None:
            msg = 'Could not find NXdata group'
            self.logger.error(msg)
            raise ValueError(msg)

        indices = np.argwhere(
            nxdata.SCAN_N.nxdata == self.scan_number).flatten()
        for nxname, nxobject in nxdata.items():
            if isinstance(nxobject, NXfield):
                nxdata[nxname] = NXfield(
                    value=nxobject.nxdata[indices],
                    dtype=nxdata[nxname].dtype,
                    attrs=nxdata[nxname].attrs,
                )

        return nxroot
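

def _example_slice_nxdata_reader():
    """Hypothetical usage sketch, not part of the released module:
    load a NeXus file and keep only the NXdata rows belonging to SPEC
    scan 3. The filename is an illustrative assumption.
    """
    reader = SliceNXdataReader(filename='dataset.nxs', scan_number=3)
    return reader.read()  # NXroot with sliced NXdata fields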


class UpdateNXdataReader(Reader):
    """Companion to `edd.SetupNXdataReader` and
    `common.UpdateNXdataProcessor`. Constructs a list of data points
    to pass as pipeline data to `common.UpdateNXdataProcessor` so that
    an `NXdata` constructed by `edd.SetupNXdataReader` and
    `common.SetupNXdataProcessor` can be updated live as individual
    scans in an EDD dataset are completed.

    :ivar detector_ids: Detector IDs for the raw data.
    :type detector_ids: Union(int, list[int], str), optional
    :ivar scan_number: Number of the SPEC scan.
    :type scan_number: int
    """
    detector_ids: Optional[
        conlist(item_type=conint(gt=0), min_length=1)] = None
    scan_number: conint(ge=0)

    def read(self):
        """Return a list of data points containing raw data values for
        a single EDD SPEC scan. The returned values can be passed
        along to `common.UpdateNXdataProcessor` to fill in an existing
        `NXdata` set up with `common.SetupNXdataProcessor`.

        :returns: List of data points appropriate for input to
            `common.UpdateNXdataProcessor`.
        :rtype: list[dict[str, object]]
        """
        # Local modules
        from CHAP.utils.parfile import ParFile

        scanparser = ScanParser(self.filename, self.scan_number)
        self.logger.debug('Parsed scan')

        # A label / counter mnemonic dict for convenience
        counters = {
            'presample_intensity': 'a3ic0',
            'postsample_intensity': 'diode',
            'dwell_time_actual': 'sec',
        }
        # Determine the scan's own coordinate axes based on scan type
        scan_type = scanparser.pars['scan_type']
        self.logger.debug(f'scan_type = {scan_type}')
        if scan_type == 0:
            scan_axes = []
        else:
            axes_labels = {1: 'fly_labx', 2: 'fly_laby', 3: 'fly_labz',
                           4: 'fly_ometotal'}
            scan_axes = [axes_labels[scanparser.pars['fly_axis0']]]
            if scan_type in (2, 3, 5):
                scan_axes.append(axes_labels[scanparser.pars['fly_axis1']])
        self.logger.debug(f'Determined scan axes: {scan_axes}')

        # Par file values will be the same for all points in any scan
        smb_par_values = {}
        for smb_par in ('labx', 'laby', 'labz', 'ometotal', 'SCAN_N',
                        'rsgap_size', 'x_effective', 'z_effective'):
            smb_par_values[smb_par] = scanparser.pars[smb_par]

        # Get offset for the starting index of this scan's points in
        # the entire dataset.
        dataset_id = scanparser.pars['dataset_id']
        parfile = ParFile(scanparser.par_file)
        good_scans = parfile.good_scan_numbers()
        n_prior_dataset_scans = sum(
            1 if did == dataset_id and scan_n < self.scan_number else 0
            for did, scan_n in zip(
                parfile.get_values('dataset_id', scan_numbers=good_scans),
                good_scans))
        dataset_point_index_offset = \
            n_prior_dataset_scans * scanparser.spec_scan_npts
        self.logger.debug(
            f'dataset_point_index_offset = {dataset_point_index_offset}')

        # Get full data point for every point in the scan
        if self.detector_ids is None:
            self.detector_ids = list(range(23))
        detector_data = scanparser.get_detector_data(self.detector_ids)
        detector_data = {id_: detector_data[:, i, :]
                         for i, id_ in enumerate(self.detector_ids)}
        spec_scan_data = scanparser.spec_scan_data
        self.logger.info(f'Getting {scanparser.spec_scan_npts} data points')
        idx = slice(dataset_point_index_offset,
                    dataset_point_index_offset + scanparser.spec_scan_npts)
        data_points = [
            {'nxpath': f'entry/data/{k}',
             'value': [v] * scanparser.spec_scan_npts,
             'index': idx}
            for k, v in smb_par_values.items()]
        data_points.extend([
            {'nxpath': f'entry/data/{id_}',
             'value': data,
             'index': idx}
            for id_, data in detector_data.items()
        ])
        data_points.extend([
            {'nxpath': f'entry/data/{c}',
             'value': spec_scan_data[counters[c]],
             'index': idx}
            for c in counters
        ])

        return data_points
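

def _example_update_nxdata_reader():
    """Hypothetical usage sketch, not part of the released module:
    collect the data points of one completed scan for
    common.UpdateNXdataProcessor. Each returned item is a dict like
    {'nxpath': 'entry/data/labx', 'value': [...], 'index': slice}.
    The SPEC file name, scan number, and detector IDs are
    illustrative assumptions.
    """
    reader = UpdateNXdataReader(
        filename='spec.log', scan_number=3, detector_ids=[1, 2, 3])
    return reader.read()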


class NXdataSliceReader(Reader):
    """Reader for returning a sliced version of an `NXdata` (which
    represents a full EDD dataset) that contains data from just a
    single SPEC scan.

    :ivar nxpath: Path to the existing full EDD dataset's NXdata
        group in `filename`.
    :type nxpath: str
    :ivar scan_number: Number of the SPEC scan.
    :type scan_number: int
    :ivar spec_file: Name of the SPEC file whose data will be the
        only contents of the returned `NXdata`.
    :type spec_file: str
    """
    nxpath: constr(strip_whitespace=True, min_length=1)
    scan_number: conint(ge=0)
    spec_file: constr(strip_whitespace=True, min_length=1)

    def read(self):
        """Return a "slice" of an EDD dataset's NXdata that represents
        just the data from one scan in the dataset.

        :returns: An `NXdata` similar to the one at `nxpath` in
            `filename`, but containing only the data collected by the
            specified SPEC scan.
        :rtype: nexusformat.nexus.NXdata
        """
        # Third party modules
        from nexusformat.nexus import nxload

        # Local modules
        from CHAP.common import NXdataReader
        from CHAP.utils.parfile import ParFile

        # Parse existing NXdata
        root = nxload(self.filename)
        nxdata = root[self.nxpath]
        if nxdata.nxclass != 'NXdata':
            raise TypeError(
                f'Object at {self.nxpath} in {self.filename} is not an NXdata')
        self.logger.debug('Loaded existing NXdata')

        # Parse scan
        if not os.path.isabs(self.spec_file):
            self.spec_file = os.path.join(self.inputdir, self.spec_file)
        scanparser = ScanParser(self.spec_file, self.scan_number)
        self.logger.debug('Parsed scan')

        # Assemble arguments for NXdataReader
        axes_names = [a.nxname for a in nxdata.nxaxes]
        if nxdata.nxsignal is not None:
            signal_name = nxdata.nxsignal.nxname
        else:
            signal_name = list(nxdata.entries.keys())[0]
        attrs = nxdata.attrs
        nxfield_params = []
        if 'dataset_point_index' in nxdata:
            # Get offset for the starting index of this scan's points
            # in the entire dataset.
            dataset_id = scanparser.pars['dataset_id']
            parfile = ParFile(scanparser.par_file)
            good_scans = parfile.good_scan_numbers()
            n_prior_dataset_scans = sum(
                1 if did == dataset_id and scan_n < self.scan_number else 0
                for did, scan_n in zip(
                    parfile.get_values(
                        'dataset_id', scan_numbers=good_scans),
                    good_scans))
            dataset_point_index_offset = \
                n_prior_dataset_scans * scanparser.spec_scan_npts
            self.logger.debug(
                f'dataset_point_index_offset = {dataset_point_index_offset}')
            slice_params = {
                'start': dataset_point_index_offset,
                'end':
                    dataset_point_index_offset + scanparser.spec_scan_npts + 1,
            }
            nxfield_params = [{'filename': self.filename,
                               'nxpath': entry.nxpath,
                               'slice_params': [slice_params]}
                              for entry in nxdata]
        else:
            signal_slice_params = []
            for a in nxdata.nxaxes:
                if a.nxname.startswith('fly_'):
                    slice_params = {}
                else:
                    value = scanparser.pars[a.nxname]
                    try:
                        index = np.where(a.nxdata == value)[0][0]
                    except Exception:
                        index = np.argmin(np.abs(a.nxdata - value))
                        self.logger.warning(
                            f'Nearest match for coordinate value {a.nxname}: '
                            f'{a.nxdata[index]} (actual value: {value})')
                    slice_params = {'start': index, 'end': index + 1}
                signal_slice_params.append(slice_params)
                nxfield_params.append({
                    'filename': self.filename,
                    'nxpath': os.path.join(nxdata.nxpath, a.nxname),
                    'slice_params': [slice_params],
                })
            for _, entry in nxdata.entries.items():
                if entry in nxdata.nxaxes:
                    continue
                nxfield_params.append({
                    'filename': self.filename,
                    'nxpath': entry.nxpath,
                    'slice_params': signal_slice_params,
                })

        # Return the "sliced" NXdata
        reader = NXdataReader()
        reader.logger = self.logger
        return reader.read(name=nxdata.nxname, nxfield_params=nxfield_params,
                           signal_name=signal_name, axes_names=axes_names,
                           attrs=attrs)
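

def _example_nxdata_slice_reader():
    """Hypothetical usage sketch, not part of the released module:
    return the slice of a full-dataset NXdata that belongs to one
    SPEC scan. All names below are illustrative assumptions, and an
    absolute spec_file path is used because a relative one is joined
    against the pipeline-supplied inputdir.
    """
    reader = NXdataSliceReader(
        filename='dataset.nxs', nxpath='entry/data',
        spec_file='/data/spec.log', scan_number=3)
    return reader.read()  # nexusformat.nexus.NXdata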


if __name__ == '__main__':
    # Local modules
    from CHAP.reader import main

    main()