ChessAnalysisPipeline 0.0.15__py3-none-any.whl → 0.0.16__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This is a potentially problematic release.


This version of ChessAnalysisPipeline might be problematic. See the details below.

CHAP/edd/reader.py CHANGED
@@ -1,14 +1,22 @@
1
1
  #!/usr/bin/env python
2
- from CHAP.reader import Reader
3
2
 
3
+ # System modules
4
+ import os
5
+
6
+ # Third party modules
7
+ import numpy as np
8
+
9
+ # Local modules
10
+ from CHAP.reader import Reader
4
11
 
5
12
  class EddMapReader(Reader):
6
13
  """Reader for taking an EDD-style .par file and returning a
7
14
  `MapConfig` representing one of the datasets in the
8
15
  file. Independent dimensions are determined automatically, and a
9
16
  specific set of items to use for extra scalar datasets to include
10
- are hard-coded in."""
11
- def read(self, parfile, dataset_id):
17
+ are hard-coded in. The raw data is read if detector_names are
18
+ specified."""
19
+ def read(self, parfile, dataset_id, detector_names=None):
12
20
  """Return a validated `MapConfig` object representing an EDD
13
21
  dataset.
14
22
 
@@ -18,15 +26,187 @@ class EddMapReader(Reader):
18
26
  :param dataset_id: Number of the dataset in the .par file
19
27
  to return as a map.
20
28
  :type dataset_id: int
29
+ :param detector_names: Detector prefixes for the raw data.
30
+ :type detector_names: list[str], optional
21
31
  :returns: Map configuration packaged with the appropriate
22
32
  value for 'schema'.
23
33
  :rtype: PipelineData
24
34
  """
25
- import numpy as np
35
+ # Local modules
26
36
  from CHAP.common.models.map import MapConfig
27
37
  from CHAP.pipeline import PipelineData
38
+ from CHAP.utils.general import list_to_string
28
39
  from CHAP.utils.parfile import ParFile
29
- from CHAP.utils.scanparsers import SMBMCAScanParser as ScanParser
40
+ from chess_scanparsers import SMBMCAScanParser as ScanParser
41
+
42
+ if detector_names is not None:
43
+ assert is_str_series(detector_names, raise_error=True)
44
+
45
+ parfile = ParFile(parfile)
46
+ self.logger.debug(f'spec_file: {parfile.spec_file}')
47
+
48
+ # Get list of scan numbers for the dataset
49
+ dataset_ids = np.asarray(parfile.get_values('dataset_id'))
50
+ dataset_rows_i = np.argwhere(
51
+ np.where(
52
+ np.asarray(dataset_ids) == dataset_id, 1, 0)).flatten()
53
+ scan_nos = [parfile.data[i][parfile.scann_i] for i in dataset_rows_i\
54
+ if parfile.data[i][parfile.scann_i] in \
55
+ parfile.good_scan_numbers()]
56
+ self.logger.debug(f'Scan numbers: {list_to_string(scan_nos)}')
57
+ spec_scans = [dict(spec_file=parfile.spec_file, scan_numbers=scan_nos)]
58
+
59
+ # Get scan type for this dataset
60
+ scan_types = parfile.get_values('scan_type', scan_numbers=scan_nos)
61
+ if any([st != scan_types[0] for st in scan_types]):
62
+ msg = 'Only one scan type per dataset is suported.'
63
+ self.logger.error(msg)
64
+ raise Exception(msg)
65
+ scan_type = scan_types[0]
66
+ self.logger.debug(f'Scan type: {scan_type}')
67
+
68
+ # Based on scan type, get independent_dimensions for the map
69
+ # Start by adding labx, laby, labz, and omega. Any "extra"
70
+ # dimensions will be squeezed out of the map later.
71
+ independent_dimensions = [
72
+ dict(label='labx', units='mm', data_type='smb_par',
73
+ name='labx'),
74
+ dict(label='laby', units='mm', data_type='smb_par',
75
+ name='laby'),
76
+ dict(label='labz', units='mm', data_type='smb_par',
77
+ name='labz'),
78
+ dict(label='ometotal', units='degrees', data_type='smb_par',
79
+ name='ometotal')
80
+ ]
81
+ scalar_data = []
82
+ attrs = {}
83
+ if scan_type != 0:
84
+ self.logger.warning(
85
+ 'Assuming all fly axes parameters are identical for all scans')
86
+ attrs['fly_axis_labels'] = []
87
+ axes_labels = {1: 'fly_labx', 2: 'fly_laby', 3: 'fly_labz',
88
+ 4: 'fly_ometotal'}
89
+ axes_units = {1: 'mm', 2: 'mm', 3: 'mm', 4: 'degrees'}
90
+ axes_added = []
91
+ scanparser = ScanParser(parfile.spec_file, scan_nos[0])
92
+ def add_fly_axis(fly_axis_index):
93
+ if fly_axis_index in axes_added:
94
+ return
95
+ fly_axis_key = scanparser.pars[f'fly_axis{fly_axis_index}']
96
+ independent_dimensions.append(dict(
97
+ label=axes_labels[fly_axis_key],
98
+ data_type='spec_motor',
99
+ units=axes_units[fly_axis_key],
100
+ name=scanparser.spec_scan_motor_mnes[fly_axis_index]))
101
+ axes_added.append(fly_axis_index)
102
+ attrs['fly_axis_labels'].append(axes_labels[fly_axis_key])
103
+ add_fly_axis(0)
104
+ if scan_type in (2, 3, 5):
105
+ add_fly_axis(1)
106
+ if scan_type == 5:
107
+ scalar_data.append(dict(
108
+ label='bin_axis', units='n/a', data_type='smb_par',
109
+ name='bin_axis'))
110
+ attrs['bin_axis_label'] = axes_labels[
111
+ scanparser.pars['bin_axis']].replace('fly_', '')
112
+
113
+ # Add in the usual extra scalar data maps for EDD
114
+ scalar_data.extend([
115
+ dict(label='SCAN_N', units='n/a', data_type='smb_par',
116
+ name='SCAN_N'),
117
+ dict(label='rsgap_size', units='mm', data_type='smb_par',
118
+ name='rsgap_size'),
119
+ dict(label='x_effective', units='mm', data_type='smb_par',
120
+ name='x_effective'),
121
+ dict(label='z_effective', units='mm', data_type='smb_par',
122
+ name='z_effective'),
123
+ ])
124
+
125
+ # Construct initial map config dictionary
126
+ scanparser = ScanParser(parfile.spec_file, scan_nos[0])
127
+ map_config_dict = dict(
128
+ title=f'{scanparser.scan_name}_dataset{dataset_id}',
129
+ station='id1a3',
130
+ experiment_type='EDD',
131
+ sample=dict(name=scanparser.scan_name),
132
+ spec_scans=[
133
+ dict(spec_file=parfile.spec_file, scan_numbers=scan_nos)],
134
+ independent_dimensions=independent_dimensions,
135
+ scalar_data=scalar_data,
136
+ presample_intensity=dict(name='a3ic1', data_type='scan_column'),
137
+ postsample_intensity=dict(name='diode', data_type='scan_column'),
138
+ dwell_time_actual=dict(name='sec', data_type='scan_column'),
139
+ attrs=attrs,
140
+ )
141
+ map_config = MapConfig(**map_config_dict)
142
+
143
+ # Add lab coordinates to the map's scalar_data only if they
144
+ # are NOT already one of the squeezed map's
145
+ # independent_dimensions.
146
+ lab_dims = [
147
+ dict(label='labx', units='mm', data_type='smb_par', name='labx'),
148
+ dict(label='laby', units='mm', data_type='smb_par', name='laby'),
149
+ dict(label='labz', units='mm', data_type='smb_par', name='labz'),
150
+ dict(label='ometotal', units='degrees', data_type='smb_par',
151
+ name='ometotal')
152
+ ]
153
+ for dim in lab_dims:
154
+ if dim not in independent_dimensions:
155
+ scalar_data.append(dim)
156
+
157
+ # Convert list of scan_numbers to string notation
158
+ scan_numbers = map_config_dict['spec_scans'][0]['scan_numbers']
159
+ map_config_dict['spec_scans'][0]['scan_numbers'] = list_to_string(
160
+ scan_numbers)
161
+
162
+ # For now overrule the map type to be always unstructured
163
+ # Later take out the option of structured entirely from
164
+ # MapConfig
165
+ map_config_dict['map_type'] = 'unstructured'
166
+
167
+ return map_config_dict
168
+
169
+
170
+ class EddMPIMapReader(Reader):
171
+ """Reader for taking an EDD-style .par file and returning a
172
+ representing one of the datasets in the file as a NeXus NXentry
173
+ object. Independent dimensions are determined automatically, and a
174
+ specific set of items to use for extra scalar datasets to include
175
+ are hard-coded in."""
176
+ def read(self, parfile, dataset_id, detector_names):
177
+ """Return a NeXus NXentry object after validating the
178
+ `MapConfig` object representing an EDD dataset.
179
+
180
+ :param parfile: Name of the EDD-style .par file containing the
181
+ dataset.
182
+ :type parfile: str
183
+ :param dataset_id: Number of the dataset in the .par file
184
+ to return as a map.
185
+ :type dataset_id: int
186
+ :param detector_names: Detector prefixes for the raw data.
187
+ :type detector_names: list[str]
188
+ :returns: The EDD map including the raw data packaged with the
189
+ appropriate value for 'schema'.
190
+ :rtype: PipelineData
191
+ """
192
+ # Third party modules
193
+ from json import dumps
194
+ from nexusformat.nexus import (
195
+ NXcollection,
196
+ NXdata,
197
+ NXentry,
198
+ NXfield,
199
+ NXsample,
200
+ )
201
+
202
+ # Local modules
203
+ from CHAP.common.models.map import MapConfig
204
+ from CHAP.pipeline import PipelineData
205
+ from CHAP.utils.general import list_to_string
206
+ from CHAP.utils.parfile import ParFile
207
+ from chess_scanparsers import SMBMCAScanParser as ScanParser
208
+
209
+ assert is_str_series(detector_names, raise_error=True)
30
210
 
31
211
  parfile = ParFile(parfile)
32
212
  self.logger.debug(f'spec_file: {parfile.spec_file}')
@@ -130,7 +310,7 @@ class EddMapReader(Reader):
130
310
  # along which data were taken at only one unique coordinate
131
311
  # value)
132
312
  while 1 in map_config.shape:
133
- remove_dim_index = map_config.shape[::-1].index(1)
313
+ remove_dim_index = map_config.shape.index(1)
134
314
  self.logger.debug(
135
315
  'Map dimensions: '
136
316
  + str([dim["label"] for dim in independent_dimensions]))
@@ -159,6 +339,45 @@ class EddMapReader(Reader):
159
339
  if dim not in independent_dimensions:
160
340
  scalar_data.append(dim)
161
341
 
342
+ # Set up NXentry and add misc. CHESS-specific metadata
343
+ nxentry = NXentry(name=map_config.title)
344
+ nxentry.attrs['station'] = map_config.station
345
+ nxentry.map_config = dumps(map_config.dict())
346
+ nxentry.spec_scans = NXcollection()
347
+ for scans in map_config.spec_scans:
348
+ nxentry.spec_scans[scans.scanparsers[0].scan_name] = \
349
+ NXfield(value=scans.scan_numbers,
350
+ attrs={'spec_file': str(scans.spec_file)})
351
+
352
+ # Add sample metadata
353
+ nxentry[map_config.sample.name] = NXsample(
354
+ **map_config.sample.dict())
355
+
356
+ # Set up default data group
357
+ nxentry.data = NXdata()
358
+ independent_dimensions = map_config.independent_dimensions
359
+ for dim in independent_dimensions:
360
+ nxentry.data[dim.label] = NXfield(
361
+ units=dim.units,
362
+ attrs={'long_name': f'{dim.label} ({dim.units})',
363
+ 'data_type': dim.data_type,
364
+ 'local_name': dim.name})
365
+
366
+ # Read the raw data and independent dimensions
367
+ data = [[] for _ in detector_names]
368
+ dims = [[] for _ in independent_dimensions]
369
+ for scans in map_config.spec_scans:
370
+ for scan_number in scans.scan_numbers:
371
+ scanparser = scans.get_scanparser(scan_number)
372
+ for i, detector_name in enumerate(detector_names):
373
+ if isinstance(detector_name, int):
374
+ detector_name = str(detector_name)
375
+ ddata = scanparser.get_detector_data(detector_name)
376
+ data[i].append(ddata)
377
+ for i, dim in enumerate(independent_dimensions):
378
+ dims[i].append(dim.get_value(
379
+ scans, scan_number, scan_step_index=-1, relative=True))
380
+
162
381
  return map_config_dict
163
382
 
164
383
 
@@ -175,7 +394,8 @@ class ScanToMapReader(Reader):
175
394
  :returns: Map configuration dictionary
176
395
  :rtype: dict
177
396
  """
178
- from CHAP.utils.scanparsers import SMBMCAScanParser as ScanParser
397
+ # Local modules
398
+ from chess_scanparsers import SMBMCAScanParser as ScanParser
179
399
 
180
400
  scanparser = ScanParser(spec_file, scan_number)
181
401
 
@@ -277,8 +497,6 @@ class SetupNXdataReader(Reader):
277
497
  # For scan type 5 only:
278
498
  # 21: bin axis
279
499
 
280
- import numpy as np
281
-
282
500
  # Parse dataset from the input .txt file.
283
501
  with open(filename) as inf:
284
502
  file_lines = inf.readlines()
@@ -469,9 +687,9 @@ class UpdateNXdataReader(Reader):
469
687
  `common.UpdateNXdataProcessor`.
470
688
  :rtype: list[dict[str, object]]
471
689
  """
472
- import os
473
- from CHAP.utils.scanparsers import SMBMCAScanParser as ScanParser
690
+ # Local modules
474
691
  from CHAP.utils.parfile import ParFile
692
+ from chess_scanparsers import SMBMCAScanParser as ScanParser
475
693
 
476
694
  if not os.path.isabs(spec_file):
477
695
  spec_file = os.path.join(inputdir, spec_file)
@@ -582,14 +800,13 @@ class NXdataSliceReader(Reader):
582
800
  specified spec scan.
583
801
  :rtype: nexusformat.nexus.NXdata
584
802
  """
585
- import os
586
-
803
+ # Third party modules
587
804
  from nexusformat.nexus import nxload
588
- import numpy as np
589
805
 
806
+ # Local modules
590
807
  from CHAP.common import NXdataReader
591
808
  from CHAP.utils.parfile import ParFile
592
- from CHAP.utils.scanparsers import SMBMCAScanParser as ScanParser
809
+ from chess_scanparsers import SMBMCAScanParser as ScanParser
593
810
 
594
811
  # Parse existing NXdata
595
812
  root = nxload(filename)
@@ -0,0 +1,8 @@
1
+ """This subpackage contains `PipelineItems` unique to GIWAXS data
2
+ processing workflows.
3
+ """
4
+ # from CHAP.giwaxs.reader import
5
+ # from CHAP.giwaxs.processor import
6
+ # from CHAP.giwaxs.writer import
7
+
8
+ from CHAP.giwaxs.processor import GiwaxsConversionProcessor
CHAP/giwaxs/models.py ADDED
@@ -0,0 +1,100 @@
1
+ # System modules
2
+ from functools import cache
3
+ import os
4
+ from pathlib import PosixPath
5
+ from typing import (
6
+ Optional,
7
+ )
8
+
9
+ # Third party modules
10
+ import numpy as np
11
+ from pydantic import (
12
+ BaseModel,
13
+ DirectoryPath,
14
+ FilePath,
15
+ conint,
16
+ conlist,
17
+ constr,
18
+ field_validator,
19
+ model_validator,
20
+ )
21
+
22
+ # Local modules
23
+ from CHAP.common.models.map import MapConfig
24
+
25
+
26
+ class Detector(BaseModel):
27
+ """Detector class to represent a single detector used in the
28
+ experiment.
29
+
30
+ :param prefix: Prefix of the detector in the SPEC file.
31
+ :type prefix: str
32
+ :param poni_file: Path to the poni file.
33
+ :type poni_file: str
34
+ """
35
+ prefix: constr(strip_whitespace=True, min_length=1)
36
+ poni_file: FilePath
37
+
38
+ @field_validator('poni_file')
39
+ @classmethod
40
+ def validate_poni_file(cls, poni_file):
41
+ """Validate the poni file by checking if it's a valid PONI
42
+ file.
43
+
44
+ :param poni_file: Path to the poni file.
45
+ :type poni_file: str
46
+ :raises ValueError: If poni_file is not a valid PONI file.
47
+ :returns: Absolute path to the poni file.
48
+ :rtype: str
49
+ """
50
+ # Third party modules
51
+ from pyFAI import load
52
+
53
+ poni_file = os.path.abspath(poni_file)
54
+ try:
55
+ load(poni_file)
56
+ except Exception as exc:
57
+ raise ValueError(f'{poni_file} is not a valid PONI file') from exc
58
+ return poni_file
59
+
60
+
61
+ class GiwaxsConversionConfig(BaseModel):
62
+ """Class representing metadata required to locate GIWAXS image
63
+ files for a single scan to convert to q_par/q_perp coordinates.
64
+
65
+ :ivar detectors: List of detector configurations.
66
+ :type detectors: list[Detector]
67
+ :ivar scan_step_indices: Optional scan step indices to convert.
68
+ If not specified, all images will be converted.
69
+ :type scan_step_indices: Union(int, list[int], str), optional
70
+ :ivar save_raw_data: Save the raw data in the NeXus output,
71
+ default to `False`.
72
+ :type save_raw_data: bool, optional
73
+ """
74
+ detectors: conlist(item_type=Detector, min_length=1)
75
+ scan_step_indices: Optional[
76
+ conlist(item_type=conint(ge=0), min_length=1)] = None
77
+ save_raw_data: Optional[bool] = False
78
+
79
+ @field_validator('scan_step_indices', mode='before')
80
+ @classmethod
81
+ def validate_scan_step_indices(cls, scan_step_indices):
82
+ """Validate the specified list of scan step indices.
83
+
84
+ :param scan_step_indices: List of scan numbers.
85
+ :type scan_step_indices: list of int
86
+ :raises ValueError: If a specified scan number is not found in
87
+ the SPEC file.
88
+ :return: List of scan numbers.
89
+ :rtype: list of int
90
+ """
91
+ if isinstance(scan_step_indices, int):
92
+ scan_step_indices = [scan_step_indices]
93
+ if isinstance(scan_step_indices, str):
94
+ # Local modules
95
+ from CHAP.utils.general import string_to_list
96
+
97
+ scan_step_indices = string_to_list(scan_step_indices)
98
+
99
+ return scan_step_indices
100
+