pychemstation 0.8.4__py3-none-any.whl → 0.9.1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (33)
  1. pychemstation/__init__.py +1 -1
  2. pychemstation/analysis/__init__.py +4 -1
  3. pychemstation/analysis/base_spectrum.py +4 -4
  4. pychemstation/{utils → analysis}/chromatogram.py +4 -7
  5. pychemstation/analysis/process_report.py +137 -73
  6. pychemstation/control/README.md +22 -46
  7. pychemstation/control/__init__.py +5 -0
  8. pychemstation/control/controllers/__init__.py +2 -0
  9. pychemstation/control/controllers/comm.py +39 -18
  10. pychemstation/control/controllers/devices/device.py +27 -14
  11. pychemstation/control/controllers/devices/injector.py +33 -89
  12. pychemstation/control/controllers/tables/method.py +266 -111
  13. pychemstation/control/controllers/tables/ms.py +7 -4
  14. pychemstation/control/controllers/tables/sequence.py +171 -82
  15. pychemstation/control/controllers/tables/table.py +192 -116
  16. pychemstation/control/hplc.py +117 -83
  17. pychemstation/generated/__init__.py +0 -2
  18. pychemstation/generated/dad_method.py +1 -1
  19. pychemstation/generated/pump_method.py +15 -19
  20. pychemstation/utils/injector_types.py +1 -1
  21. pychemstation/utils/macro.py +12 -11
  22. pychemstation/utils/method_types.py +3 -2
  23. pychemstation/{analysis/utils.py → utils/num_utils.py} +2 -2
  24. pychemstation/utils/parsing.py +1 -11
  25. pychemstation/utils/sequence_types.py +4 -5
  26. pychemstation/{analysis → utils}/spec_utils.py +1 -2
  27. pychemstation/utils/table_types.py +10 -9
  28. pychemstation/utils/tray_types.py +48 -38
  29. {pychemstation-0.8.4.dist-info → pychemstation-0.9.1.dist-info}/METADATA +64 -24
  30. pychemstation-0.9.1.dist-info/RECORD +37 -0
  31. pychemstation-0.8.4.dist-info/RECORD +0 -37
  32. {pychemstation-0.8.4.dist-info → pychemstation-0.9.1.dist-info}/WHEEL +0 -0
  33. {pychemstation-0.8.4.dist-info → pychemstation-0.9.1.dist-info}/licenses/LICENSE +0 -0
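
The renames in this list move chromatogram handling under `pychemstation.analysis` and the numeric/spectral helpers under `pychemstation.utils`. A minimal sketch of the resulting import changes, inferred only from the renamed paths above (not from tested code):

```python
# Import paths inferred from the file renames listed above; illustrative only.

# pychemstation 0.8.4
# from pychemstation.utils.chromatogram import AgilentHPLCChromatogram
# from pychemstation.analysis.utils import find_nearest_value_index, interpolate_to_index

# pychemstation 0.9.1
from pychemstation.analysis.chromatogram import AgilentHPLCChromatogram
from pychemstation.utils.num_utils import find_nearest_value_index, interpolate_to_index
```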
pychemstation/__init__.py CHANGED
@@ -1,3 +1,3 @@
  """
  .. include:: ../README.md
- """
+ """
pychemstation/analysis/__init__.py CHANGED
@@ -1 +1,4 @@
- from .base_spectrum import AbstractSpectrum
+ from .process_report import CSVProcessor
+ from .process_report import TXTProcessor
+
+ __all__ = ["CSVProcessor", "TXTProcessor"]
pychemstation/analysis/base_spectrum.py CHANGED
@@ -6,12 +6,12 @@ from abc import ABC, abstractmethod
  import matplotlib.pyplot as plt
  import numpy as np
  from scipy import (
-     sparse,
-     signal,
      integrate,
+     signal,
+     sparse,
  )

- from .utils import interpolate_to_index, find_nearest_value_index
+ from ..utils.num_utils import find_nearest_value_index, interpolate_to_index


  class AbstractSpectrum(ABC):
@@ -249,7 +249,7 @@ class AbstractSpectrum(ABC):
          os.makedirs(path, exist_ok=True)
          fig.savefig(os.path.join(path, f"{filename}.png"), dpi=150)

-     def find_peaks(self, threshold=1, min_width=.1, min_dist=None, area=None):
+     def find_peaks(self, threshold=1, min_width=0.1, min_dist=None, area=None):
          """Finds all peaks above the threshold with at least min_width width.

          Args:
pychemstation/{utils → analysis}/chromatogram.py CHANGED
@@ -6,16 +6,14 @@ from dataclasses import dataclass

  import numpy as np

- from .parsing import CHFile
- from ..analysis import AbstractSpectrum

- # standard filenames for spectral data
- CHANNELS = {"A": "01", "B": "02", "C": "03", "D": "04"}
+ from ..utils.parsing import CHFile
+ from ..analysis.base_spectrum import AbstractSpectrum

  ACQUISITION_PARAMETERS = "acq.txt"

  # format used in acquisition parameters
- TIME_FORMAT = "%Y-%m-%d-%H-%M-%S"
+ TIME_FORMAT = "%Y-%m-%d %H-%M-%S"
  SEQUENCE_TIME_FORMAT = "%Y-%m-%d %H-%M"


@@ -39,12 +37,11 @@ class AgilentHPLCChromatogram(AbstractSpectrum):
      }

      def __init__(self, path=None, autosaving=False):
-
          if path is not None:
              os.makedirs(path, exist_ok=True)
              self.path = path
          else:
-             self.path = os.path.join(".", "hplc_data")
+             self.path = os.path.join("../utils", "hplc_data")
              os.makedirs(self.path, exist_ok=True)

          super().__init__(path=path, autosaving=autosaving)
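
The `TIME_FORMAT` constant now separates the date and time with a space instead of a hyphen. An illustrative check of the new format (the timestamp string below is made up):

```python
# Illustrative only: the timestamp value is hypothetical.
from datetime import datetime

OLD_TIME_FORMAT = "%Y-%m-%d-%H-%M-%S"  # 0.8.4
NEW_TIME_FORMAT = "%Y-%m-%d %H-%M-%S"  # 0.9.1

print(datetime.strptime("2024-05-01 13-45-30", NEW_TIME_FORMAT))
# -> 2024-05-01 13:45:30
```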
pychemstation/analysis/process_report.py CHANGED
@@ -4,15 +4,22 @@ import re
  from abc import abstractmethod
  from dataclasses import dataclass
  from enum import Enum
- from typing import List, AnyStr, Dict, Optional, Pattern
+ from typing import AnyStr, Dict, List, Optional, Pattern

  import pandas as pd
- from aghplctools.ingestion.text import _no_peaks_re, _area_report_re, _header_block_re, _signal_info_re, \
-     _signal_table_re, chunk_string
- from result import Result, Err, Ok
+ from aghplctools.ingestion.text import (
+     _area_report_re,
+     _header_block_re,
+     _no_peaks_re,
+     _signal_info_re,
+     _signal_table_re,
+     chunk_string,
+ )
+ from pandas._libs.parsers import EmptyDataError
+ from result import Err, Ok, Result

- from pychemstation.utils.chromatogram import AgilentHPLCChromatogram
- from pychemstation.utils.tray_types import Tray, FiftyFourVialPlate
+ from ..analysis.chromatogram import AgilentHPLCChromatogram
+ from ..utils.tray_types import FiftyFourVialPlate, Tray


  @dataclass
@@ -63,16 +70,31 @@ class CSVProcessor(ReportProcessor):
          """
          super().__init__(path)

+     def find_csv_prefix(self) -> str:
+         files = [f for f in os.listdir(self.path) if os.path.isfile(os.path.join(self.path, f))]
+         for file in files:
+             if "00" in file:
+                 name, _, file_extension = file.partition(".")
+                 if "00" in name and file_extension.lower() == "csv":
+                     prefix, _, _ = name.partition("00")
+                     return prefix
+         raise FileNotFoundError("Couldn't find the prefix for CSV")
+
      def process_report(self) -> Result[AgilentReport, AnyStr]:
          """
          Method to parse details from CSV report.

-         :returns: subset of complete report details, specifically the sample location, solvents in pumps,
+         :return: subset of complete report details, specifically the sample location, solvents in pumps,
              and list of peaks at each wavelength channel.
          """
-         labels = os.path.join(self.path, f'REPORT00.CSV')
-         if os.path.exists(labels):
-             df_labels: Dict[int, Dict[int: AnyStr]] = pd.read_csv(labels, encoding="utf-16", header=None).to_dict()
+         prefix = self.find_csv_prefix()
+         labels = os.path.join(self.path, f"{prefix}00.CSV")
+         if not os.path.exists(labels):
+             raise ValueError("CSV reports do not exist, make sure to turn on the post run CSV report option!")
+         elif os.path.exists(labels):
+             df_labels: Dict[int, Dict[int:AnyStr]] = pd.read_csv(
+                 labels, encoding="utf-16", header=None
+             ).to_dict()
              vial_location = []
              signals = {}
              solvents = {}
@@ -85,18 +107,36 @@ class CSVProcessor(ReportProcessor):
              elif val == "Number of Signals":
                  num_signals = int(df_labels[1][pos])
                  for s in range(1, num_signals + 1):
-                     df = pd.read_csv(os.path.join(self.path, f'REPORT0{s}.CSV'),
-                                      encoding="utf-16", header=None)
-                     peaks = df.apply(lambda row: AgilentPeak(*row), axis=1)
-                     wavelength = df_labels[1][pos + s].partition(",4 Ref=off")[0][-3:]
-                     signals[wavelength] = peaks
+                     try:
+                         df = pd.read_csv(
+                             os.path.join(self.path, f"{prefix}0{s}.CSV"),
+                             encoding="utf-16",
+                             header=None,
+                         )
+                         peaks = df.apply(lambda row: AgilentPeak(*row), axis=1)
+                     except EmptyDataError:
+                         peaks = []
+                     try:
+                         wavelength = df_labels[1][pos + s].partition(",4 Ref=off")[0][
+                             -3:
+                         ]
+                         wavelength = int(wavelength)
+                         signals[wavelength] = list(peaks)
+                     except (IndexError, ValueError):
+                         # TODO: Ask about the MS signals
+                         pass
                  break

-         return Ok(AgilentReport(
-             signals=[Signals(wavelength=w, peaks=s, data=None) for w, s in signals.items()],
-             vial_location=FiftyFourVialPlate.from_int(int(vial_location)),
-             solvents=solvents
-         ))
+         return Ok(
+             AgilentReport(
+                 signals=[
+                     Signals(wavelength=w, peaks=s, data=None)
+                     for w, s in signals.items()
+                 ],
+                 vial_location=FiftyFourVialPlate.from_int(int(vial_location)),
+                 solvents=solvents,
+             )
+         )

          return Err("No report found")

@@ -105,34 +145,39 @@ class TXTProcessor(ReportProcessor):
      """
      Regex matches for column and unit combinations, courtesy of Veronica Lai.
      """
+
      _column_re_dictionary = {
-         'Peak': {  # peak index
-             '#': '[ ]+(?P<Peak>[\d]+)',  # number
+         "Peak": {  # peak index
+             "#": "[ ]+(?P<Peak>[\d]+)",  # number
          },
-         'RetTime': {  # retention time
-             '[min]': '(?P<RetTime>[\d]+.[\d]+)',  # minutes
+         "RetTime": {  # retention time
+             "[min]": "(?P<RetTime>[\d]+.[\d]+)",  # minutes
          },
-         'Type': {  # peak type
-             '': '(?P<Type>[A-Z]{1,3}(?: [A-Z]{1,2})*)',  # todo this is different from <4.8.8 aghplc tools
+         "Type": {  # peak type
+             "": "(?P<Type>[A-Z]{1,3}(?: [A-Z]{1,2})*)",  # todo this is different from <4.8.8 aghplc tools
          },
-         'Width': {  # peak width
-             '[min]': '(?P<Width>[\d]+.[\d]+[e+-]*[\d]+)',
+         "Width": {  # peak width
+             "[min]": "(?P<Width>[\d]+.[\d]+[e+-]*[\d]+)",
          },
-         'Area': {  # peak area
-             '[mAU*s]': '(?P<Area>[\d]+.[\d]+[e+-]*[\d]+)',  # area units
-             '%': '(?P<percent>[\d]+.[\d]+[e+-]*[\d]+)',  # percent
+         "Area": {  # peak area
+             "[mAU*s]": "(?P<Area>[\d]+.[\d]+[e+-]*[\d]+)",  # area units
+             "%": "(?P<percent>[\d]+.[\d]+[e+-]*[\d]+)",  # percent
          },
-         'Height': {  # peak height
-             '[mAU]': '(?P<Height>[\d]+.[\d]+[e+-]*[\d]+)',
+         "Height": {  # peak height
+             "[mAU]": "(?P<Height>[\d]+.[\d]+[e+-]*[\d]+)",
          },
-         'Name': {
-             '': '(?P<Name>[^\s]+(?:\s[^\s]+)*)',  # peak name
+         "Name": {
+             "": "(?P<Name>[^\s]+(?:\s[^\s]+)*)",  # peak name
          },
      }

-     def __init__(self, path: str, min_ret_time: int = 0,
-                  max_ret_time: int = 999,
-                  target_wavelength_range: List[int] = range(200, 300)):
+     def __init__(
+         self,
+         path: str,
+         min_ret_time: int = 0,
+         max_ret_time: int = 999,
+         target_wavelength_range: List[int] = range(200, 300),
+     ):
          """
          Class to process reports in CSV form.

@@ -149,16 +194,17 @@ class TXTProcessor(ReportProcessor):
      def process_report(self) -> Result[AgilentReport, AnyStr]:
          """
          Method to parse details from CSV report.
-
-         :returns: subset of complete report details, specifically the sample location, solvents in pumps,
-             and list of peaks at each wavelength channel.
-
          If you want more functionality, use `aghplctools`.
          `from aghplctools.ingestion.text import pull_hplc_area_from_txt`
          `signals = pull_hplc_area_from_txt(file_path)`
+
+         :return: subset of complete report details, specifically the sample location, solvents in pumps,
+             and list of peaks at each wavelength channel.
          """

-         with open(os.path.join(self.path, "REPORT.TXT"), 'r', encoding='utf-16') as openfile:
+         with open(
+             os.path.join(self.path, "REPORT.TXT"), "r", encoding="utf-16"
+         ) as openfile:
              text = openfile.read()

          try:
@@ -166,25 +212,33 @@ class TXTProcessor(ReportProcessor):
          except ValueError as e:
              return Err("No peaks found: " + str(e))

-         signals = {key: signals[key] for key in self.target_wavelength_range if key in signals}
+         signals = {
+             key: signals[key] for key in self.target_wavelength_range if key in signals
+         }

          parsed_signals = []
          for wavelength, wavelength_dict in signals.items():
-             current_wavelength_signals = Signals(wavelength=wavelength, peaks=[], data=None)
+             current_wavelength_signals = Signals(
+                 wavelength=int(wavelength), peaks=[], data=None
+             )
              for ret_time, ret_time_dict in wavelength_dict.items():
                  if self.min_ret_time <= ret_time <= self.max_ret_time:
-                     current_wavelength_signals.peaks.append(AgilentPeak(retention_time=ret_time,
-                                                                         area=ret_time_dict['Area'],
-                                                                         width=ret_time_dict['Width'],
-                                                                         height=ret_time_dict['Height'],
-                                                                         peak_number=None,
-                                                                         peak_type=ret_time_dict['Type'],
-                                                                         height_percent=None))
+                     current_wavelength_signals.peaks.append(
+                         AgilentPeak(
+                             retention_time=ret_time,
+                             area=ret_time_dict["Area"],
+                             width=ret_time_dict["Width"],
+                             height=ret_time_dict["Height"],
+                             peak_number=None,
+                             peak_type=ret_time_dict["Type"],
+                             area_percent=None,
+                         )
+                     )
              parsed_signals.append(current_wavelength_signals)

-         return Ok(AgilentReport(vial_location=None,
-                                 solvents=None,
-                                 signals=parsed_signals))
+         return Ok(
+             AgilentReport(vial_location=None, solvents=None, signals=parsed_signals)
+         )

      def parse_area_report(self, report_text: str) -> Dict:
          """
@@ -200,7 +254,7 @@ class TXTProcessor(ReportProcessor):
          should be able to use the `parse_area_report` method of aghplctools v4.8.8
          """
          if re.search(_no_peaks_re, report_text):  # There are no peaks in Report.txt
-             raise ValueError(f'No peaks found in Report.txt')
+             raise ValueError("No peaks found in Report.txt")
          blocks = _header_block_re.split(report_text)
          signals = {}  # output dictionary
          for ind, block in enumerate(blocks):
@@ -213,23 +267,28 @@ class TXTProcessor(ReportProcessor):
                  si = _signal_info_re.match(table)
                  if si is not None:
                      # some error state (e.g. 'not found')
-                     if si.group('error') != '':
+                     if si.group("error") != "":
                          continue
-                     wavelength = float(si.group('wavelength'))
+                     wavelength = float(si.group("wavelength"))
                      if wavelength in signals:
                          # placeholder error raise just in case (this probably won't happen)
                          raise KeyError(
-                             f'The wavelength {float(si.group("wavelength"))} is already in the signals dictionary')
+                             f"The wavelength {float(si.group('wavelength'))} is already in the signals dictionary"
+                         )
                      signals[wavelength] = {}
                      # build peak regex
                      peak_re = self.build_peak_regex(table)
-                     if peak_re is None:  # if there are no columns (empty table), continue
+                     if (
+                         peak_re is None
+                     ):  # if there are no columns (empty table), continue
                          continue
-                     for line in table.split('\n'):
+                     for line in table.split("\n"):
                          peak = peak_re.match(line)
                          if peak is not None:
-                             signals[wavelength][float(peak.group('RetTime'))] = {}
-                             current = signals[wavelength][float(peak.group('RetTime'))]
+                             signals[wavelength][float(peak.group("RetTime"))] = {}
+                             current = signals[wavelength][
+                                 float(peak.group("RetTime"))
+                             ]
                              for key in self._column_re_dictionary:
                                  if key in peak.re.groupindex:
                                      try:  # try float conversion, otherwise continue
@@ -248,30 +307,35 @@ class TXTProcessor(ReportProcessor):
          :param signal_table: block of lines associated with an area table
          :return: peak line regex object (<=3.6 _sre.SRE_PATTERN, >=3.7 re.Pattern)
          """
-         split_table = signal_table.split('\n')
+         split_table = signal_table.split("\n")
          if len(split_table) <= 4:  # catch peak table with no values
              return None
          # todo verify that these indicies are always true
          column_line = split_table[2]  # table column line
          unit_line = split_table[3]  # column unit line
-         length_line = [len(val) + 1 for val in split_table[4].split('|')]  # length line
+         length_line = [len(val) + 1 for val in split_table[4].split("|")]  # length line

          # iterate over header values and units to build peak table regex
          peak_re_string = []
          for header, unit in zip(
-                 chunk_string(column_line, length_line),
-                 chunk_string(unit_line, length_line)
+             chunk_string(column_line, length_line), chunk_string(unit_line, length_line)
          ):
-             if header == '':  # todo create a better catch for an undefined header
+             if header == "":  # todo create a better catch for an undefined header
                  continue
              try:
                  peak_re_string.append(
-                     self._column_re_dictionary[header][unit]  # append the appropriate regex
+                     self._column_re_dictionary[header][
+                         unit
+                     ]  # append the appropriate regex
                  )
              except KeyError:  # catch for undefined regexes (need to be built)
-                 raise KeyError(f'The header/unit combination "{header}" "{unit}" is not defined in the peak regex '
-                                f'dictionary. Let Lars know.')
+                 raise KeyError(
+                     f'The header/unit combination "{header}" "{unit}" is not defined in the peak regex '
+                     f"dictionary. Let Lars know."
+                 )
          return re.compile(
-             '[ ]+'.join(peak_re_string)  # constructed string delimited by 1 or more spaces
-             + '[\s]*'  # and any remaining white space
+             "[ ]+".join(
+                 peak_re_string
+             )  # constructed string delimited by 1 or more spaces
+             + "[\s]*"  # and any remaining white space
          )
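
The reworked report processors are exported from `pychemstation.analysis` and return a `Result`. A minimal usage sketch, assuming a ChemStation data folder that already contains the post-run CSV/TXT reports (the directory path below is illustrative):

```python
# Sketch only: the data directory below is a placeholder.
from pychemstation.analysis import CSVProcessor, TXTProcessor

data_dir = "C:\\Users\\Public\\Documents\\ChemStation\\3\\Data\\test_experiment"

csv_result = CSVProcessor(path=data_dir).process_report()
if csv_result.is_ok():
    report = csv_result.ok_value
    print(report.vial_location, report.solvents)
    for signal in report.signals:
        print(signal.wavelength, len(signal.peaks))

# TXT report fallback, restricted to a retention-time window and wavelength range
txt_result = TXTProcessor(
    path=data_dir,
    min_ret_time=0,
    max_ret_time=10,
    target_wavelength_range=list(range(200, 300)),
).process_report()
```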
pychemstation/control/README.md CHANGED
@@ -1,28 +1,24 @@
- # Examples of usecases
+ # Examples of usecases

- ## Initialization
  ```python
  from pychemstation.control import HPLCController

  DEFAULT_METHOD_DIR = "C:\\ChemStation\\1\\Methods\\"
- DATA_DIR = "C:\\Users\\Public\\Documents\\ChemStation\\3\\Data"
  SEQUENCE_DIR = "C:\\USERS\\PUBLIC\\DOCUMENTS\\CHEMSTATION\\3\\Sequence"
  DEFAULT_COMMAND_PATH = "C:\\Users\\User\\Desktop\\Lucy\\"
+ DATA_DIR_2 = "C:\\Users\\Public\\Documents\\ChemStation\\2\\Data"
+ DATA_DIR_3 = "C:\\Users\\Public\\Documents\\ChemStation\\3\\Data"

- hplc_controller = HPLCController(data_dir=DATA_DIR,
+ # Initialize HPLC Controller
+ hplc_controller = HPLCController(data_dirs=[DATA_DIR_2, DATA_DIR_3],
                                   comm_dir=DEFAULT_COMMAND_PATH,
                                   method_dir=DEFAULT_METHOD_DIR,
                                   sequence_dir=SEQUENCE_DIR)
- ```

- ## Switching a method
- ```python
+ # Switching a method
  hplc_controller.switch_method("General-Poroshell")
- ```
-
- ## Editing a method

- ```python
+ # Editing a method
  from pychemstation.utils.method_types import *

  new_method = MethodDetails(
@@ -45,47 +41,27 @@ new_method = MethodDetails(
      stop_time=5,
      post_time=2
  )
-
  hplc_controller.edit_method(new_method)
- ```

- ## Running a method and get data from last run method
- ```python
+ # Run a method and get a report or data from last run method
  hplc_controller.run_method(experiment_name="test_experiment")
  chrom = hplc_controller.get_last_run_method_data()
  channel_a_time = chrom.A.x
- ```
+ report = hplc_controller.get_last_run_method_report()
+ vial_location = report.vial_location

- ## Switching a sequence
- ```python
+ # switch the currently loaded sequence
  hplc_controller.switch_sequence(sequence_name="hplc_testing")
- ```
- ## Editing a Sequence Row
- ```python
- from pychemstation.utils.sequence_types import *
- from pychemstation.utils.tray_types import *

- hplc_controller.edit_sequence_row(SequenceEntry(
-     vial_location=FiftyFourVialPlate(plate=Plate.TWO, letter=Letter.A, num=Num.SEVEN).value(),
-     method="General-Poroshell",
-     num_inj=3,
-     inj_vol=4,
-     sample_name="Blank",
-     sample_type=SampleType.BLANK,
-     inj_source=InjectionSource.HIP_ALS
- ), 1)
- ```
-
- ## Editing entire Sequence Table
- ```python
+ # edit the sequence table
  from pychemstation.utils.tray_types import *
  from pychemstation.utils.sequence_types import *

  seq_table = SequenceTable(
-     name=DEFAULT_SEQUENCE,
+     name="hplc_testing",
      rows=[
          SequenceEntry(
-             vial_location=FiftyFourVialPlate(plate=Plate.TWO, letter=Letter.A, num=Num.SEVEN).value(),
+             vial_location=FiftyFourVialPlate.from_str("P1-A1"),
              method="General-Poroshell",
              num_inj=3,
              inj_vol=4,
@@ -94,7 +70,7 @@ seq_table = SequenceTable(
              inj_source=InjectionSource.MANUAL
          ),
          SequenceEntry(
-             vial_location=TenVialColumn.ONE.value,
+             vial_location=TenVialColumn.ONE,
              method="General-Poroshell",
              num_inj=1,
              inj_vol=1,
@@ -103,7 +79,7 @@ seq_table = SequenceTable(
              inj_source=InjectionSource.AS_METHOD
          ),
          SequenceEntry(
-             vial_location=10,
+             vial_location=FiftyFourVialPlate.from_str("P2-B4"),
              method="General-Poroshell",
              num_inj=3,
              inj_vol=4,
@@ -114,11 +90,11 @@ seq_table = SequenceTable(
      ]
  )
  hplc_controller.edit_sequence(seq_table)
- ```

- ## Running a sequence and get data from last run sequence
- ```python
- hplc_controller.run_sequence(seq_table)
- chroms = hplc_controller.get_last_run_sequence_data()
- channel_A_time = chroms[0].A.x
+ # Run a sequence and get data or report from last run sequence
+ hplc_controller.run_sequence()
+ chroms = hplc_controller.get_last_run_sequence_data(read_uv=True)
+ row_1_channel_A_abs = chroms[0][210].y
+ report = hplc_controller.get_last_run_sequence_reports()
+ vial_location_row_1 = report[0].vial_location
  ```
pychemstation/control/__init__.py CHANGED
@@ -1,4 +1,9 @@
  """
  .. include:: README.md
  """
+
  from .hplc import HPLCController
+
+ __all__ = [
+     "HPLCController",
+ ]
pychemstation/control/controllers/__init__.py CHANGED
@@ -5,3 +5,5 @@
  from .comm import CommunicationController
  from .tables.method import MethodController
  from .tables.sequence import SequenceController
+
+ __all__ = ["CommunicationController", "MethodController", "SequenceController"]
pychemstation/control/controllers/comm.py CHANGED
@@ -9,13 +9,21 @@ been processed.

  Authors: Alexander Hammer, Hessam Mehr, Lucy Hao
  """
+
  import os
  import time
- from typing import Optional
+ from typing import Optional, Union

- from result import Result, Ok, Err
+ from result import Err, Ok, Result

- from ...utils.macro import *
+ from ...utils.macro import (
+     str_to_status,
+     HPLCAvailStatus,
+     HPLCErrorStatus,
+     Command,
+     Status,
+     Response,
+ )


  class CommunicationController:
@@ -27,11 +35,11 @@ class CommunicationController:
      MAX_CMD_NO = 255

      def __init__(
-             self,
-             comm_dir: str,
-             cmd_file: str = "cmd",
-             reply_file: str = "reply",
-             debug: bool = False
+         self,
+         comm_dir: str,
+         cmd_file: str = "cmd",
+         reply_file: str = "reply",
+         debug: bool = False,
      ):
          """
          :param comm_dir:
@@ -55,7 +63,7 @@
          self.reset_cmd_counter()

          # Initialize row counter for table operations
-         self.send('Local Rows')
+         self.send("Local Rows")

      def get_num_val(self, cmd: str) -> Union[int, float]:
          tries = 5
@@ -138,6 +146,7 @@
          :return: Potential ChemStation response
          """
          err: Optional[Union[OSError, IndexError]] = None
+         err_msg = ""
          for _ in range(num_attempts):
              time.sleep(1)

@@ -150,7 +159,11 @@

              try:
                  first_line = response.splitlines()[0]
-                 response_no = int(first_line.split()[0])
+                 try:
+                     response_no = int(first_line.split()[0])
+                 except ValueError as e:
+                     err = e
+                     err_msg = f"Caused by {first_line}"
              except IndexError as e:
                  err = e
                  continue
@@ -161,7 +174,7 @@
              else:
                  continue
          else:
-             return Err(f"Failed to receive reply to command #{cmd_no} due to {err}.")
+             return Err(f"Failed to receive reply to command #{cmd_no} due to {err} caused by {err_msg}.")

      def sleepy_send(self, cmd: Union[Command, str]):
          self.send("Sleep 0.1")
@@ -187,20 +200,28 @@
      def receive(self) -> Result[Response, str]:
          """Returns messages received in reply file.

-             :return: ChemStation response
+         :return: ChemStation response
          """
          num_response_prefix = "Numerical Responses:"
          str_response_prefix = "String Responses:"
          possible_response = self._receive(self.cmd_no)
          if possible_response.is_ok():
-             lines = possible_response.value.splitlines()
+             lines = possible_response.ok_value.splitlines()
              for line in lines:
                  if str_response_prefix in line and num_response_prefix in line:
-                     string_responses_dirty, _, numerical_responses = line.partition(num_response_prefix)
-                     _, _, string_responses = string_responses_dirty.partition(str_response_prefix)
-                     return Ok(Response(string_response=string_responses.strip(),
-                                        num_response=float(numerical_responses.strip())))
-             return Err(f"Could not retrieve HPLC response")
+                     string_responses_dirty, _, numerical_responses = line.partition(
+                         num_response_prefix
+                     )
+                     _, _, string_responses = string_responses_dirty.partition(
+                         str_response_prefix
+                     )
+                     return Ok(
+                         Response(
+                             string_response=string_responses.strip(),
+                             num_response=float(numerical_responses.strip()),
+                         )
+                     )
+             return Err("Could not retrieve HPLC response")
          else:
              return Err(f"Could not establish response to HPLC: {possible_response}")