pychemstation 0.8.4__py3-none-any.whl → 0.8.7__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pychemstation/__init__.py +1 -1
- pychemstation/analysis/__init__.py +4 -1
- pychemstation/analysis/base_spectrum.py +4 -4
- pychemstation/{utils → analysis}/chromatogram.py +4 -7
- pychemstation/analysis/process_report.py +173 -77
- pychemstation/control/README.md +22 -46
- pychemstation/control/__init__.py +5 -0
- pychemstation/control/controllers/__init__.py +2 -0
- pychemstation/control/controllers/comm.py +41 -18
- pychemstation/control/controllers/devices/device.py +27 -14
- pychemstation/control/controllers/devices/injector.py +33 -89
- pychemstation/control/controllers/tables/method.py +266 -111
- pychemstation/control/controllers/tables/ms.py +7 -4
- pychemstation/control/controllers/tables/sequence.py +171 -82
- pychemstation/control/controllers/tables/table.py +192 -116
- pychemstation/control/hplc.py +117 -83
- pychemstation/generated/__init__.py +0 -2
- pychemstation/generated/dad_method.py +1 -1
- pychemstation/generated/pump_method.py +15 -19
- pychemstation/utils/injector_types.py +1 -1
- pychemstation/utils/macro.py +12 -11
- pychemstation/utils/method_types.py +3 -2
- pychemstation/{analysis/utils.py → utils/num_utils.py} +2 -2
- pychemstation/utils/parsing.py +1 -11
- pychemstation/utils/sequence_types.py +4 -5
- pychemstation/{analysis → utils}/spec_utils.py +1 -2
- pychemstation/utils/table_types.py +10 -9
- pychemstation/utils/tray_types.py +48 -38
- {pychemstation-0.8.4.dist-info → pychemstation-0.8.7.dist-info}/METADATA +64 -23
- pychemstation-0.8.7.dist-info/RECORD +37 -0
- pychemstation-0.8.4.dist-info/RECORD +0 -37
- {pychemstation-0.8.4.dist-info → pychemstation-0.8.7.dist-info}/WHEEL +0 -0
- {pychemstation-0.8.4.dist-info → pychemstation-0.8.7.dist-info}/licenses/LICENSE +0 -0
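Several modules changed import paths in this release: `chromatogram.py` moved from `utils` into `analysis`, `analysis/utils.py` became `utils/num_utils.py`, and `spec_utils.py` moved into `utils`. A minimal migration sketch, assuming the public names themselves are unchanged (old 0.8.4 paths shown as comments):

```python
# Import paths implied by the file moves listed above; old 0.8.4 paths are
# shown as comments for comparison.

# 0.8.4
# from pychemstation.utils.chromatogram import AgilentHPLCChromatogram
# from pychemstation.analysis.utils import find_nearest_value_index

# 0.8.7
from pychemstation.analysis.chromatogram import AgilentHPLCChromatogram
from pychemstation.utils.num_utils import find_nearest_value_index
```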
pychemstation/__init__.py
CHANGED

pychemstation/analysis/base_spectrum.py
CHANGED
@@ -6,12 +6,12 @@ from abc import ABC, abstractmethod
 import matplotlib.pyplot as plt
 import numpy as np
 from scipy import (
-    sparse,
-    signal,
     integrate,
+    signal,
+    sparse,
 )

-from .utils import find_nearest_value_index, interpolate_to_index
+from ..utils.num_utils import find_nearest_value_index, interpolate_to_index


 class AbstractSpectrum(ABC):
@@ -249,7 +249,7 @@ class AbstractSpectrum(ABC):
         os.makedirs(path, exist_ok=True)
         fig.savefig(os.path.join(path, f"{filename}.png"), dpi=150)

-    def find_peaks(self, threshold=1, min_width
+    def find_peaks(self, threshold=1, min_width=0.1, min_dist=None, area=None):
         """Finds all peaks above the threshold with at least min_width width.

         Args:
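The visible change to `find_peaks` is that its keyword defaults are now spelled out in the signature. A short usage sketch, assuming the channel objects returned by the controller are `AbstractSpectrum` subclasses (the README examples at the end of this diff read `chrom.A.x` the same way); all paths are hypothetical:

```python
from pychemstation.control import HPLCController

# Hypothetical paths; adjust per instrument PC (see the README diff below).
hplc_controller = HPLCController(
    data_dirs=["C:\\Users\\Public\\Documents\\ChemStation\\2\\Data"],
    comm_dir="C:\\Users\\User\\Desktop\\Lucy\\",
    method_dir="C:\\ChemStation\\1\\Methods\\",
    sequence_dir="C:\\USERS\\PUBLIC\\DOCUMENTS\\CHEMSTATION\\3\\Sequence",
)

chrom = hplc_controller.get_last_run_method_data()
# Defaults made explicit in 0.8.7: threshold=1, min_width=0.1, min_dist=None, area=None
chrom.A.find_peaks(threshold=1, min_width=0.1)
```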
pychemstation/{utils → analysis}/chromatogram.py
CHANGED
@@ -6,16 +6,14 @@ from dataclasses import dataclass

 import numpy as np

-from .parsing import CHFile
-from ..analysis import AbstractSpectrum

-
-
+from ..utils.parsing import CHFile
+from ..analysis.base_spectrum import AbstractSpectrum

 ACQUISITION_PARAMETERS = "acq.txt"

 # format used in acquisition parameters
-TIME_FORMAT = "%Y-%m-%d
+TIME_FORMAT = "%Y-%m-%d %H-%M-%S"
 SEQUENCE_TIME_FORMAT = "%Y-%m-%d %H-%M"


@@ -39,12 +37,11 @@ class AgilentHPLCChromatogram(AbstractSpectrum):
     }

     def __init__(self, path=None, autosaving=False):
-
         if path is not None:
             os.makedirs(path, exist_ok=True)
             self.path = path
         else:
-            self.path = os.path.join("
+            self.path = os.path.join("../utils", "hplc_data")
             os.makedirs(self.path, exist_ok=True)

         super().__init__(path=path, autosaving=autosaving)
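`AgilentHPLCChromatogram` now lives under `analysis` and keeps the `__init__(path=None, autosaving=False)` signature shown above. A minimal construction sketch with a hypothetical storage path:

```python
from pychemstation.analysis.chromatogram import AgilentHPLCChromatogram

# With an explicit path the directory is created and used for stored spectra;
# with path=None the class falls back to the "../utils/hplc_data" default above.
chrom = AgilentHPLCChromatogram(path="C:\\hplc_data", autosaving=False)
```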
pychemstation/analysis/process_report.py
CHANGED
@@ -4,15 +4,22 @@ import re
 from abc import abstractmethod
 from dataclasses import dataclass
 from enum import Enum
-from typing import
+from typing import AnyStr, Dict, List, Optional, Pattern

 import pandas as pd
-from aghplctools.ingestion.text import
-
-
+from aghplctools.ingestion.text import (
+    _area_report_re,
+    _header_block_re,
+    _no_peaks_re,
+    _signal_info_re,
+    _signal_table_re,
+    chunk_string,
+)
+from pandas._libs.parsers import EmptyDataError
+from result import Err, Ok, Result

-from
-from
+from ..analysis.chromatogram import AgilentHPLCChromatogram
+from ..utils.tray_types import FiftyFourVialPlate, Tray


 @dataclass
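Both processors' `process_report` methods are now typed to return `Result[AgilentReport, AnyStr]` from the `result` package. A stand-alone sketch of consuming that pattern; `divide` is a made-up stand-in, not a pychemstation function:

```python
from result import Err, Ok, Result


def divide(a: float, b: float) -> Result[float, str]:
    # Stand-in function, not part of pychemstation; it only illustrates the
    # Ok/Err return style used by the report processors below.
    if b == 0:
        return Err("division by zero")
    return Ok(a / b)


res = divide(1.0, 2.0)
if isinstance(res, Ok):
    print(res.unwrap())  # 0.5
else:
    print("failed:", res.err())
```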
@@ -63,20 +70,68 @@ class CSVProcessor(ReportProcessor):
         """
         super().__init__(path)

+    def find_csv_prefix(self) -> str:
+        files = [
+            f
+            for f in os.listdir(self.path)
+            if os.path.isfile(os.path.join(self.path, f))
+        ]
+        for file in files:
+            if "00" in file:
+                name, _, file_extension = file.partition(".")
+                if "00" in name and file_extension.lower() == "csv":
+                    prefix, _, _ = name.partition("00")
+                    return prefix
+        raise FileNotFoundError("Couldn't find the prefix for CSV")
+
+    def report_contains(self, labels: List[str], want: List[str]):
+        for label in labels:
+            if label in want:
+                want.remove(label)
+
+        all_labels_seen = False
+        if len(want) != 0:
+            for want_label in want:
+                label_seen = False
+                for label in labels:
+                    if want_label in label or want_label == label:
+                        label_seen = True
+                all_labels_seen = label_seen
+        else:
+            return True
+        return all_labels_seen
+
     def process_report(self) -> Result[AgilentReport, AnyStr]:
         """
         Method to parse details from CSV report.

-        :
+        :return: subset of complete report details, specifically the sample location, solvents in pumps,
             and list of peaks at each wavelength channel.
         """
-
-
-
-
-
-
-
+        prefix = self.find_csv_prefix()
+        labels = os.path.join(self.path, f"{prefix}00.CSV")
+        if not os.path.exists(labels):
+            raise ValueError(
+                "CSV reports do not exist, make sure to turn on the post run CSV report option!"
+            )
+        elif os.path.exists(labels):
+            LOCATION = "Location"
+            NUM_SIGNALS = "Number of Signals"
+            SOLVENT = "Solvent"
+            df_labels: Dict[int, Dict[int, str]] = pd.read_csv(
+                labels, encoding="utf-16", header=None
+            ).to_dict()
+            vial_location: str = ""
+            signals: Dict[int, list[AgilentPeak]] = {}
+            solvents: Dict[str, str] = {}
+            report_labels: Dict[int, str] = df_labels[0]
+
+            if not self.report_contains(
+                list(report_labels.values()), [LOCATION, NUM_SIGNALS, SOLVENT]
+            ):
+                return Err(f"Missing one of: {LOCATION}, {NUM_SIGNALS}, {SOLVENT}")
+
+            for pos, val in report_labels.items():
                 if val == "Location":
                     vial_location = df_labels[1][pos]
                 elif "Solvent" in val:
@@ -85,18 +140,35 @@ class CSVProcessor(ReportProcessor):
                 elif val == "Number of Signals":
                     num_signals = int(df_labels[1][pos])
                     for s in range(1, num_signals + 1):
-
-
-
-
-
+                        try:
+                            df = pd.read_csv(
+                                os.path.join(self.path, f"{prefix}0{s}.CSV"),
+                                encoding="utf-16",
+                                header=None,
+                            )
+                            peaks = df.apply(lambda row: AgilentPeak(*row), axis=1)
+                        except EmptyDataError:
+                            peaks = []
+                        try:
+                            wavelength = df_labels[1][pos + s].partition(",4 Ref=off")[
+                                0
+                            ][-3:]
+                            signals[int(wavelength)] = list(peaks)
+                        except (IndexError, ValueError):
+                            # TODO: Ask about the MS signals
+                            pass
                     break

-        return Ok(
-
-
-
-
+        return Ok(
+            AgilentReport(
+                signals=[
+                    Signals(wavelength=w, peaks=s, data=None)
+                    for w, s in signals.items()
+                ],
+                vial_location=FiftyFourVialPlate.from_int(int(vial_location)),
+                solvents=solvents,
+            )
+        )

         return Err("No report found")

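With the helpers above, `CSVProcessor` locates the `<prefix>00.CSV` label file, reads one `<prefix>0N.CSV` peak table per signal, and wraps the parsed `AgilentReport` in `Ok`. A usage sketch; the run directory is made up and ChemStation's post-run CSV report option must be enabled:

```python
from result import Ok

from pychemstation.analysis.process_report import CSVProcessor

# Hypothetical run directory containing REPORT00.CSV, REPORT01.CSV, ...
report_result = CSVProcessor(path="C:\\Chem\\1\\Data\\test-run.D").process_report()

if isinstance(report_result, Ok):
    report = report_result.unwrap()
    print(report.vial_location, report.solvents)
    for signal in report.signals:
        print(signal.wavelength, len(signal.peaks))
else:
    print("parse failed:", report_result.err())
```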
@@ -105,34 +177,39 @@ class TXTProcessor(ReportProcessor):
     """
     Regex matches for column and unit combinations, courtesy of Veronica Lai.
     """
+
     _column_re_dictionary = {
-
-
+        "Peak": {  # peak index
+            "#": "[ ]+(?P<Peak>[\d]+)",  # number
         },
-
-
+        "RetTime": {  # retention time
+            "[min]": "(?P<RetTime>[\d]+.[\d]+)",  # minutes
         },
-
-
+        "Type": {  # peak type
+            "": "(?P<Type>[A-Z]{1,3}(?: [A-Z]{1,2})*)",  # todo this is different from <4.8.8 aghplc tools
         },
-
-
+        "Width": {  # peak width
+            "[min]": "(?P<Width>[\d]+.[\d]+[e+-]*[\d]+)",
         },
-
-
-
+        "Area": {  # peak area
+            "[mAU*s]": "(?P<Area>[\d]+.[\d]+[e+-]*[\d]+)",  # area units
+            "%": "(?P<percent>[\d]+.[\d]+[e+-]*[\d]+)",  # percent
         },
-
-
+        "Height": {  # peak height
+            "[mAU]": "(?P<Height>[\d]+.[\d]+[e+-]*[\d]+)",
         },
-
-
+        "Name": {
+            "": "(?P<Name>[^\s]+(?:\s[^\s]+)*)",  # peak name
         },
     }

-    def __init__(
-
-
+    def __init__(
+        self,
+        path: str,
+        min_ret_time: int = 0,
+        max_ret_time: int = 999,
+        target_wavelength_range: List[int] = range(200, 300),
+    ):
         """
         Class to process reports in CSV form.

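The `TXTProcessor` constructor now exposes retention-time bounds and a target wavelength range as keyword arguments. A construction sketch with made-up values:

```python
from pychemstation.analysis.process_report import TXTProcessor

# Hypothetical run directory; REPORT.TXT must exist inside it (UTF-16 encoded,
# as the open() call in the next hunk shows).
processor = TXTProcessor(
    path="C:\\Chem\\1\\Data\\test-run.D",
    min_ret_time=0,
    max_ret_time=10,
    target_wavelength_range=list(range(200, 260)),
)
report = processor.process_report().unwrap()  # raises UnwrapError on Err
```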
@@ -149,16 +226,17 @@ class TXTProcessor(ReportProcessor):
     def process_report(self) -> Result[AgilentReport, AnyStr]:
         """
         Method to parse details from CSV report.
-
-        :returns: subset of complete report details, specifically the sample location, solvents in pumps,
-            and list of peaks at each wavelength channel.
-
         If you want more functionality, use `aghplctools`.
         `from aghplctools.ingestion.text import pull_hplc_area_from_txt`
         `signals = pull_hplc_area_from_txt(file_path)`
+
+        :return: subset of complete report details, specifically the sample location, solvents in pumps,
+            and list of peaks at each wavelength channel.
         """

-        with open(
+        with open(
+            os.path.join(self.path, "REPORT.TXT"), "r", encoding="utf-16"
+        ) as openfile:
             text = openfile.read()

         try:
@@ -166,25 +244,33 @@ class TXTProcessor(ReportProcessor):
         except ValueError as e:
             return Err("No peaks found: " + str(e))

-        signals = {
+        signals = {
+            key: signals[key] for key in self.target_wavelength_range if key in signals
+        }

         parsed_signals = []
         for wavelength, wavelength_dict in signals.items():
-            current_wavelength_signals = Signals(
+            current_wavelength_signals = Signals(
+                wavelength=int(wavelength), peaks=[], data=None
+            )
             for ret_time, ret_time_dict in wavelength_dict.items():
                 if self.min_ret_time <= ret_time <= self.max_ret_time:
-                    current_wavelength_signals.peaks.append(
-
-
-
-
-
-
+                    current_wavelength_signals.peaks.append(
+                        AgilentPeak(
+                            retention_time=ret_time,
+                            area=ret_time_dict["Area"],
+                            width=ret_time_dict["Width"],
+                            height=ret_time_dict["Height"],
+                            peak_number=None,
+                            peak_type=ret_time_dict["Type"],
+                            area_percent=None,
+                        )
+                    )
             parsed_signals.append(current_wavelength_signals)

-        return Ok(
-
-
+        return Ok(
+            AgilentReport(vial_location=None, solvents=None, signals=parsed_signals)
+        )

     def parse_area_report(self, report_text: str) -> Dict:
         """
@@ -200,7 +286,7 @@ class TXTProcessor(ReportProcessor):
         should be able to use the `parse_area_report` method of aghplctools v4.8.8
         """
         if re.search(_no_peaks_re, report_text):  # There are no peaks in Report.txt
-            raise ValueError(
+            raise ValueError("No peaks found in Report.txt")
         blocks = _header_block_re.split(report_text)
         signals = {}  # output dictionary
         for ind, block in enumerate(blocks):
@@ -213,23 +299,28 @@ class TXTProcessor(ReportProcessor):
             si = _signal_info_re.match(table)
             if si is not None:
                 # some error state (e.g. 'not found')
-                if si.group(
+                if si.group("error") != "":
                     continue
-                wavelength = float(si.group(
+                wavelength = float(si.group("wavelength"))
                 if wavelength in signals:
                     # placeholder error raise just in case (this probably won't happen)
                     raise KeyError(
-                        f
+                        f"The wavelength {float(si.group('wavelength'))} is already in the signals dictionary"
+                    )
                 signals[wavelength] = {}
             # build peak regex
             peak_re = self.build_peak_regex(table)
-            if
+            if (
+                peak_re is None
+            ):  # if there are no columns (empty table), continue
                 continue
-            for line in table.split(
+            for line in table.split("\n"):
                 peak = peak_re.match(line)
                 if peak is not None:
-                    signals[wavelength][float(peak.group(
-                    current = signals[wavelength][
+                    signals[wavelength][float(peak.group("RetTime"))] = {}
+                    current = signals[wavelength][
+                        float(peak.group("RetTime"))
+                    ]
                     for key in self._column_re_dictionary:
                         if key in peak.re.groupindex:
                             try:  # try float conversion, otherwise continue
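`parse_area_report` builds a nested mapping keyed by wavelength, then by retention time, with one entry per recognised column. An illustrative sketch of that shape (all numbers invented):

```python
# Shape only; every value here is invented for illustration.
signals = {
    210.0: {  # wavelength parsed from the signal info line
        0.42: {"Area": 12.3, "Width": 0.05, "Height": 210.0},
        1.87: {"Area": 45.6, "Width": 0.07, "Height": 640.0},
    },
}
```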
@@ -248,30 +339,35 @@ class TXTProcessor(ReportProcessor):
         :param signal_table: block of lines associated with an area table
         :return: peak line regex object (<=3.6 _sre.SRE_PATTERN, >=3.7 re.Pattern)
         """
-        split_table = signal_table.split(
+        split_table = signal_table.split("\n")
         if len(split_table) <= 4:  # catch peak table with no values
             return None
         # todo verify that these indicies are always true
         column_line = split_table[2]  # table column line
         unit_line = split_table[3]  # column unit line
-        length_line = [len(val) + 1 for val in split_table[4].split(
+        length_line = [len(val) + 1 for val in split_table[4].split("|")]  # length line

         # iterate over header values and units to build peak table regex
         peak_re_string = []
         for header, unit in zip(
-
-            chunk_string(unit_line, length_line)
+            chunk_string(column_line, length_line), chunk_string(unit_line, length_line)
         ):
-            if header ==
+            if header == "":  # todo create a better catch for an undefined header
                 continue
             try:
                 peak_re_string.append(
-                    self._column_re_dictionary[header][
+                    self._column_re_dictionary[header][
+                        unit
+                    ]  # append the appropriate regex
                 )
             except KeyError:  # catch for undefined regexes (need to be built)
-                raise KeyError(
-
+                raise KeyError(
+                    f'The header/unit combination "{header}" "{unit}" is not defined in the peak regex '
+                    f"dictionary. Let Lars know."
+                )
         return re.compile(
-
-
+            "[ ]+".join(
+                peak_re_string
+            )  # constructed string delimited by 1 or more spaces
+            + "[\s]*"  # and any remaining white space
         )
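`build_peak_regex` joins the per-column patterns with "[ ]+" and appends "[\s]*". A self-contained sketch of the same joining scheme for a reduced column set, matched against an invented report line:

```python
import re

# Reduced column set and an invented report line, purely to illustrate the
# pattern-joining scheme used by build_peak_regex above.
peak_re = re.compile(
    "[ ]+".join(
        [
            r"[ ]+(?P<Peak>[\d]+)",
            r"(?P<RetTime>[\d]+.[\d]+)",
            r"(?P<Area>[\d]+.[\d]+[e+-]*[\d]+)",
        ]
    )
    + r"[\s]*"
)

match = peak_re.match("   1   0.428   1.23456e2")
if match:
    print(match.group("RetTime"), match.group("Area"))  # 0.428 1.23456e2
```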
pychemstation/control/README.md
CHANGED
@@ -1,28 +1,24 @@
-#
+# Examples of usecases

-## Initialization
 ```python
 from pychemstation.control import HPLCController

 DEFAULT_METHOD_DIR = "C:\\ChemStation\\1\\Methods\\"
-DATA_DIR = "C:\\Users\\Public\\Documents\\ChemStation\\3\\Data"
 SEQUENCE_DIR = "C:\\USERS\\PUBLIC\\DOCUMENTS\\CHEMSTATION\\3\\Sequence"
 DEFAULT_COMMAND_PATH = "C:\\Users\\User\\Desktop\\Lucy\\"
+DATA_DIR_2 = "C:\\Users\\Public\\Documents\\ChemStation\\2\\Data"
+DATA_DIR_3 = "C:\\Users\\Public\\Documents\\ChemStation\\3\\Data"

-
+# Initialize HPLC Controller
+hplc_controller = HPLCController(data_dirs=[DATA_DIR_2, DATA_DIR_3],
                                comm_dir=DEFAULT_COMMAND_PATH,
                                method_dir=DEFAULT_METHOD_DIR,
                                sequence_dir=SEQUENCE_DIR)
-```

-
-```python
+# Switching a method
 hplc_controller.switch_method("General-Poroshell")
-```
-
-## Editing a method

-
+# Editing a method
 from pychemstation.utils.method_types import *

 new_method = MethodDetails(
@@ -45,47 +41,27 @@ new_method = MethodDetails(
     stop_time=5,
     post_time=2
 )
-
 hplc_controller.edit_method(new_method)
-```

-
-```python
+# Run a method and get a report or data from last run method
 hplc_controller.run_method(experiment_name="test_experiment")
 chrom = hplc_controller.get_last_run_method_data()
 channel_a_time = chrom.A.x
-
+report = hplc_controller.get_last_run_method_report()
+vial_location = report.vial_location

-
-```python
+# switch the currently loaded sequence
 hplc_controller.switch_sequence(sequence_name="hplc_testing")
-```
-## Editing a Sequence Row
-```python
-from pychemstation.utils.sequence_types import *
-from pychemstation.utils.tray_types import *

-
-    vial_location=FiftyFourVialPlate(plate=Plate.TWO, letter=Letter.A, num=Num.SEVEN).value(),
-    method="General-Poroshell",
-    num_inj=3,
-    inj_vol=4,
-    sample_name="Blank",
-    sample_type=SampleType.BLANK,
-    inj_source=InjectionSource.HIP_ALS
-), 1)
-```
-
-## Editing entire Sequence Table
-```python
+# edit the sequence table
 from pychemstation.utils.tray_types import *
 from pychemstation.utils.sequence_types import *

 seq_table = SequenceTable(
-    name=
+    name="hplc_testing",
     rows=[
         SequenceEntry(
-            vial_location=FiftyFourVialPlate
+            vial_location=FiftyFourVialPlate.from_str("P1-A1"),
             method="General-Poroshell",
             num_inj=3,
             inj_vol=4,
@@ -94,7 +70,7 @@ seq_table = SequenceTable(
             inj_source=InjectionSource.MANUAL
         ),
         SequenceEntry(
-            vial_location=TenVialColumn.ONE
+            vial_location=TenVialColumn.ONE,
             method="General-Poroshell",
             num_inj=1,
             inj_vol=1,
@@ -103,7 +79,7 @@ seq_table = SequenceTable(
             inj_source=InjectionSource.AS_METHOD
         ),
         SequenceEntry(
-            vial_location=
+            vial_location=FiftyFourVialPlate.from_str("P2-B4"),
             method="General-Poroshell",
             num_inj=3,
             inj_vol=4,
@@ -114,11 +90,11 @@ seq_table = SequenceTable(
     ]
 )
 hplc_controller.edit_sequence(seq_table)
-```

-
-
-hplc_controller.
-
-
+# Run a sequence and get data or report from last run sequence
+hplc_controller.run_sequence()
+chroms = hplc_controller.get_last_run_sequence_data(read_uv=True)
+row_1_channel_A_abs = chroms[0][210].y
+report = hplc_controller.get_last_run_sequence_reports()
+vial_location_row_1 = report[0].vial_location
 ```
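The README examples above now express vial locations through `FiftyFourVialPlate.from_str` and `TenVialColumn` members, and the CSV report parser maps the numeric Location field back with `FiftyFourVialPlate.from_int`. A short sketch of those helpers; the plate/position strings follow the README example:

```python
from pychemstation.utils.tray_types import FiftyFourVialPlate, TenVialColumn

# "P1-A1"-style strings follow the README example above; TenVialColumn members
# address positions on the ten-vial column tray.
front_plate_a1 = FiftyFourVialPlate.from_str("P1-A1")
column_position_one = TenVialColumn.ONE
```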