pytrms 0.9.2__py3-none-any.whl → 0.9.3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- pytrms/__init__.py +38 -38
- pytrms/_base/__init__.py +24 -24
- pytrms/_base/ioniclient.py +32 -32
- pytrms/_base/mqttclient.py +119 -119
- pytrms/_version.py +26 -26
- pytrms/clients/__init__.py +33 -33
- pytrms/clients/db_api.py +200 -183
- pytrms/clients/ioniclient.py +87 -87
- pytrms/clients/modbus.py +532 -528
- pytrms/clients/mqtt.py +800 -797
- pytrms/clients/ssevent.py +82 -82
- pytrms/compose/__init__.py +2 -2
- pytrms/compose/composition.py +302 -302
- pytrms/data/IoniTofPrefs.ini +112 -112
- pytrms/data/ParaIDs.csv +731 -731
- pytrms/helpers.py +126 -120
- pytrms/instrument.py +119 -119
- pytrms/measurement.py +173 -173
- pytrms/peaktable.py +499 -501
- pytrms/plotting/__init__.py +4 -4
- pytrms/plotting/plotting.py +27 -27
- pytrms/readers/__init__.py +4 -4
- pytrms/readers/ionitof_reader.py +472 -472
- {pytrms-0.9.2.dist-info → pytrms-0.9.3.dist-info}/LICENSE +339 -339
- {pytrms-0.9.2.dist-info → pytrms-0.9.3.dist-info}/METADATA +3 -2
- pytrms-0.9.3.dist-info/RECORD +27 -0
- {pytrms-0.9.2.dist-info → pytrms-0.9.3.dist-info}/WHEEL +1 -1
- pytrms-0.9.2.dist-info/RECORD +0 -27
pytrms/clients/mqtt.py
CHANGED
|
@@ -1,797 +1,800 @@
|
|
|
1
|
-
import os
|
|
2
|
-
import time
|
|
3
|
-
import json
|
|
4
|
-
import queue
|
|
5
|
-
from collections import deque, namedtuple
|
|
6
|
-
from datetime import datetime
|
|
7
|
-
from functools import wraps
|
|
8
|
-
from itertools import cycle, chain, zip_longest
|
|
9
|
-
from threading import Condition, RLock
|
|
10
|
-
|
|
11
|
-
from . import _logging
|
|
12
|
-
from . import _par_id_file
|
|
13
|
-
from .._base import itype, MqttClientBase
|
|
14
|
-
|
|
15
|
-
|
|
16
|
-
log = _logging.getLogger(__name__)
|
|
17
|
-
|
|
18
|
-
__all__ = ['MqttClient', 'MqttClientBase']
|
|
19
|
-
|
|
20
|
-
|
|
21
|
-
# Load the parameter-ID lookup table (tab-separated, first data row dropped,
# indexed by the parameter 'Name' column).
with open(_par_id_file) as f:
    from pandas import read_csv, isna

    _par_id_info = read_csv(f, sep='\t').drop(0).set_index('Name')
    if isna(_par_id_info.at['MPV_1', 'Access']):
        # older ParaIDs.csv revisions lack the Access flags for the MPVs:
        log.warning(f'filling in read-properties still missing in {os.path.basename(_par_id_file)}')
        for _mpv in ('MPV_1', 'MPV_2', 'MPV_3'):
            _par_id_info.at[_mpv, 'Access'] = 'RW'
|
|
30
|
-
|
|
31
|
-
|
|
32
|
-
|
|
33
|
-
## >>>>>>>> adaptor functions <<<<<<<< ##
|
|
34
|
-
|
|
35
|
-
def _build_header():
|
|
36
|
-
ts = datetime.now()
|
|
37
|
-
header = {
|
|
38
|
-
"TimeStamp": {
|
|
39
|
-
"Str": ts.isoformat(),
|
|
40
|
-
"sec": ts.timestamp() + 2082844800, # convert to LabVIEW time
|
|
41
|
-
},
|
|
42
|
-
}
|
|
43
|
-
return header
|
|
44
|
-
|
|
45
|
-
def _build_data_element(value, unit="-"):
|
|
46
|
-
elm = {
|
|
47
|
-
"Datatype": "",
|
|
48
|
-
"Index": -1,
|
|
49
|
-
"Value": str(value),
|
|
50
|
-
"Unit": str(unit),
|
|
51
|
-
}
|
|
52
|
-
if isinstance(value, bool):
|
|
53
|
-
# Note: True is also instance of int! Therefore, we must check it first:
|
|
54
|
-
elm.update({"Datatype": "BOOL", "Value": str(value).lower()})
|
|
55
|
-
elif isinstance(value, str):
|
|
56
|
-
elm.update({"Datatype": "STRING"})
|
|
57
|
-
elif isinstance(value, int):
|
|
58
|
-
elm.update({"Datatype": "I32"})
|
|
59
|
-
elif isinstance(value, float):
|
|
60
|
-
elm.update({"Datatype": "DBL"})
|
|
61
|
-
else:
|
|
62
|
-
raise NotImplemented("unknown datatype")
|
|
63
|
-
|
|
64
|
-
return elm
|
|
65
|
-
|
|
66
|
-
def _build_write_command(parID, value, future_cycle=None):
|
|
67
|
-
cmd = {
|
|
68
|
-
"ParaID": str(parID),
|
|
69
|
-
"Value": str(value),
|
|
70
|
-
"Datatype": "",
|
|
71
|
-
"CMDMode": "Set",
|
|
72
|
-
"Index": -1,
|
|
73
|
-
}
|
|
74
|
-
if future_cycle is not None:
|
|
75
|
-
cmd.update({
|
|
76
|
-
"SchedMode": "OverallCycle",
|
|
77
|
-
"Schedule": str(future_cycle),
|
|
78
|
-
})
|
|
79
|
-
if isinstance(value, bool):
|
|
80
|
-
# Note: True is also instance of int!
|
|
81
|
-
cmd.update({"Datatype": "BOOL", "Value": str(value).lower()})
|
|
82
|
-
elif isinstance(value, str):
|
|
83
|
-
cmd.update({"Datatype": "STRING"})
|
|
84
|
-
elif isinstance(value, int):
|
|
85
|
-
cmd.update({"Datatype": "I32"})
|
|
86
|
-
elif isinstance(value, float):
|
|
87
|
-
cmd.update({"Datatype": "DBL"})
|
|
88
|
-
else:
|
|
89
|
-
raise NotImplemented("unknown datatype")
|
|
90
|
-
|
|
91
|
-
return cmd
|
|
92
|
-
|
|
93
|
-
|
|
94
|
-
## >>>>>>>> parsing functions <<<<<<<< ##
|
|
95
|
-
|
|
96
|
-
class ParsingError(Exception):
    """Raised when an Ionicon payload cannot be decoded."""
|
|
98
|
-
|
|
99
|
-
|
|
100
|
-
def _parse_data_element(elm):
|
|
101
|
-
'''
|
|
102
|
-
raises: ParsingError, KeyError
|
|
103
|
-
'''
|
|
104
|
-
# make a Python object of a DataElement
|
|
105
|
-
if elm["Datatype"] == "BOOL":
|
|
106
|
-
return bool(elm["Value"])
|
|
107
|
-
elif elm["Datatype"] == "DBL":
|
|
108
|
-
return float(elm["Value"])
|
|
109
|
-
elif elm["Datatype"] == "SGL":
|
|
110
|
-
return float(elm["Value"])
|
|
111
|
-
elif elm["Datatype"] == "I32":
|
|
112
|
-
return int(elm["Value"])
|
|
113
|
-
elif elm["Datatype"] == "I16":
|
|
114
|
-
return int(elm["Value"])
|
|
115
|
-
elif elm["Datatype"] == "STRING":
|
|
116
|
-
return str(elm["Value"])
|
|
117
|
-
raise ParsingError("unknown datatype: " + str(elm["Datatype"]))
|
|
118
|
-
|
|
119
|
-
def _parse_fullcycle(byte_string, need_add_data=False):
    '''Parses 'timecycle', 'intensity', 'mass_cal' and 'add_data' from bytes.

    Important: the byteorder of the parsed arrays will be big-endian! This
    may be aligned if needed with the `.byteswap()`-method on the array,
    but is not automatically performed to avoid any extra copy.

    @params
    - need_add_data   if `False`, the 'mass_cal' and 'add_data' returned will be None

    Parsing the AddData-cluster is much slower than parsing the intensity-array!
    This may be skipped to improve performance, but is necessary for loading
    the 'mass_cal' anyway. For orientation:

    performance (on a Intel Core i5, 8th Gen Ubuntu Linux):
      < 2 ms when `need_add_data=False` (default)
      6-7 ms when needing to parse the AddData-cluster (else)

    @returns a namedtuple ('timecycle', 'intensity', 'mass_cal', 'add_data')
    '''
    import numpy as np

    # all fields of the flattened payload are big-endian (LabVIEW convention):
    _f32 = np.dtype(np.float32).newbyteorder('>')
    _f64 = np.dtype(np.float64).newbyteorder('>')
    _i16 = np.dtype(np.int16).newbyteorder('>')
    _i32 = np.dtype(np.int32).newbyteorder('>')
    _i64 = np.dtype(np.int64).newbyteorder('>')
    _chr = np.dtype(np.int8).newbyteorder('>')

    # running read-position into 'byte_string'; advanced by the rd_* helpers:
    offset = 0

    def rd_single(dtype=_i32):
        # read one scalar and advance 'offset' past it
        nonlocal offset
        _arr = np.frombuffer(byte_string, dtype=dtype, count=1, offset=offset)
        offset += _arr.nbytes
        return _arr[0]

    def rd_arr1d(dtype=_f32, count=None):
        # read a 1d-array; if 'count' is None, an i32 length prefix is read first
        nonlocal offset
        if count is None:
            count = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=count, offset=offset)
        offset += arr.nbytes
        return arr

    def rd_arr2d(dtype=_f32):
        # read a 2d-array prefixed by its two dimensions (i32 each)
        nonlocal offset
        n = rd_single()
        m = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=n*m, offset=offset)
        offset += arr.nbytes
        return arr.reshape((n, m))

    def rd_string():
        # read a length-prefixed byte-string
        # NOTE(review): assumes latin-1 encoding and NUL-padding -- confirm
        nonlocal offset
        return rd_arr1d(dtype=_chr).tobytes().decode('latin-1').lstrip('\x00')

    # TimeCycle cluster (4 doubles), followed by run/cycle counters:
    tc_cluster = rd_arr1d(dtype=_f64, count=4)
    run__, cpx__ = rd_arr1d(dtype=_f64, count=2)  # (discarded)
    # SpecData #
    intensity = rd_arr1d(dtype=_f32)
    sum_inty = rd_arr1d(dtype=_f32)  # (discarded)
    mon_peaks = rd_arr2d(dtype=_f32)  # (discarded)

    if not need_add_data:
        # skip costly parsing of Trace- and Add-Data cluster:
        return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, None, None)

    # TraceData # (as yet discarded)
    tc_cluster2 = rd_arr1d(dtype=_f64, count=6)
    twoD_raw = rd_arr2d(dtype=_f32)
    sum_raw = rd_arr1d(dtype=_f32)
    sum_corr = rd_arr1d(dtype=_f32)
    sum_conz = rd_arr1d(dtype=_f32)
    calc_traces = rd_arr1d(dtype=_f32)
    n_calc_trcs = rd_single()
    for i in range(n_calc_trcs):
        # one length-prefixed name per calculated trace
        calc_names = rd_arr1d(dtype=_chr)
    peak_centrs = rd_arr1d(dtype=_f32)
    # AddData #
    add_data = dict()
    n_add_data = rd_single()
    for i in range(n_add_data):
        grp_name = rd_string()
        descr = []
        for i in range(rd_single()):
            descr.append(rd_string())
        units = []
        for i in range(rd_single()):
            units.append(rd_string())
        data = rd_arr1d(dtype=_f32)
        view = rd_arr1d(dtype=_chr)
        n_lv_times = rd_single()
        offset += 16 * n_lv_times  # skipping LabVIEW timestamp
        add_data[grp_name] = [itype.add_data_item_t(*tup) for tup in zip_longest(data, descr, units, view)]

    # MassCal #
    mc_masses = rd_arr1d(dtype=_f64)
    mc_tbins = rd_arr1d(dtype=_f64)
    cal_paras = rd_arr1d(dtype=_f64)
    segmnt_cal_pars = rd_arr2d(dtype=_f64)
    mcal_mode = rd_single(dtype=_i16)
    mass_cal = itype.masscal_t(mcal_mode, mc_masses, mc_tbins, cal_paras, segmnt_cal_pars)

    return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, mass_cal, add_data)
|
|
224
|
-
|
|
225
|
-
|
|
226
|
-
class CalcConzInfo:
    """Container for the PTR 'CalcConzInfo' tables (primary ions / transmission)."""

    def __init__(self):
        # table name -> list of itype.table_setting_t(name, [(mass, value), ...])
        self.tables = {
            "primary_ions": list(),
            "transmission": list(),
        }

    @staticmethod
    def load_json(json_string):
        """Parse a 'PTR_CalcConzInfo' JSON payload into a new `CalcConzInfo`.

        NOTE(review): an entry with an empty name is treated as the
        end-of-list sentinel -- confirm against the IoniTOF payload format.
        Also, only the masses (not the paired values) are filtered for
        `> 0`, so a non-positive mass shifts the zip pairing -- verify
        this is intended.
        """
        cc = CalcConzInfo()
        j = json.loads(json_string)
        delm = j["DataElement"]
        for li in delm["Value"]["PISets"]["PiSets"]:
            if not li["PriIonSetName"]:
                # empty name: end of the populated entries
                log.info(f'loaded ({len(cc.tables["primary_ions"])}) primary-ion settings')
                break

            masses = map(float, filter(lambda x: x > 0, li["PriIonSetMasses"]))
            values = map(float, li["PriIonSetMultiplier"])
            cc.tables["primary_ions"].append(itype.table_setting_t(str(li["PriIonSetName"]), list(zip(masses, values))))

        for li in j["DataElement"]["Value"]["TransSets"]["Transsets"]:
            if not li["Name"]:
                # empty name: end of the populated entries
                log.info(f'loaded ({len(cc.tables["transmission"])}) transmission settings')
                break

            masses = map(float, filter(lambda x: x > 0, li["Mass"]))
            values = map(float, li["Value"])
            # float(li["Voltage"]) # (not used)
            cc.tables["transmission"].append(itype.table_setting_t(str(li["Name"]), list(zip(masses, values))))

        return cc
|
|
259
|
-
|
|
260
|
-
|
|
261
|
-
## >>>>>>>> callback functions <<<<<<<< ##
|
|
262
|
-
|
|
263
|
-
def follow_calc_conz_info(client, self, msg):
    """MQTT callback: cache the primary-ion / transmission tables.

    The cache is only (re-)loaded while it holds the `_NOT_INIT` sentinel,
    which `follow_state` pushes whenever a measurement is running.
    """
    if not msg.payload:
        # empty payload will clear a retained topic
        self._calcconzinfo = MqttClient._calcconzinfo
        return

    if not self._calcconzinfo[0] is _NOT_INIT:
        # nothing to do..
        return

    log.debug(f"updating tm-/pi-table from {msg.topic}...")
    self._calcconzinfo.append(CalcConzInfo.load_json(msg.payload.decode('latin-1')))

follow_calc_conz_info.topics = ["PTR/Act/PTR_CalcConzInfo"]
|
|
277
|
-
|
|
278
|
-
def follow_schedule(client, self, msg):
    """MQTT callback: mirror IoniTOF's command schedule into `self._sched_cmds`.

    Handles three topics: a 'ScheduleClear' request empties the mirror, a
    (possibly retained) 'Schedule' snapshot replaces it, and an 'IC_Command'
    message appends freshly scheduled commands.
    """
    with follow_schedule._lock:
        if msg.topic.endswith("SRV_ScheduleClear"):
            self._sched_cmds.clear()
            return

        if msg.topic.endswith("SRV_Schedule"):
            if not msg.payload:
                # bugfix: `Logger.warn` is a deprecated alias of `.warning`:
                log.warning("empty ACQ_SRV_Schedule payload has cleared retained topic")
                self._sched_cmds.clear()
                return

            if msg.retain:
                # Note: we either have received a message that has been
                # retained because of a new connection..
                payload = json.loads(msg.payload.decode())
                self._sched_cmds.clear()
                self._sched_cmds.extend(payload["CMDs"])
            else:
                # ..or the schedule as maintained by IoniTOF has changed,
                # which we handle ourselves below:
                pass

        if msg.topic.startswith("IC_Command"):
            if not msg.payload:
                log.error("empty IC_Command! has topic been cleared?")
                return

            # these are the freshly added scheduling requests:
            payload = json.loads(msg.payload.decode())
            self._sched_cmds.extend(payload["CMDs"])

follow_schedule.topics = [
    "DataCollection/Act/ACQ_SRV_Schedule",
    "DataCollection/Set/ACQ_SRV_ScheduleClear",
    "IC_Command/Write/Scheduled"
]
follow_schedule._lock = RLock()
|
|
316
|
-
|
|
317
|
-
def follow_state(client, self, msg):
    """MQTT callback: track the acquisition-server state.

    On a (non-retained) transition into the running state, also invalidate
    the cached calc-conz tables and the source-file path so they get
    refreshed by their respective callbacks.
    """
    if not msg.payload:
        # empty payload will clear a retained topic
        self._server_state = MqttClient._server_state
        return

    payload = json.loads(msg.payload.decode())
    state = payload["DataElement"]["Value"]
    log.debug(f"[{self}] new server-state: " + str(state))
    # replace the current state with the new element:
    self._server_state.append(state)
    meas_running = (state == "ACQ_Aquire")  # yes, there's a typo, plz keep it :)
    # retained messages are replays on (re-)connect, not fresh transitions:
    just_started = (meas_running and not msg.retain)
    if meas_running:
        # signal the relevant thread(s) that we need an update:
        self._calcconzinfo.append(_NOT_INIT)
    if just_started:
        # invalidate the source-file until we get a new one:
        self._sf_filename.append(_NOT_INIT)

follow_state.topics = ["DataCollection/Act/ACQ_SRV_CurrentState"]
|
|
338
|
-
|
|
339
|
-
def follow_sourcefile(client, self, msg):
    """MQTT callback: track the path of the .h5 file currently being written."""
    if msg.payload:
        doc = json.loads(msg.payload.decode())
        new_path = doc["DataElement"]["Value"]
        log.debug(f"[{self}] new source-file: " + str(new_path))
        # deque(maxlen=1): appending replaces the previous path
        self._sf_filename.append(new_path)
    else:
        # an empty payload merely clears a retained topic -> class default
        self._sf_filename = MqttClient._sf_filename

follow_sourcefile.topics = ["DataCollection/Act/ACQ_SRV_SetFullStorageFile"]
|
|
352
|
-
|
|
353
|
-
def follow_act_set_values(client, self, msg):
    """MQTT callback: cache the latest Act/Set value of every known par-ID.

    Topics of the form '<server>/Act/<parID>' and '<server>/Set/<parID>'
    are parsed into `self.act_values` / `self.set_values`; special topics
    handled elsewhere (DataCollection, Sequencer, PTR_CalcConzInfo) are
    skipped. All errors are logged, never raised.
    """
    if not msg.payload:
        # empty payload will clear a retained topic
        return

    try:
        server, kind, parID = msg.topic.split('/')
        if server == "DataCollection":
            # Note: this topic doesn't strictly follow the convention and is handled separately
            return

        if server == "Sequencer":
            # Note: this is a separate program and will be ignored (has its own AUTO_-numbers et.c.)
            return

        if parID == "PTR_CalcConzInfo":
            # another "special" topic handled in 'follow_calc_conz_info' ...
            return

        if parID not in _par_id_info.index:
            log.warning(f"unknown par-ID in [{msg.topic}]")
            return

        payload = json.loads(msg.payload.decode())
        if kind == "Act":
            self.act_values[parID] = _parse_data_element(payload["DataElement"])
        if kind == "Set":
            self.set_values[parID] = _parse_data_element(payload["DataElement"])
    except json.decoder.JSONDecodeError as exc:
        # bugfix: do not re-raise -- an exception escaping an MQTT callback
        # disrupts the client loop (cf. "DO NOT FAIL INSIDE THE CALLBACK"
        # in 'iter_specdata'); the error is fully logged instead:
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
    except KeyError as exc:
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
    except ParsingError as exc:
        log.error(f"while parsing [{parID}] :: {str(exc)}")

follow_act_set_values.topics = ["+/Act/+", "+/Set/+"]
|
|
392
|
-
|
|
393
|
-
def follow_cycle(client, self, msg):
    """MQTT callback: keep `self._overallcycle` at the latest cycle number."""
    if not msg.payload:
        # an empty payload only clears a retained topic
        return

    doc = json.loads(msg.payload.decode())
    cycle_no = int(doc["DataElement"]["Value"])
    # deque(maxlen=1): appending replaces the previous cycle number
    self._overallcycle.append(cycle_no)

follow_cycle.topics = ["DataCollection/Act/ACQ_SRV_OverallCycle"]
|
|
404
|
-
|
|
405
|
-
# collect all follow-functions together:
_subscriber_functions = [fun for name, fun in list(vars().items())
                         if callable(fun) and name.startswith('follow_')]


# unique module-level sentinel marking "no value received yet"; identity
# checks (`is _NOT_INIT`) can never collide with real payload data:
_NOT_INIT = object()
|
|
411
|
-
|
|
412
|
-
|
|
413
|
-
class MqttClient(MqttClientBase):
    """a simplified client for the Ionicon MQTT API.

    > mq = MqttClient()
    > mq.write('TCP_MCP_B', 3400)
    ValueError()

    """

    # NOTE(review): these containers serve as class-level defaults and are the
    # reset targets in `disconnect()`; the follow_* callbacks rebind instance
    # attributes back to them -- confirm two live clients never share state.
    _sched_cmds = deque([_NOT_INIT], maxlen=None)  # mirror of the upcoming write commands
    _server_state = deque([_NOT_INIT], maxlen=1)   # last known acquisition-server state
    _calcconzinfo = deque([_NOT_INIT], maxlen=1)   # last parsed CalcConzInfo tables
    _sf_filename = deque([""], maxlen=1)           # path of the .h5 file being written
    _overallcycle = deque([0], maxlen=1)           # latest overall cycle number
    act_values = dict()  # parID -> last 'Act' value seen on MQTT
    set_values = dict()  # parID -> last 'Set' value seen on MQTT

    # hard safety limits checked before any write/schedule:
    set_value_limit = {
        "TCP_MCP_B": 3200.0,
    }
|
|
433
|
-
|
|
434
|
-
@property
def is_connected(self):
    '''Returns `True` if connection to IoniTOF could be established.'''
    # "connected" means the underlying MQTT link is up AND the retained
    # state/schedule topics have arrived at least once (i.e. the
    # `_NOT_INIT` sentinels have been replaced):
    return (super().is_connected
        and self._server_state[0] is not _NOT_INIT
        and (len(self._sched_cmds) == 0 or self._sched_cmds[0] is not _NOT_INIT))
|
|
440
|
-
|
|
441
|
-
@property
def is_running(self):
    '''Returns `True` if IoniTOF is currently acquiring data.'''
    state = self.current_server_state
    return state == 'ACQ_Aquire'  # yes, there's a typo, plz keep it :)
|
|
445
|
-
|
|
446
|
-
@property
def current_schedule(self):
    '''Returns a list with the upcoming write commands in ascending order.'''
    if not self.is_connected:
        return []

    now = self._overallcycle[0]

    def cycle_of(cmd):
        # commands carry their target cycle as a string under "Schedule":
        return float(cmd["Schedule"])

    upcoming = (cmd for cmd in self._sched_cmds if cycle_of(cmd) > now)
    return sorted(upcoming, key=cycle_of)
|
|
457
|
-
|
|
458
|
-
@property
def current_server_state(self):
    '''Returns the state of the acquisition-server. One of:

    - "ACQ_Idle"
    - "ACQ_JustStarted"
    - "ACQ_Aquire"
    - "ACQ_Stopping"

    or "<unknown>" if there's no connection to IoniTOF.
    '''
    if self.is_connected:
        # the deque(maxlen=1) always holds the most recent state:
        return self._server_state[0]
    return "<unknown>"
|
|
472
|
-
|
|
473
|
-
@property
def current_sourcefile(self):
    '''Returns the path to the hdf5-file that is currently being written.

    Returns an empty string if no measurement is running.

    Raises a `TimeoutError` if a measurement is running but no source-file
    topic arrives within 15 seconds.
    '''
    if not self.is_running:
        return ""

    if self._sf_filename[0] is not _NOT_INIT:
        return self._sf_filename[0]

    # Note: '_NOT_INIT' is set by us on start of acquisition, so we'd expect
    # to receive the source-file-topic after a (generous) timeout:
    timeout_s = 15
    started_at = time.monotonic()
    while time.monotonic() < started_at + timeout_s:
        if self._sf_filename[0] is not _NOT_INIT:
            return self._sf_filename[0]

        time.sleep(10e-3)
    else:
        raise TimeoutError(f"[{self}] unable to retrieve source-file after ({timeout_s = })");
|
|
496
|
-
|
|
497
|
-
@property
def current_cycle(self):
    '''Returns the current 'AbsCycle' (/'OverallCycle').'''
    # outside a running acquisition the cycle number is reported as 0:
    return self._overallcycle[0] if self.is_running else 0
|
|
503
|
-
|
|
504
|
-
def __init__(self, host='127.0.0.1', port=1883):
    """Connect to the MQTT broker at `host`:`port` and register all
    module-level 'follow_*' subscriber callbacks."""
    # this sets up the mqtt connection with default callbacks:
    super().__init__(host, port, _subscriber_functions, None, None, None)
    log.debug(f"connection check ({self.is_connected}) :: {self._server_state = } / {self._sched_cmds = }");
|
|
508
|
-
|
|
509
|
-
def disconnect(self):
    """Disconnect from the broker and reset all cached state."""
    super().disconnect()
    log.debug(f"[{self}] has disconnected")
    # reset internal queues to their defaults:
    # NOTE(review): this rebinds the instance attributes to the *class-level*
    # containers (not fresh copies) -- confirm this sharing is intended.
    self._sched_cmds = MqttClient._sched_cmds
    self._server_state = MqttClient._server_state
    self._calcconzinfo = MqttClient._calcconzinfo
    self._sf_filename = MqttClient._sf_filename
    self._overallcycle = MqttClient._overallcycle
    self.act_values = MqttClient.act_values
    self.set_values = MqttClient.set_values
|
|
520
|
-
|
|
521
|
-
def get(self, parID, kind="set"):
    '''Return the last known value for the given `parID`.

    - kind: one of 'set'/'act' (default: 'set')

    A `KeyError` will be raised if the given `parID` is unknown!
    '''
    if not self.is_connected:
        raise Exception(f"[{self}] no connection to instrument")

    _lut = self.act_values if kind.lower() == "act" else self.set_values
    is_read_only = ('W' not in _par_id_info.loc[parID].Access)  # may raise KeyError!
    if _lut is self.set_values and is_read_only:
        raise ValueError(f"'{parID}' is read-only, did you mean `kind='act'`?")

    if parID not in _lut:
        # Note: The values should need NO! time to be populated from the MQTT topics,
        # because all topics are published as *retained* by the PTR-server.
        # However, a short timeout is respected before raising a `KeyError`:
        time.sleep(200e-3)
        rv = _lut.get(parID)
        if rv is not None:
            return rv

        # still not found? give some useful hints for the user not to go crazy:
        error_hint = (
            "act" if parID in self.act_values else
            "set" if parID in self.set_values else
            "")
        # bugfix: the old conditional expression bound as
        # `(parID + hint) if error_hint else ""`, i.e. the parID was
        # dropped from the error whenever there was no hint:
        suffix = (' (did you mean `kind="%s"`?)' % error_hint) if error_hint else ""
        raise KeyError(str(parID) + suffix)

    return _lut[parID]
|
|
552
|
-
|
|
553
|
-
def get_table(self, table_name):
    '''Return the named CalcConzInfo table received from the PTR server.

    - table_name: one of 'primary_ions'/'transmission'

    Blocks up to 10 seconds for the info to arrive, then raises a
    `TimeoutError`; an unknown 'table_name' raises a `KeyError` that
    lists the possible values.
    '''
    timeout_s = 10
    started_at = time.monotonic()
    try:
        while time.monotonic() < started_at + timeout_s:
            # confirm change of state:
            if not self._calcconzinfo[0] is _NOT_INIT:
                return self._calcconzinfo[0].tables[table_name]

            time.sleep(10e-3)
        else:
            raise TimeoutError(f"[{self}] unable to retrieve calc-conz-info from PTR server")
    except KeyError as exc:
        # bugfix: `tables` is an *instance* attribute, so the former
        # `CalcConzInfo.tables` raised an AttributeError instead of the
        # intended hint -- instantiate to list the valid table names:
        raise KeyError(str(exc) + f", possible values: {list(CalcConzInfo().tables.keys())}")
|
|
567
|
-
|
|
568
|
-
def set(self, parID, new_value, unit='-'):
    '''Set a 'new_value' to 'parID' in the DataCollection.

    Raises:
        NotImplementedError: always -- use `.write(parID, new_value)` instead.
    '''
    if not self.is_connected:
        raise Exception(f"[{self}] no connection to instrument")

    raise NotImplementedError("DataCollection/Set, did you mean .write(parID)?")

    # fix: the publishing code below was unreachable dead code after the
    # unconditional raise; kept as reference for a future implementation:
    #
    # topic, qos, retain = "DataCollection/Set/" + str(parID), 1, True
    # log.info(f"setting '{parID}' ~> [{new_value}]")
    # payload = {
    #     "Header": _build_header(),
    #     "DataElement": _build_data_element(new_value, unit),
    # }
    # return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
582
|
-
|
|
583
|
-
def filter_schedule(self, parID):
    '''Returns a list with the upcoming write commands for 'parID' in ascending order.'''
    # bugfix: this used to return a generator although the doc-string (and
    # the sibling `current_schedule`) promise a list:
    return [cmd for cmd in self.current_schedule if cmd["ParaID"] == str(parID)]
|
|
586
|
-
|
|
587
|
-
def write(self, parID, new_value):
    '''Write a 'new_value' to 'parID' directly.

    Raises a `KeyError` for an unknown and a `ValueError` for a read-only
    parID or a value exceeding the configured `set_value_limit`.
    '''
    if not self.is_connected:
        raise Exception(f"[{self}] no connection to instrument")

    if 'W' not in _par_id_info.loc[parID].Access:  # may raise KeyError!
        raise ValueError(f"'{parID}' is read-only")

    if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
        # bugfix: this message was missing its f-prefix and rendered the
        # placeholders literally:
        raise ValueError(f"set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

    topic, qos, retain = "IC_Command/Write/Direct", 1, False
    log.info(f"writing '{parID}' ~> [{new_value}]")
    cmd = _build_write_command(parID, new_value)
    payload = {
        "Header": _build_header(),
        "CMDs": [ cmd, ]
    }
    return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
606
|
-
|
|
607
|
-
def schedule(self, parID, new_value, future_cycle):
    '''Schedule a 'new_value' to 'parID' for the given 'future_cycle'.

    If 'future_cycle' is in fact in the past, the behaviour is defined by IoniTOF
    (most likely the command is ignored). To be sure, the '.current_cycle' should
    be checked before and after running the '.schedule' command programmatically!
    '''
    if not self.is_connected:
        raise Exception(f"[{self}] no connection to instrument")

    if 'W' not in _par_id_info.loc[parID].Access:  # may raise KeyError!
        raise ValueError(f"'{parID}' is read-only")

    if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
        # bugfix: this message was missing its f-prefix and rendered the
        # placeholders literally:
        raise ValueError(f"set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

    if (future_cycle == 0 and not self.is_running):
        # Note: ioniTOF40 doesn't handle scheduling for the 0th cycle!
        if parID == "AME_ActionNumber":
            # a) the action-number will trigger a script for the 0th cycle, so
            #    we *must* be scheduling it!
            self.write("AME_ActionNumber", new_value)
        elif parID.startswith("AME_"):
            # b) the AME-numbers cannot (currently) be set (i.e. written), but since
            #    they are inserted just *before* the cycle, this will work just fine:
            future_cycle = 1
        else:
            # c) in all other cases, let's assume the measurement will start soon
            #    and dare to write immediately, skipping the schedule altogether:
            log.debug(f"immediately writing {parID = } @ cycle '0' (measurement stopped)")
            return self.write(parID, new_value)

    if not future_cycle > self.current_cycle:
        # bugfix: `Logger.warn` is a deprecated alias of `.warning`:
        log.warning(f"attempting to schedule past cycle, hope you know what you're doing")
        pass  # and at least let's debug it in MQTT browser (see also doc-string above)!

    topic, qos, retain = "IC_Command/Write/Scheduled", 1, False
    log.info(f"scheduling '{parID}' ~> [{new_value}] for cycle ({future_cycle})")
    cmd = _build_write_command(parID, new_value, future_cycle)
    payload = {
        "Header": _build_header(),
        "CMDs": [ cmd, ]
    }
    return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
651
|
-
|
|
652
|
-
def schedule_filename(self, path, future_cycle):
    '''Start writing to a new .h5 file with the beginning of 'future_cycle'.'''
    assert str(path), "filename cannot be empty!"
    # try to make sure that IoniTOF accepts the path:
    if self.host == '127.0.0.1':
        os.makedirs(os.path.dirname(path), exist_ok=True)
        try:
            with open(path, 'x'):
                # bugfix: `log.info` takes %-style format args, not print-style
                # ones -- the former call never logged the path at all:
                log.info("touched new file: %s", path)
        except FileExistsError:
            log.error(f"new filename '{path}' already exists and will not be scheduled!")
            return

    # IoniTOF runs on Windows and expects backslash separators:
    return self.schedule('ACQ_SRV_SetFullStorageFile', path.replace('/', '\\'), future_cycle)
|
|
666
|
-
|
|
667
|
-
def start_measurement(self, path=None):
    '''Start a new measurement and block until the change is confirmed.

    If 'path' is not None, write to the given .h5 file.

    Raises a `TimeoutError` (after disconnecting) if the server does not
    report a running acquisition within 30 seconds.
    '''
    if not path:
        self.write('ACQ_SRV_Start_Meas_Quick', True)
    else:
        # IoniTOF expects Windows-style path separators:
        self.write('ACQ_SRV_Start_Meas_Record', path.replace('/', '\\'))
    timeout_s = 30
    started_at = time.monotonic()
    while time.monotonic() < started_at + timeout_s:
        if self.is_running:
            break

        time.sleep(10e-3)
    else:
        self.disconnect()
        raise TimeoutError(f"[{self}] error starting measurement");
|
|
686
|
-
|
|
687
|
-
def stop_measurement(self, future_cycle=None):
    '''Stop the current measurement and block until the change is confirmed.

    If 'future_cycle' is not None and in the future, schedule the stop command.

    Raises a `TimeoutError` (after disconnecting) if the server keeps
    acquiring for more than 30 seconds past the (scheduled) stop.'''
    if future_cycle is None or not future_cycle > self._overallcycle[0]:
        self.write('ACQ_SRV_Stop_Meas', True)
    else:
        self.schedule('ACQ_SRV_Stop_Meas', True, future_cycle)
    # may need to wait until the scheduled event..
    if future_cycle is not None:
        self.block_until(future_cycle)
    # ..for this timeout to be applicable:
    timeout_s = 30
    started_at = time.monotonic()
    while time.monotonic() < started_at + timeout_s:
        # confirm change of state:
        if not self.is_running:
            break

        time.sleep(10e-3)
    else:
        self.disconnect()
        raise TimeoutError(f"[{self}] error stopping measurement");
|
|
710
|
-
|
|
711
|
-
def block_until(self, cycle):
    '''Blocks the current thread until at least 'cycle' has passed or acquisition stopped.

    Returns the actual current cycle.
    '''
    target = int(cycle)
    while self.is_running:
        if self._overallcycle[0] >= target:
            # reached (or passed) the requested cycle:
            return self._overallcycle[0]
        time.sleep(10e-3)
    # acquisition stopped before the target cycle was reached:
    return 0
|
|
724
|
-
|
|
725
|
-
def iter_specdata(self, timeout_s=None, buffer_size=300):
|
|
726
|
-
'''Returns an iterator over the fullcycle-data as long as it is available.
|
|
727
|
-
|
|
728
|
-
* This will wait up to `timeout_s` (or indefinitely if `None`) for a
|
|
729
|
-
measurement to start or raise a TimeoutError (default: None).
|
|
730
|
-
* Elements will be buffered up to a maximum of `buffer_size` cycles (default: 300).
|
|
731
|
-
* Cycles recorded prior to calling `next()` on the iterator may be missed,
|
|
732
|
-
so ideally this should be set up before any measurement is running.
|
|
733
|
-
* [Important]: When the buffer runs full, a `queue.Full` exception will be raised!
|
|
734
|
-
Therefore, the caller should consume the iterator as soon as possible while the
|
|
735
|
-
measurement is running.
|
|
736
|
-
'''
|
|
737
|
-
q = queue.Queue(buffer_size)
|
|
738
|
-
topic = "DataCollection/Act/ACQ_SRV_FullCycleData"
|
|
739
|
-
qos = 2
|
|
740
|
-
|
|
741
|
-
def callback(client, self, msg):
|
|
742
|
-
try:
|
|
743
|
-
q.put_nowait(_parse_fullcycle(msg.payload, need_add_data=True))
|
|
744
|
-
log.debug(f"received fullcycle, buffer at ({q.qsize()}/{q.maxsize})")
|
|
745
|
-
except queue.Full:
|
|
746
|
-
# DO NOT FAIL INSIDE THE CALLBACK!
|
|
747
|
-
log.error(f"iter_specdata({q.maxsize}): fullcycle buffer overrun!")
|
|
748
|
-
client.unsubscribe(topic)
|
|
749
|
-
|
|
750
|
-
|
|
751
|
-
|
|
752
|
-
|
|
753
|
-
|
|
754
|
-
|
|
755
|
-
|
|
756
|
-
#
|
|
757
|
-
|
|
758
|
-
|
|
759
|
-
|
|
760
|
-
|
|
761
|
-
|
|
762
|
-
|
|
763
|
-
#
|
|
764
|
-
|
|
765
|
-
|
|
766
|
-
|
|
767
|
-
|
|
768
|
-
|
|
769
|
-
|
|
770
|
-
|
|
771
|
-
|
|
772
|
-
|
|
773
|
-
|
|
774
|
-
|
|
775
|
-
|
|
776
|
-
|
|
777
|
-
|
|
778
|
-
|
|
779
|
-
|
|
780
|
-
|
|
781
|
-
|
|
782
|
-
|
|
783
|
-
|
|
784
|
-
|
|
785
|
-
|
|
786
|
-
|
|
787
|
-
|
|
788
|
-
#
|
|
789
|
-
#
|
|
790
|
-
|
|
791
|
-
|
|
792
|
-
|
|
793
|
-
|
|
794
|
-
|
|
795
|
-
|
|
796
|
-
|
|
797
|
-
|
|
1
|
+
import os
|
|
2
|
+
import time
|
|
3
|
+
import json
|
|
4
|
+
import queue
|
|
5
|
+
from collections import deque, namedtuple
|
|
6
|
+
from datetime import datetime
|
|
7
|
+
from functools import wraps
|
|
8
|
+
from itertools import cycle, chain, zip_longest
|
|
9
|
+
from threading import Condition, RLock
|
|
10
|
+
|
|
11
|
+
from . import _logging
|
|
12
|
+
from . import _par_id_file
|
|
13
|
+
from .._base import itype, MqttClientBase
|
|
14
|
+
|
|
15
|
+
|
|
16
|
+
log = _logging.getLogger(__name__)
|
|
17
|
+
|
|
18
|
+
__all__ = ['MqttClient', 'MqttClientBase']
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
with open(_par_id_file) as f:
|
|
22
|
+
from pandas import read_csv, isna
|
|
23
|
+
|
|
24
|
+
_par_id_info = read_csv(f, sep='\t').drop(0).set_index('Name')
|
|
25
|
+
if isna(_par_id_info.at['MPV_1', 'Access']):
|
|
26
|
+
log.warning(f'filling in read-properties still missing in {os.path.basename(_par_id_file)}')
|
|
27
|
+
_par_id_info.at['MPV_1', 'Access'] = 'RW'
|
|
28
|
+
_par_id_info.at['MPV_2', 'Access'] = 'RW'
|
|
29
|
+
_par_id_info.at['MPV_3', 'Access'] = 'RW'
|
|
30
|
+
|
|
31
|
+
|
|
32
|
+
|
|
33
|
+
## >>>>>>>> adaptor functions <<<<<<<< ##
|
|
34
|
+
|
|
35
|
+
def _build_header():
|
|
36
|
+
ts = datetime.now()
|
|
37
|
+
header = {
|
|
38
|
+
"TimeStamp": {
|
|
39
|
+
"Str": ts.isoformat(),
|
|
40
|
+
"sec": ts.timestamp() + 2082844800, # convert to LabVIEW time
|
|
41
|
+
},
|
|
42
|
+
}
|
|
43
|
+
return header
|
|
44
|
+
|
|
45
|
+
def _build_data_element(value, unit="-"):
|
|
46
|
+
elm = {
|
|
47
|
+
"Datatype": "",
|
|
48
|
+
"Index": -1,
|
|
49
|
+
"Value": str(value),
|
|
50
|
+
"Unit": str(unit),
|
|
51
|
+
}
|
|
52
|
+
if isinstance(value, bool):
|
|
53
|
+
# Note: True is also instance of int! Therefore, we must check it first:
|
|
54
|
+
elm.update({"Datatype": "BOOL", "Value": str(value).lower()})
|
|
55
|
+
elif isinstance(value, str):
|
|
56
|
+
elm.update({"Datatype": "STRING"})
|
|
57
|
+
elif isinstance(value, int):
|
|
58
|
+
elm.update({"Datatype": "I32"})
|
|
59
|
+
elif isinstance(value, float):
|
|
60
|
+
elm.update({"Datatype": "DBL"})
|
|
61
|
+
else:
|
|
62
|
+
raise NotImplemented("unknown datatype")
|
|
63
|
+
|
|
64
|
+
return elm
|
|
65
|
+
|
|
66
|
+
def _build_write_command(parID, value, future_cycle=None):
|
|
67
|
+
cmd = {
|
|
68
|
+
"ParaID": str(parID),
|
|
69
|
+
"Value": str(value),
|
|
70
|
+
"Datatype": "",
|
|
71
|
+
"CMDMode": "Set",
|
|
72
|
+
"Index": -1,
|
|
73
|
+
}
|
|
74
|
+
if future_cycle is not None:
|
|
75
|
+
cmd.update({
|
|
76
|
+
"SchedMode": "OverallCycle",
|
|
77
|
+
"Schedule": str(future_cycle),
|
|
78
|
+
})
|
|
79
|
+
if isinstance(value, bool):
|
|
80
|
+
# Note: True is also instance of int!
|
|
81
|
+
cmd.update({"Datatype": "BOOL", "Value": str(value).lower()})
|
|
82
|
+
elif isinstance(value, str):
|
|
83
|
+
cmd.update({"Datatype": "STRING"})
|
|
84
|
+
elif isinstance(value, int):
|
|
85
|
+
cmd.update({"Datatype": "I32"})
|
|
86
|
+
elif isinstance(value, float):
|
|
87
|
+
cmd.update({"Datatype": "DBL"})
|
|
88
|
+
else:
|
|
89
|
+
raise NotImplemented("unknown datatype")
|
|
90
|
+
|
|
91
|
+
return cmd
|
|
92
|
+
|
|
93
|
+
|
|
94
|
+
## >>>>>>>> parsing functions <<<<<<<< ##
|
|
95
|
+
|
|
96
|
+
class ParsingError(Exception):
|
|
97
|
+
pass
|
|
98
|
+
|
|
99
|
+
|
|
100
|
+
def _parse_data_element(elm):
|
|
101
|
+
'''
|
|
102
|
+
raises: ParsingError, KeyError
|
|
103
|
+
'''
|
|
104
|
+
# make a Python object of a DataElement
|
|
105
|
+
if elm["Datatype"] == "BOOL":
|
|
106
|
+
return bool(elm["Value"])
|
|
107
|
+
elif elm["Datatype"] == "DBL":
|
|
108
|
+
return float(elm["Value"])
|
|
109
|
+
elif elm["Datatype"] == "SGL":
|
|
110
|
+
return float(elm["Value"])
|
|
111
|
+
elif elm["Datatype"] == "I32":
|
|
112
|
+
return int(elm["Value"])
|
|
113
|
+
elif elm["Datatype"] == "I16":
|
|
114
|
+
return int(elm["Value"])
|
|
115
|
+
elif elm["Datatype"] == "STRING":
|
|
116
|
+
return str(elm["Value"])
|
|
117
|
+
raise ParsingError("unknown datatype: " + str(elm["Datatype"]))
|
|
118
|
+
|
|
119
|
+
def _parse_fullcycle(byte_string, need_add_data=False):
|
|
120
|
+
'''Parses 'timecycle', 'intensity', 'mass_cal' and 'add_data' from bytes.
|
|
121
|
+
|
|
122
|
+
Important: the byteorder of the parsed arrays will be big-endian! This
|
|
123
|
+
may be aligned if needed with the `.byteswap()`-method on the array,
|
|
124
|
+
but is not automatically performed to avoid any extra copy.
|
|
125
|
+
|
|
126
|
+
@params
|
|
127
|
+
- need_add_data if `False`, the 'mass_cal' and 'add_data' returned will be None
|
|
128
|
+
|
|
129
|
+
Parsing the AddData-cluster is much slower than parsing the intensity-array!
|
|
130
|
+
This may be skipped to improve performance, but is necessary for loading
|
|
131
|
+
the 'mass_cal' anyway. For orientation:
|
|
132
|
+
|
|
133
|
+
performance (on a Intel Core i5, 8th Gen Ubuntu Linux):
|
|
134
|
+
< 2 ms when `need_add_data=False` (default)
|
|
135
|
+
6-7 ms when needing to parse the AddData-cluster (else)
|
|
136
|
+
|
|
137
|
+
@returns a namedtuple ('timecycle', 'intensity', 'mass_cal', 'add_data')
|
|
138
|
+
'''
|
|
139
|
+
import numpy as np
|
|
140
|
+
|
|
141
|
+
_f32 = np.dtype(np.float32).newbyteorder('>')
|
|
142
|
+
_f64 = np.dtype(np.float64).newbyteorder('>')
|
|
143
|
+
_i16 = np.dtype(np.int16).newbyteorder('>')
|
|
144
|
+
_i32 = np.dtype(np.int32).newbyteorder('>')
|
|
145
|
+
_i64 = np.dtype(np.int64).newbyteorder('>')
|
|
146
|
+
_chr = np.dtype(np.int8).newbyteorder('>')
|
|
147
|
+
|
|
148
|
+
offset = 0
|
|
149
|
+
|
|
150
|
+
def rd_single(dtype=_i32):
|
|
151
|
+
nonlocal offset
|
|
152
|
+
_arr = np.frombuffer(byte_string, dtype=dtype, count=1, offset=offset)
|
|
153
|
+
offset += _arr.nbytes
|
|
154
|
+
return _arr[0]
|
|
155
|
+
|
|
156
|
+
def rd_arr1d(dtype=_f32, count=None):
|
|
157
|
+
nonlocal offset
|
|
158
|
+
if count is None:
|
|
159
|
+
count = rd_single()
|
|
160
|
+
arr = np.frombuffer(byte_string, dtype=dtype, count=count, offset=offset)
|
|
161
|
+
offset += arr.nbytes
|
|
162
|
+
return arr
|
|
163
|
+
|
|
164
|
+
def rd_arr2d(dtype=_f32):
|
|
165
|
+
nonlocal offset
|
|
166
|
+
n = rd_single()
|
|
167
|
+
m = rd_single()
|
|
168
|
+
arr = np.frombuffer(byte_string, dtype=dtype, count=n*m, offset=offset)
|
|
169
|
+
offset += arr.nbytes
|
|
170
|
+
return arr.reshape((n, m))
|
|
171
|
+
|
|
172
|
+
def rd_string():
|
|
173
|
+
nonlocal offset
|
|
174
|
+
return rd_arr1d(dtype=_chr).tobytes().decode('latin-1').lstrip('\x00')
|
|
175
|
+
|
|
176
|
+
tc_cluster = rd_arr1d(dtype=_f64, count=4)
|
|
177
|
+
run__, cpx__ = rd_arr1d(dtype=_f64, count=2) # (discarded)
|
|
178
|
+
# SpecData #
|
|
179
|
+
intensity = rd_arr1d(dtype=_f32)
|
|
180
|
+
sum_inty = rd_arr1d(dtype=_f32) # (discarded)
|
|
181
|
+
mon_peaks = rd_arr2d(dtype=_f32) # (discarded)
|
|
182
|
+
|
|
183
|
+
if not need_add_data:
|
|
184
|
+
# skip costly parsing of Trace- and Add-Data cluster:
|
|
185
|
+
return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, None, None)
|
|
186
|
+
|
|
187
|
+
# TraceData # (as yet discarded)
|
|
188
|
+
tc_cluster2 = rd_arr1d(dtype=_f64, count=6)
|
|
189
|
+
twoD_raw = rd_arr2d(dtype=_f32)
|
|
190
|
+
sum_raw = rd_arr1d(dtype=_f32)
|
|
191
|
+
sum_corr = rd_arr1d(dtype=_f32)
|
|
192
|
+
sum_conz = rd_arr1d(dtype=_f32)
|
|
193
|
+
calc_traces = rd_arr1d(dtype=_f32)
|
|
194
|
+
n_calc_trcs = rd_single()
|
|
195
|
+
for i in range(n_calc_trcs):
|
|
196
|
+
calc_names = rd_arr1d(dtype=_chr)
|
|
197
|
+
peak_centrs = rd_arr1d(dtype=_f32)
|
|
198
|
+
# AddData #
|
|
199
|
+
add_data = dict()
|
|
200
|
+
n_add_data = rd_single()
|
|
201
|
+
for i in range(n_add_data):
|
|
202
|
+
grp_name = rd_string()
|
|
203
|
+
descr = []
|
|
204
|
+
for i in range(rd_single()):
|
|
205
|
+
descr.append(rd_string())
|
|
206
|
+
units = []
|
|
207
|
+
for i in range(rd_single()):
|
|
208
|
+
units.append(rd_string())
|
|
209
|
+
data = rd_arr1d(dtype=_f32)
|
|
210
|
+
view = rd_arr1d(dtype=_chr)
|
|
211
|
+
n_lv_times = rd_single()
|
|
212
|
+
offset += 16 * n_lv_times # skipping LabVIEW timestamp
|
|
213
|
+
add_data[grp_name] = [itype.add_data_item_t(*tup) for tup in zip_longest(data, descr, units, view)]
|
|
214
|
+
|
|
215
|
+
# MassCal #
|
|
216
|
+
mc_masses = rd_arr1d(dtype=_f64)
|
|
217
|
+
mc_tbins = rd_arr1d(dtype=_f64)
|
|
218
|
+
cal_paras = rd_arr1d(dtype=_f64)
|
|
219
|
+
segmnt_cal_pars = rd_arr2d(dtype=_f64)
|
|
220
|
+
mcal_mode = rd_single(dtype=_i16)
|
|
221
|
+
mass_cal = itype.masscal_t(mcal_mode, mc_masses, mc_tbins, cal_paras, segmnt_cal_pars)
|
|
222
|
+
|
|
223
|
+
return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, mass_cal, add_data)
|
|
224
|
+
|
|
225
|
+
|
|
226
|
+
class CalcConzInfo:
|
|
227
|
+
|
|
228
|
+
def __init__(self):
|
|
229
|
+
self.tables = {
|
|
230
|
+
"primary_ions": list(),
|
|
231
|
+
"transmission": list(),
|
|
232
|
+
}
|
|
233
|
+
|
|
234
|
+
@staticmethod
|
|
235
|
+
def load_json(json_string):
|
|
236
|
+
cc = CalcConzInfo()
|
|
237
|
+
j = json.loads(json_string)
|
|
238
|
+
delm = j["DataElement"]
|
|
239
|
+
for li in delm["Value"]["PISets"]["PiSets"]:
|
|
240
|
+
if not li["PriIonSetName"]:
|
|
241
|
+
log.info(f'loaded ({len(cc.tables["primary_ions"])}) primary-ion settings')
|
|
242
|
+
break
|
|
243
|
+
|
|
244
|
+
masses = map(float, filter(lambda x: x > 0, li["PriIonSetMasses"]))
|
|
245
|
+
values = map(float, li["PriIonSetMultiplier"])
|
|
246
|
+
cc.tables["primary_ions"].append(itype.table_setting_t(str(li["PriIonSetName"]), list(zip(masses, values))))
|
|
247
|
+
|
|
248
|
+
for li in j["DataElement"]["Value"]["TransSets"]["Transsets"]:
|
|
249
|
+
if not li["Name"]:
|
|
250
|
+
log.info(f'loaded ({len(cc.tables["transmission"])}) transmission settings')
|
|
251
|
+
break
|
|
252
|
+
|
|
253
|
+
masses = map(float, filter(lambda x: x > 0, li["Mass"]))
|
|
254
|
+
values = map(float, li["Value"])
|
|
255
|
+
# float(li["Voltage"]) # (not used)
|
|
256
|
+
cc.tables["transmission"].append(itype.table_setting_t(str(li["Name"]), list(zip(masses, values))))
|
|
257
|
+
|
|
258
|
+
return cc
|
|
259
|
+
|
|
260
|
+
|
|
261
|
+
## >>>>>>>> callback functions <<<<<<<< ##
|
|
262
|
+
|
|
263
|
+
def follow_calc_conz_info(client, self, msg):
|
|
264
|
+
if not msg.payload:
|
|
265
|
+
# empty payload will clear a retained topic
|
|
266
|
+
self._calcconzinfo = MqttClient._calcconzinfo
|
|
267
|
+
return
|
|
268
|
+
|
|
269
|
+
if not self._calcconzinfo[0] is _NOT_INIT:
|
|
270
|
+
# nothing to do..
|
|
271
|
+
return
|
|
272
|
+
|
|
273
|
+
log.debug(f"updating tm-/pi-table from {msg.topic}...")
|
|
274
|
+
self._calcconzinfo.append(CalcConzInfo.load_json(msg.payload.decode('latin-1')))
|
|
275
|
+
|
|
276
|
+
follow_calc_conz_info.topics = ["PTR/Act/PTR_CalcConzInfo"]
|
|
277
|
+
|
|
278
|
+
def follow_schedule(client, self, msg):
|
|
279
|
+
with follow_schedule._lock:
|
|
280
|
+
if msg.topic.endswith("SRV_ScheduleClear"):
|
|
281
|
+
self._sched_cmds.clear()
|
|
282
|
+
return
|
|
283
|
+
|
|
284
|
+
if msg.topic.endswith("SRV_Schedule"):
|
|
285
|
+
if not msg.payload:
|
|
286
|
+
log.warn("empty ACQ_SRV_Schedule payload has cleared retained topic")
|
|
287
|
+
self._sched_cmds.clear()
|
|
288
|
+
return
|
|
289
|
+
|
|
290
|
+
if msg.retain:
|
|
291
|
+
# Note: we either have received a message that has been
|
|
292
|
+
# retained because of a new connection..
|
|
293
|
+
payload = json.loads(msg.payload.decode())
|
|
294
|
+
self._sched_cmds.clear()
|
|
295
|
+
self._sched_cmds.extend(payload["CMDs"])
|
|
296
|
+
else:
|
|
297
|
+
# ..or the schedule as maintained by IoniTOF has changed,
|
|
298
|
+
# which we handle ourselves below:
|
|
299
|
+
pass
|
|
300
|
+
|
|
301
|
+
if msg.topic.startswith("IC_Command"):
|
|
302
|
+
if not msg.payload:
|
|
303
|
+
log.error("empty IC_Command! has topic been cleared?")
|
|
304
|
+
return
|
|
305
|
+
|
|
306
|
+
# these are the freshly added scheduling requests:
|
|
307
|
+
payload = json.loads(msg.payload.decode())
|
|
308
|
+
self._sched_cmds.extend(payload["CMDs"])
|
|
309
|
+
|
|
310
|
+
follow_schedule.topics = [
|
|
311
|
+
"DataCollection/Act/ACQ_SRV_Schedule",
|
|
312
|
+
"DataCollection/Set/ACQ_SRV_ScheduleClear",
|
|
313
|
+
"IC_Command/Write/Scheduled"
|
|
314
|
+
]
|
|
315
|
+
follow_schedule._lock = RLock()
|
|
316
|
+
|
|
317
|
+
def follow_state(client, self, msg):
|
|
318
|
+
if not msg.payload:
|
|
319
|
+
# empty payload will clear a retained topic
|
|
320
|
+
self._server_state = MqttClient._server_state
|
|
321
|
+
return
|
|
322
|
+
|
|
323
|
+
payload = json.loads(msg.payload.decode())
|
|
324
|
+
state = payload["DataElement"]["Value"]
|
|
325
|
+
log.debug(f"[{self}] new server-state: " + str(state))
|
|
326
|
+
# replace the current state with the new element:
|
|
327
|
+
self._server_state.append(state)
|
|
328
|
+
meas_running = (state == "ACQ_Aquire") # yes, there's a typo, plz keep it :)
|
|
329
|
+
just_started = (meas_running and not msg.retain)
|
|
330
|
+
if meas_running:
|
|
331
|
+
# signal the relevant thread(s) that we need an update:
|
|
332
|
+
self._calcconzinfo.append(_NOT_INIT)
|
|
333
|
+
if just_started:
|
|
334
|
+
# invalidate the source-file until we get a new one:
|
|
335
|
+
self._sf_filename.append(_NOT_INIT)
|
|
336
|
+
|
|
337
|
+
follow_state.topics = ["DataCollection/Act/ACQ_SRV_CurrentState"]
|
|
338
|
+
|
|
339
|
+
def follow_sourcefile(client, self, msg):
|
|
340
|
+
if not msg.payload:
|
|
341
|
+
# empty payload will clear a retained topic
|
|
342
|
+
self._sf_filename = MqttClient._sf_filename
|
|
343
|
+
return
|
|
344
|
+
|
|
345
|
+
payload = json.loads(msg.payload.decode())
|
|
346
|
+
path = payload["DataElement"]["Value"]
|
|
347
|
+
log.debug(f"[{self}] new source-file: " + str(path))
|
|
348
|
+
# replace the current path with the new element:
|
|
349
|
+
self._sf_filename.append(path)
|
|
350
|
+
|
|
351
|
+
follow_sourcefile.topics = ["DataCollection/Act/ACQ_SRV_SetFullStorageFile"]
|
|
352
|
+
|
|
353
|
+
def follow_act_set_values(client, self, msg):
|
|
354
|
+
if not msg.payload:
|
|
355
|
+
# empty payload will clear a retained topic
|
|
356
|
+
return
|
|
357
|
+
|
|
358
|
+
try:
|
|
359
|
+
server, kind, parID = msg.topic.split('/')
|
|
360
|
+
if server == "DataCollection":
|
|
361
|
+
# Note: this topic doesn't strictly follow the convention and is handled separately
|
|
362
|
+
return
|
|
363
|
+
|
|
364
|
+
if server == "Sequencer":
|
|
365
|
+
# Note: this is a separate program and will be ignored (has its own AUTO_-numbers et.c.)
|
|
366
|
+
return
|
|
367
|
+
|
|
368
|
+
if parID == "PTR_CalcConzInfo":
|
|
369
|
+
# another "special" topic handled in 'follow_calc_conz_info' ...
|
|
370
|
+
return
|
|
371
|
+
|
|
372
|
+
if parID not in _par_id_info.index:
|
|
373
|
+
log.warning(f"unknown par-ID in [{msg.topic}]")
|
|
374
|
+
return
|
|
375
|
+
|
|
376
|
+
payload = json.loads(msg.payload.decode())
|
|
377
|
+
if kind == "Act":
|
|
378
|
+
self.act_values[parID] = _parse_data_element(payload["DataElement"])
|
|
379
|
+
if kind == "Set":
|
|
380
|
+
self.set_values[parID] = _parse_data_element(payload["DataElement"])
|
|
381
|
+
except json.decoder.JSONDecodeError as exc:
|
|
382
|
+
log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
|
|
383
|
+
raise
|
|
384
|
+
except KeyError as exc:
|
|
385
|
+
log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
|
|
386
|
+
pass
|
|
387
|
+
except ParsingError as exc:
|
|
388
|
+
log.error(f"while parsing [{parID}] :: {str(exc)}")
|
|
389
|
+
pass
|
|
390
|
+
|
|
391
|
+
follow_act_set_values.topics = ["+/Act/+", "+/Set/+"]
|
|
392
|
+
|
|
393
|
+
def follow_cycle(client, self, msg):
|
|
394
|
+
if not msg.payload:
|
|
395
|
+
# empty payload will clear a retained topic
|
|
396
|
+
return
|
|
397
|
+
|
|
398
|
+
payload = json.loads(msg.payload.decode())
|
|
399
|
+
current = int(payload["DataElement"]["Value"])
|
|
400
|
+
# replace the current timecycle with the new element:
|
|
401
|
+
self._overallcycle.append(current)
|
|
402
|
+
|
|
403
|
+
follow_cycle.topics = ["DataCollection/Act/ACQ_SRV_OverallCycle"]
|
|
404
|
+
|
|
405
|
+
# collect all follow-functions together:
|
|
406
|
+
_subscriber_functions = [fun for name, fun in list(vars().items())
|
|
407
|
+
if callable(fun) and name.startswith('follow_')]
|
|
408
|
+
|
|
409
|
+
|
|
410
|
+
_NOT_INIT = object()
|
|
411
|
+
|
|
412
|
+
|
|
413
|
+
class MqttClient(MqttClientBase):
|
|
414
|
+
"""a simplified client for the Ionicon MQTT API.
|
|
415
|
+
|
|
416
|
+
> mq = MqttClient()
|
|
417
|
+
> mq.write('TCP_MCP_B', 3400)
|
|
418
|
+
ValueError()
|
|
419
|
+
|
|
420
|
+
"""
|
|
421
|
+
|
|
422
|
+
_sched_cmds = deque([_NOT_INIT], maxlen=None)
|
|
423
|
+
_server_state = deque([_NOT_INIT], maxlen=1)
|
|
424
|
+
_calcconzinfo = deque([_NOT_INIT], maxlen=1)
|
|
425
|
+
_sf_filename = deque([""], maxlen=1)
|
|
426
|
+
_overallcycle = deque([0], maxlen=1)
|
|
427
|
+
act_values = dict()
|
|
428
|
+
set_values = dict()
|
|
429
|
+
|
|
430
|
+
set_value_limit = {
|
|
431
|
+
"TCP_MCP_B": 3200.0,
|
|
432
|
+
}
|
|
433
|
+
|
|
434
|
+
@property
|
|
435
|
+
def is_connected(self):
|
|
436
|
+
'''Returns `True` if connection to IoniTOF could be established.'''
|
|
437
|
+
return (super().is_connected
|
|
438
|
+
and self._server_state[0] is not _NOT_INIT
|
|
439
|
+
and (len(self._sched_cmds) == 0 or self._sched_cmds[0] is not _NOT_INIT))
|
|
440
|
+
|
|
441
|
+
@property
|
|
442
|
+
def is_running(self):
|
|
443
|
+
'''Returns `True` if IoniTOF is currently acquiring data.'''
|
|
444
|
+
return self.current_server_state == 'ACQ_Aquire' # yes, there's a typo, plz keep it :)
|
|
445
|
+
|
|
446
|
+
@property
|
|
447
|
+
def current_schedule(self):
|
|
448
|
+
'''Returns a list with the upcoming write commands in ascending order.'''
|
|
449
|
+
if not self.is_connected:
|
|
450
|
+
return []
|
|
451
|
+
|
|
452
|
+
current_cycle = self._overallcycle[0]
|
|
453
|
+
filter_fun = lambda cmd: float(cmd["Schedule"]) > current_cycle
|
|
454
|
+
sorted_fun = lambda cmd: float(cmd["Schedule"])
|
|
455
|
+
|
|
456
|
+
return sorted(filter(filter_fun, self._sched_cmds), key=sorted_fun)
|
|
457
|
+
|
|
458
|
+
@property
|
|
459
|
+
def current_server_state(self):
|
|
460
|
+
'''Returns the state of the acquisition-server. One of:
|
|
461
|
+
|
|
462
|
+
- "ACQ_Idle"
|
|
463
|
+
- "ACQ_JustStarted"
|
|
464
|
+
- "ACQ_Aquire"
|
|
465
|
+
- "ACQ_Stopping"
|
|
466
|
+
|
|
467
|
+
or "<unknown>" if there's no connection to IoniTOF.
|
|
468
|
+
'''
|
|
469
|
+
if self.is_connected:
|
|
470
|
+
return self._server_state[0]
|
|
471
|
+
return "<unknown>"
|
|
472
|
+
|
|
473
|
+
@property
|
|
474
|
+
def current_sourcefile(self):
|
|
475
|
+
'''Returns the path to the hdf5-file that is currently being written.
|
|
476
|
+
|
|
477
|
+
Returns an empty string if no measurement is running.
|
|
478
|
+
'''
|
|
479
|
+
if not self.is_running:
|
|
480
|
+
return ""
|
|
481
|
+
|
|
482
|
+
if self._sf_filename[0] is not _NOT_INIT:
|
|
483
|
+
return self._sf_filename[0]
|
|
484
|
+
|
|
485
|
+
# Note: '_NOT_INIT' is set by us on start of acquisition, so we'd expect
|
|
486
|
+
# to receive the source-file-topic after a (generous) timeout:
|
|
487
|
+
timeout_s = 15
|
|
488
|
+
started_at = time.monotonic()
|
|
489
|
+
while time.monotonic() < started_at + timeout_s:
|
|
490
|
+
if self._sf_filename[0] is not _NOT_INIT:
|
|
491
|
+
return self._sf_filename[0]
|
|
492
|
+
|
|
493
|
+
time.sleep(10e-3)
|
|
494
|
+
else:
|
|
495
|
+
raise TimeoutError(f"[{self}] unable to retrieve source-file after ({timeout_s = })");
|
|
496
|
+
|
|
497
|
+
@property
|
|
498
|
+
def current_cycle(self):
|
|
499
|
+
'''Returns the current 'AbsCycle' (/'OverallCycle').'''
|
|
500
|
+
if self.is_running:
|
|
501
|
+
return self._overallcycle[0]
|
|
502
|
+
return 0
|
|
503
|
+
|
|
504
|
+
def __init__(self, host='127.0.0.1', port=1883):
|
|
505
|
+
# this sets up the mqtt connection with default callbacks:
|
|
506
|
+
super().__init__(host, port, _subscriber_functions, None, None, None)
|
|
507
|
+
log.debug(f"connection check ({self.is_connected}) :: {self._server_state = } / {self._sched_cmds = }");
|
|
508
|
+
|
|
509
|
+
def disconnect(self):
|
|
510
|
+
super().disconnect()
|
|
511
|
+
log.debug(f"[{self}] has disconnected")
|
|
512
|
+
# reset internal queues to their defaults:
|
|
513
|
+
self._sched_cmds = MqttClient._sched_cmds
|
|
514
|
+
self._server_state = MqttClient._server_state
|
|
515
|
+
self._calcconzinfo = MqttClient._calcconzinfo
|
|
516
|
+
self._sf_filename = MqttClient._sf_filename
|
|
517
|
+
self._overallcycle = MqttClient._overallcycle
|
|
518
|
+
self.act_values = MqttClient.act_values
|
|
519
|
+
self.set_values = MqttClient.set_values
|
|
520
|
+
|
|
521
|
+
def get(self, parID, kind="set"):
|
|
522
|
+
'''Return the last known value for the given `parID`.
|
|
523
|
+
|
|
524
|
+
- kind: one of 'set'/'act' (default: 'set')
|
|
525
|
+
|
|
526
|
+
A `KeyError` will be raised if the given `parID` is unknown!
|
|
527
|
+
'''
|
|
528
|
+
if not self.is_connected:
|
|
529
|
+
raise Exception(f"[{self}] no connection to instrument");
|
|
530
|
+
|
|
531
|
+
_lut = self.act_values if kind.lower() == "act" else self.set_values
|
|
532
|
+
is_read_only = ('W' not in _par_id_info.loc[parID].Access) # may raise KeyError!
|
|
533
|
+
if _lut is self.set_values and is_read_only:
|
|
534
|
+
raise ValueError(f"'{parID}' is read-only, did you mean `kind='act'`?")
|
|
535
|
+
|
|
536
|
+
if not parID in _lut:
|
|
537
|
+
# Note: The values should need NO! time to be populated from the MQTT topics,
|
|
538
|
+
# because all topics are published as *retained* by the PTR-server.
|
|
539
|
+
# However, a short timeout is respected before raising a `KeyError`:
|
|
540
|
+
time.sleep(200e-3)
|
|
541
|
+
rv = _lut.get(parID)
|
|
542
|
+
if rv is not None:
|
|
543
|
+
return rv
|
|
544
|
+
|
|
545
|
+
# still not found? give some useful hints for the user not to go crazy:
|
|
546
|
+
error_hint = (
|
|
547
|
+
"act" if parID in self.act_values else
|
|
548
|
+
"set" if parID in self.set_values else
|
|
549
|
+
"")
|
|
550
|
+
raise KeyError(str(parID) + (' (did you mean `kind="%s"`?)' % error_hint) if error_hint else "")
|
|
551
|
+
return _lut[parID]
|
|
552
|
+
|
|
553
|
+
def get_table(self, table_name):
|
|
554
|
+
timeout_s = 10
|
|
555
|
+
started_at = time.monotonic()
|
|
556
|
+
try:
|
|
557
|
+
while time.monotonic() < started_at + timeout_s:
|
|
558
|
+
# confirm change of state:
|
|
559
|
+
if not self._calcconzinfo[0] is _NOT_INIT:
|
|
560
|
+
return self._calcconzinfo[0].tables[table_name]
|
|
561
|
+
|
|
562
|
+
time.sleep(10e-3)
|
|
563
|
+
else:
|
|
564
|
+
raise TimeoutError(f"[{self}] unable to retrieve calc-conz-info from PTR server");
|
|
565
|
+
except KeyError as exc:
|
|
566
|
+
raise KeyError(str(exc) + f", possible values: {list(CalcConzInfo.tables.keys())}")
|
|
567
|
+
|
|
568
|
+
def set(self, parID, new_value, unit='-'):
|
|
569
|
+
'''Set a 'new_value' to 'parID' in the DataCollection.'''
|
|
570
|
+
if not self.is_connected:
|
|
571
|
+
raise Exception(f"[{self}] no connection to instrument");
|
|
572
|
+
|
|
573
|
+
raise NotImplementedError("DataCollection/Set, did you mean .write(parID)?")
|
|
574
|
+
|
|
575
|
+
topic, qos, retain = "DataCollection/Set/" + str(parID), 1, True
|
|
576
|
+
log.info(f"setting '{parID}' ~> [{new_value}]")
|
|
577
|
+
payload = {
|
|
578
|
+
"Header": _build_header(),
|
|
579
|
+
"DataElement": _build_data_element(new_value, unit),
|
|
580
|
+
}
|
|
581
|
+
return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
582
|
+
|
|
583
|
+
def filter_schedule(self, parID):
|
|
584
|
+
'''Returns a list with the upcoming write commands for 'parID' in ascending order.'''
|
|
585
|
+
return (cmd for cmd in self.current_schedule if cmd["ParaID"] == str(parID))
|
|
586
|
+
|
|
587
|
+
def write(self, parID, new_value):
|
|
588
|
+
'''Write a 'new_value' to 'parID' directly.'''
|
|
589
|
+
if not self.is_connected:
|
|
590
|
+
raise Exception(f"[{self}] no connection to instrument");
|
|
591
|
+
|
|
592
|
+
if not 'W' in _par_id_info.loc[parID].Access: # may raise KeyError!
|
|
593
|
+
raise ValueError(f"'{parID}' is read-only")
|
|
594
|
+
|
|
595
|
+
if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
|
|
596
|
+
raise ValueError("set value limit of {__class__.set_value_limit[parID]} on '{parID}'")
|
|
597
|
+
|
|
598
|
+
topic, qos, retain = "IC_Command/Write/Direct", 1, False
|
|
599
|
+
log.info(f"writing '{parID}' ~> [{new_value}]")
|
|
600
|
+
cmd = _build_write_command(parID, new_value)
|
|
601
|
+
payload = {
|
|
602
|
+
"Header": _build_header(),
|
|
603
|
+
"CMDs": [ cmd, ]
|
|
604
|
+
}
|
|
605
|
+
return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
606
|
+
|
|
607
|
+
def schedule(self, parID, new_value, future_cycle):
|
|
608
|
+
'''Schedule a 'new_value' to 'parID' for the given 'future_cycle'.
|
|
609
|
+
|
|
610
|
+
If 'future_cycle' is in fact in the past, the behaviour is defined by IoniTOF
|
|
611
|
+
(most likely the command is ignored). To be sure, the '.current_cycle' should
|
|
612
|
+
be checked before and after running the '.schedule' command programmatically!
|
|
613
|
+
'''
|
|
614
|
+
if not self.is_connected:
|
|
615
|
+
raise Exception(f"[{self}] no connection to instrument");
|
|
616
|
+
|
|
617
|
+
if not 'W' in _par_id_info.loc[parID].Access: # may raise KeyError!
|
|
618
|
+
raise ValueError(f"'{parID}' is read-only")
|
|
619
|
+
|
|
620
|
+
if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
|
|
621
|
+
raise ValueError("set value limit of {__class__.set_value_limit[parID]} on '{parID}'")
|
|
622
|
+
|
|
623
|
+
if (future_cycle == 0 and not self.is_running):
|
|
624
|
+
# Note: ioniTOF40 doesn't handle scheduling for the 0th cycle!
|
|
625
|
+
if parID == "AME_ActionNumber":
|
|
626
|
+
# a) the action-number will trigger a script for the 0th cycle, so
|
|
627
|
+
# we *must* be scheduling it!
|
|
628
|
+
self.write("AME_ActionNumber", new_value)
|
|
629
|
+
elif parID.startswith("AME_"):
|
|
630
|
+
# b) the AME-numbers cannot (currently) be set (i.e. written), but since
|
|
631
|
+
# they are inserted just *before* the cycle, this will work just fine:
|
|
632
|
+
future_cycle = 1
|
|
633
|
+
else:
|
|
634
|
+
# c) in all other cases, let's assume the measurement will start soon
|
|
635
|
+
# and dare to write immediately, skipping the schedule altogether:
|
|
636
|
+
log.debug(f"immediately writing {parID = } @ cycle '0' (measurement stopped)")
|
|
637
|
+
return self.write(parID, new_value)
|
|
638
|
+
|
|
639
|
+
if not future_cycle > self.current_cycle:
|
|
640
|
+
log.warn(f"attempting to schedule past cycle, hope you know what you're doing");
|
|
641
|
+
pass # and at least let's debug it in MQTT browser (see also doc-string above)!
|
|
642
|
+
|
|
643
|
+
topic, qos, retain = "IC_Command/Write/Scheduled", 1, False
|
|
644
|
+
log.info(f"scheduling '{parID}' ~> [{new_value}] for cycle ({future_cycle})")
|
|
645
|
+
cmd = _build_write_command(parID, new_value, future_cycle)
|
|
646
|
+
payload = {
|
|
647
|
+
"Header": _build_header(),
|
|
648
|
+
"CMDs": [ cmd, ]
|
|
649
|
+
}
|
|
650
|
+
return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)
|
|
651
|
+
|
|
652
|
+
def schedule_filename(self, path, future_cycle):
|
|
653
|
+
'''Start writing to a new .h5 file with the beginning of 'future_cycle'.'''
|
|
654
|
+
assert str(path), "filename cannot be empty!"
|
|
655
|
+
# try to make sure that IoniTOF accepts the path:
|
|
656
|
+
if self.host == '127.0.0.1':
|
|
657
|
+
os.makedirs(os.path.dirname(path), exist_ok=True)
|
|
658
|
+
try:
|
|
659
|
+
with open(path, 'x'):
|
|
660
|
+
log.info("touched new file:", path)
|
|
661
|
+
except FileExistsError as exc:
|
|
662
|
+
log.error(f"new filename '{path}' already exists and will not be scheduled!")
|
|
663
|
+
return
|
|
664
|
+
|
|
665
|
+
return self.schedule('ACQ_SRV_SetFullStorageFile', path.replace('/', '\\'), future_cycle)
|
|
666
|
+
|
|
667
|
+
def start_measurement(self, path=None):
|
|
668
|
+
'''Start a new measurement and block until the change is confirmed.
|
|
669
|
+
|
|
670
|
+
If 'path' is not None, write to the given .h5 file.
|
|
671
|
+
'''
|
|
672
|
+
if not path:
|
|
673
|
+
self.write('ACQ_SRV_Start_Meas_Quick', True)
|
|
674
|
+
else:
|
|
675
|
+
self.write('ACQ_SRV_Start_Meas_Record', path.replace('/', '\\'))
|
|
676
|
+
timeout_s = 30
|
|
677
|
+
started_at = time.monotonic()
|
|
678
|
+
while time.monotonic() < started_at + timeout_s:
|
|
679
|
+
if self.is_running:
|
|
680
|
+
break
|
|
681
|
+
|
|
682
|
+
time.sleep(10e-3)
|
|
683
|
+
else:
|
|
684
|
+
self.disconnect()
|
|
685
|
+
raise TimeoutError(f"[{self}] error starting measurement");
|
|
686
|
+
|
|
687
|
+
def stop_measurement(self, future_cycle=None):
|
|
688
|
+
'''Stop the current measurement and block until the change is confirmed.
|
|
689
|
+
|
|
690
|
+
If 'future_cycle' is not None and in the future, schedule the stop command.'''
|
|
691
|
+
if future_cycle is None or not future_cycle > self._overallcycle[0]:
|
|
692
|
+
self.write('ACQ_SRV_Stop_Meas', True)
|
|
693
|
+
else:
|
|
694
|
+
self.schedule('ACQ_SRV_Stop_Meas', True, future_cycle)
|
|
695
|
+
# may need to wait until the scheduled event..
|
|
696
|
+
if future_cycle is not None:
|
|
697
|
+
self.block_until(future_cycle)
|
|
698
|
+
# ..for this timeout to be applicable:
|
|
699
|
+
timeout_s = 30
|
|
700
|
+
started_at = time.monotonic()
|
|
701
|
+
while time.monotonic() < started_at + timeout_s:
|
|
702
|
+
# confirm change of state:
|
|
703
|
+
if not self.is_running:
|
|
704
|
+
break
|
|
705
|
+
|
|
706
|
+
time.sleep(10e-3)
|
|
707
|
+
else:
|
|
708
|
+
self.disconnect()
|
|
709
|
+
raise TimeoutError(f"[{self}] error stopping measurement");
|
|
710
|
+
|
|
711
|
+
def block_until(self, cycle):
    '''Blocks the current thread until at least 'cycle' has passed or acquisition stopped.

    Returns the actual current cycle.
    '''
    target = int(cycle)
    reached = False
    # poll the overall-cycle counter while the acquisition is running:
    while self.is_running and not reached:
        reached = self._overallcycle[0] >= target
        if not reached:
            time.sleep(10e-3)

    if not reached:
        # acquisition stopped before the requested cycle came to pass:
        return 0

    return self._overallcycle[0]
def iter_specdata(self, timeout_s=None, buffer_size=300):
    '''Returns an iterator over the fullcycle-data as long as it is available.

    * This will wait up to `timeout_s` (or indefinitely if `None`) for a
      measurement to start or raise a TimeoutError (default: None).
    * Elements will be buffered up to a maximum of `buffer_size` cycles (default: 300).
    * Cycles recorded prior to calling `next()` on the iterator may be missed,
      so ideally this should be set up before any measurement is running.
    * [Important]: When the buffer runs full, a `queue.Full` exception will be raised!
      Therefore, the caller should consume the iterator as soon as possible while the
      measurement is running.

    :param timeout_s: seconds to wait for the first cycle, or None to wait forever.
    :param buffer_size: maximum number of cycles held in the internal queue.
    :raises Exception: if not connected to the MQTT broker.
    :raises TimeoutError: if no data arrived within `timeout_s`.
    :raises queue.Full: if the caller consumed too slowly and the buffer overran.
    '''
    q = queue.Queue(buffer_size)
    topic = "DataCollection/Act/ACQ_SRV_FullCycleData"
    qos = 2

    def callback(client, self, msg):
        try:
            q.put_nowait(_parse_fullcycle(msg.payload, need_add_data=True))
            log.debug(f"received fullcycle, buffer at ({q.qsize()}/{q.maxsize})")
        except queue.Full:
            # DO NOT FAIL INSIDE THE CALLBACK!
            log.error(f"iter_specdata({q.maxsize}): fullcycle buffer overrun!")
            client.unsubscribe(topic)
        except Exception as ex:
            # DO NOT FAIL INSIDE THE CALLBACK!
            log.warning(f"got {ex!r} while parsing {len(msg.payload) = }")

    if not self.is_connected:
        raise Exception("no connection to MQTT broker")

    # Note: when using a simple generator function like this, the following lines
    # will not be excecuted until the first call to `next` on the iterator!
    # this means, the callback will not yet be executed, the queue not filled
    # and we might miss the first cycles...
    self.client.message_callback_add(topic, callback)
    self.client.subscribe(topic, qos)
    try:
        # Note: Prior to 3.0 on POSIX systems, and for *all versions on Windows*,
        # if block is true and timeout is None, [the q.get()] operation goes into an
        # uninterruptible wait on an underlying lock. This means that no exceptions
        # can occur, and in particular a SIGINT will not trigger a KeyboardInterrupt!
        yield q.get(block=True, timeout=timeout_s)  # waiting for measurement to run...

        while self.is_running or not q.empty():
            if q.full():
                # re-raise what we swallowed in the callback..
                raise queue.Full

            if not self.is_connected:
                # no more data will come, so better prevent a deadlock:
                break

            try:
                yield q.get(block=True, timeout=1.0)  # seconds
            except queue.Empty:
                continue

    except queue.Empty:
        assert timeout_s is not None, "this should never happen"
        # BUGFIX: the f-prefix was missing here, so the message printed the
        # literal text "{timeout_s}" instead of the actual timeout value:
        raise TimeoutError(f"no measurement running after {timeout_s} seconds")

    finally:
        # ...also, when using more than one iterator, the first to finish will
        # unsubscribe and cause all others to stop maybe before the time!
        # all of this might not actually be an issue right now, but
        # TODO :: fix this weird behaviour (can only be done by implementing the
        # iterator-protocol properly using a helper class)!
        self.client.unsubscribe(topic)
        self.client.message_callback_remove(topic)
# append the payload-format description from the parser helper to the
# public docstring of iter_specdata (kept in one place to avoid drift):
iter_specdata.__doc__ += _parse_fullcycle.__doc__
def __repr__(self):
    '''Return a short, unambiguous description of this client and its host.'''
    cls_name = self.__class__.__name__
    return "<{}[{}]>".format(cls_name, self.host)