pytrms 0.9.2__py3-none-any.whl → 0.9.5__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pytrms/clients/mqtt.py CHANGED
@@ -1,797 +1,820 @@
1
- import os
2
- import time
3
- import json
4
- import queue
5
- from collections import deque, namedtuple
6
- from datetime import datetime
7
- from functools import wraps
8
- from itertools import cycle, chain, zip_longest
9
- from threading import Condition, RLock
10
-
11
- from . import _logging
12
- from . import _par_id_file
13
- from .._base import itype, MqttClientBase
14
-
15
-
16
- log = _logging.getLogger(__name__)
17
-
18
- __all__ = ['MqttClient', 'MqttClientBase']
19
-
20
-
21
# Load the parameter-ID table shipped with the package; it maps every known
# par-ID to, among others, its 'Access' rights (checked by .get()/.write()).
with open(_par_id_file) as f:
    from pandas import read_csv, isna

    # tab-separated file; the first data row is dropped (presumably a
    # units/placeholder row -- TODO confirm against the shipped id-file):
    _par_id_info = read_csv(f, sep='\t').drop(0).set_index('Name')
    if isna(_par_id_info.at['MPV_1', 'Access']):
        # older id-files lack the Access entry for the MPV parameters;
        # patch them in so the read/write checks below keep working:
        log.warning(f'filling in read-properties still missing in {os.path.basename(_par_id_file)}')
        _par_id_info.at['MPV_1', 'Access'] = 'RW'
        _par_id_info.at['MPV_2', 'Access'] = 'RW'
        _par_id_info.at['MPV_3', 'Access'] = 'RW'
30
-
31
-
32
-
33
- ## >>>>>>>> adaptor functions <<<<<<<< ##
34
-
35
- def _build_header():
36
- ts = datetime.now()
37
- header = {
38
- "TimeStamp": {
39
- "Str": ts.isoformat(),
40
- "sec": ts.timestamp() + 2082844800, # convert to LabVIEW time
41
- },
42
- }
43
- return header
44
-
45
- def _build_data_element(value, unit="-"):
46
- elm = {
47
- "Datatype": "",
48
- "Index": -1,
49
- "Value": str(value),
50
- "Unit": str(unit),
51
- }
52
- if isinstance(value, bool):
53
- # Note: True is also instance of int! Therefore, we must check it first:
54
- elm.update({"Datatype": "BOOL", "Value": str(value).lower()})
55
- elif isinstance(value, str):
56
- elm.update({"Datatype": "STRING"})
57
- elif isinstance(value, int):
58
- elm.update({"Datatype": "I32"})
59
- elif isinstance(value, float):
60
- elm.update({"Datatype": "DBL"})
61
- else:
62
- raise NotImplemented("unknown datatype")
63
-
64
- return elm
65
-
66
- def _build_write_command(parID, value, future_cycle=None):
67
- cmd = {
68
- "ParaID": str(parID),
69
- "Value": str(value),
70
- "Datatype": "",
71
- "CMDMode": "Set",
72
- "Index": -1,
73
- }
74
- if future_cycle is not None:
75
- cmd.update({
76
- "SchedMode": "OverallCycle",
77
- "Schedule": str(future_cycle),
78
- })
79
- if isinstance(value, bool):
80
- # Note: True is also instance of int!
81
- cmd.update({"Datatype": "BOOL", "Value": str(value).lower()})
82
- elif isinstance(value, str):
83
- cmd.update({"Datatype": "STRING"})
84
- elif isinstance(value, int):
85
- cmd.update({"Datatype": "I32"})
86
- elif isinstance(value, float):
87
- cmd.update({"Datatype": "DBL"})
88
- else:
89
- raise NotImplemented("unknown datatype")
90
-
91
- return cmd
92
-
93
-
94
- ## >>>>>>>> parsing functions <<<<<<<< ##
95
-
96
class ParsingError(Exception):
    '''Raised when an Ionicon payload cannot be decoded.'''
98
-
99
-
100
- def _parse_data_element(elm):
101
- '''
102
- raises: ParsingError, KeyError
103
- '''
104
- # make a Python object of a DataElement
105
- if elm["Datatype"] == "BOOL":
106
- return bool(elm["Value"])
107
- elif elm["Datatype"] == "DBL":
108
- return float(elm["Value"])
109
- elif elm["Datatype"] == "SGL":
110
- return float(elm["Value"])
111
- elif elm["Datatype"] == "I32":
112
- return int(elm["Value"])
113
- elif elm["Datatype"] == "I16":
114
- return int(elm["Value"])
115
- elif elm["Datatype"] == "STRING":
116
- return str(elm["Value"])
117
- raise ParsingError("unknown datatype: " + str(elm["Datatype"]))
118
-
119
def _parse_fullcycle(byte_string, need_add_data=False):
    '''Parses 'timecycle', 'intensity', 'mass_cal' and 'add_data' from bytes.

    Important: the byteorder of the parsed arrays will be big-endian! This
    may be aligned if needed with the `.byteswap()`-method on the array,
    but is not automatically performed to avoid any extra copy.

    @params
    - need_add_data  if `False`, the 'mass_cal' and 'add_data' returned will be None

    Parsing the AddData-cluster is much slower than parsing the intensity-array!
    This may be skipped to improve performance, but is necessary for loading
    the 'mass_cal' anyway. For orientation:

    performance (on a Intel Core i5, 8th Gen Ubuntu Linux):
      < 2 ms when `need_add_data=False` (default)
      6-7 ms when needing to parse the AddData-cluster (else)

    @returns a namedtuple ('timecycle', 'intensity', 'mass_cal', 'add_data')
    '''
    import numpy as np

    # the flattened payload is big-endian; build matching dtypes once:
    _f32 = np.dtype(np.float32).newbyteorder('>')
    _f64 = np.dtype(np.float64).newbyteorder('>')
    _i16 = np.dtype(np.int16).newbyteorder('>')
    _i32 = np.dtype(np.int32).newbyteorder('>')
    _i64 = np.dtype(np.int64).newbyteorder('>')
    _chr = np.dtype(np.int8).newbyteorder('>')

    # running byte-offset into `byte_string`; advanced by every rd_* helper:
    offset = 0

    def rd_single(dtype=_i32):
        # read one scalar at `offset` and advance past it
        nonlocal offset
        _arr = np.frombuffer(byte_string, dtype=dtype, count=1, offset=offset)
        offset += _arr.nbytes
        return _arr[0]

    def rd_single(dtype=_i32):
        nonlocal offset
        _arr = np.frombuffer(byte_string, dtype=dtype, count=1, offset=offset)
        offset += _arr.nbytes
        return _arr[0]

    def rd_arr1d(dtype=_f32, count=None):
        # read a 1d-array; unless `count` is fixed, a leading i32 gives the length
        nonlocal offset
        if count is None:
            count = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=count, offset=offset)
        offset += arr.nbytes
        return arr

    def rd_arr2d(dtype=_f32):
        # read a 2d-array prefixed by its two i32 dimensions (row-major)
        nonlocal offset
        n = rd_single()
        m = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=n*m, offset=offset)
        offset += arr.nbytes
        return arr.reshape((n, m))

    def rd_string():
        # read a length-prefixed byte-string (latin-1; leading NULs stripped)
        nonlocal offset
        return rd_arr1d(dtype=_chr).tobytes().decode('latin-1').lstrip('\x00')

    tc_cluster = rd_arr1d(dtype=_f64, count=4)
    run__, cpx__ = rd_arr1d(dtype=_f64, count=2)  # (discarded)
    # SpecData #
    intensity = rd_arr1d(dtype=_f32)
    sum_inty = rd_arr1d(dtype=_f32)  # (discarded)
    mon_peaks = rd_arr2d(dtype=_f32)  # (discarded)

    if not need_add_data:
        # skip costly parsing of Trace- and Add-Data cluster:
        return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, None, None)

    # TraceData # (as yet discarded)
    tc_cluster2 = rd_arr1d(dtype=_f64, count=6)
    twoD_raw = rd_arr2d(dtype=_f32)
    sum_raw = rd_arr1d(dtype=_f32)
    sum_corr = rd_arr1d(dtype=_f32)
    sum_conz = rd_arr1d(dtype=_f32)
    calc_traces = rd_arr1d(dtype=_f32)
    n_calc_trcs = rd_single()
    for i in range(n_calc_trcs):
        # one name-string per calculated trace; only the offset advance matters here
        calc_names = rd_arr1d(dtype=_chr)
    # NOTE(review): assumed to sit *outside* the loop (one array of peak
    # centers for the whole cycle) -- confirm against the flattened format:
    peak_centrs = rd_arr1d(dtype=_f32)
    # AddData #
    add_data = dict()
    n_add_data = rd_single()
    for i in range(n_add_data):
        grp_name = rd_string()
        descr = []
        for i in range(rd_single()):
            descr.append(rd_string())
        units = []
        for i in range(rd_single()):
            units.append(rd_string())
        data = rd_arr1d(dtype=_f32)
        view = rd_arr1d(dtype=_chr)
        n_lv_times = rd_single()
        offset += 16 * n_lv_times  # skipping LabVIEW timestamp
        add_data[grp_name] = [itype.add_data_item_t(*tup) for tup in zip_longest(data, descr, units, view)]

    # MassCal #
    mc_masses = rd_arr1d(dtype=_f64)
    mc_tbins = rd_arr1d(dtype=_f64)
    cal_paras = rd_arr1d(dtype=_f64)
    segmnt_cal_pars = rd_arr2d(dtype=_f64)
    mcal_mode = rd_single(dtype=_i16)
    mass_cal = itype.masscal_t(mcal_mode, mc_masses, mc_tbins, cal_paras, segmnt_cal_pars)

    return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, mass_cal, add_data)
224
-
225
-
226
class CalcConzInfo:
    '''Holds the primary-ion and transmission tables published by IoniTOF.

    The tables are *instance* attributes, filled by `load_json`.
    '''

    def __init__(self):
        # per-instance table container; each entry is an itype.table_setting_t
        self.tables = {
            "primary_ions": list(),
            "transmission": list(),
        }

    @staticmethod
    def load_json(json_string):
        '''Parse a 'PTR_CalcConzInfo' JSON payload into a new `CalcConzInfo`.

        NOTE(review): relies on the payload schema (PISets/TransSets keys)
        as published by IoniTOF -- verify against an actual payload.
        '''
        cc = CalcConzInfo()
        j = json.loads(json_string)
        delm = j["DataElement"]
        for li in delm["Value"]["PISets"]["PiSets"]:
            if not li["PriIonSetName"]:
                # an empty name marks the end of the used entries:
                log.info(f'loaded ({len(cc.tables["primary_ions"])}) primary-ion settings')
                break

            # only positive masses are meaningful; pair them with multipliers:
            masses = map(float, filter(lambda x: x > 0, li["PriIonSetMasses"]))
            values = map(float, li["PriIonSetMultiplier"])
            cc.tables["primary_ions"].append(itype.table_setting_t(str(li["PriIonSetName"]), list(zip(masses, values))))

        for li in j["DataElement"]["Value"]["TransSets"]["Transsets"]:
            if not li["Name"]:
                # an empty name marks the end of the used entries:
                log.info(f'loaded ({len(cc.tables["transmission"])}) transmission settings')
                break

            masses = map(float, filter(lambda x: x > 0, li["Mass"]))
            values = map(float, li["Value"])
            # float(li["Voltage"]) # (not used)
            cc.tables["transmission"].append(itype.table_setting_t(str(li["Name"]), list(zip(masses, values))))

        return cc
259
-
260
-
261
- ## >>>>>>>> callback functions <<<<<<<< ##
262
-
263
def follow_calc_conz_info(client, self, msg):
    # MQTT callback: (re-)load the pi-/tm-tables, but only when the current
    # entry has been invalidated to `_NOT_INIT` (see `follow_state`).
    if not msg.payload:
        # empty payload will clear a retained topic
        self._calcconzinfo = MqttClient._calcconzinfo
        return

    if not self._calcconzinfo[0] is _NOT_INIT:
        # nothing to do..
        return

    log.debug(f"updating tm-/pi-table from {msg.topic}...")
    # maxlen-1 deque: appending replaces the `_NOT_INIT` sentinel
    self._calcconzinfo.append(CalcConzInfo.load_json(msg.payload.decode('latin-1')))

follow_calc_conz_info.topics = ["PTR/Act/PTR_CalcConzInfo"]
277
-
278
def follow_schedule(client, self, msg):
    '''MQTT callback keeping `self._sched_cmds` in sync with IoniTOF's schedule.

    Handles three topics (see `follow_schedule.topics`):
    - ..._ScheduleClear: drop all known commands
    - ..._Schedule:      the full schedule as maintained by IoniTOF (retained)
    - IC_Command/...:    freshly added scheduling requests
    '''
    with follow_schedule._lock:
        if msg.topic.endswith("SRV_ScheduleClear"):
            self._sched_cmds.clear()
            return

        if msg.topic.endswith("SRV_Schedule"):
            if not msg.payload:
                # bugfix: `log.warn` is a deprecated alias of `log.warning`:
                log.warning("empty ACQ_SRV_Schedule payload has cleared retained topic")
                self._sched_cmds.clear()
                return

            if msg.retain:
                # Note: we either have received a message that has been
                # retained because of a new connection..
                payload = json.loads(msg.payload.decode())
                self._sched_cmds.clear()
                self._sched_cmds.extend(payload["CMDs"])
            else:
                # ..or the schedule as maintained by IoniTOF has changed,
                # which we handle ourselves below:
                pass

        if msg.topic.startswith("IC_Command"):
            if not msg.payload:
                log.error("empty IC_Command! has topic been cleared?")
                return

            # these are the freshly added scheduling requests:
            payload = json.loads(msg.payload.decode())
            self._sched_cmds.extend(payload["CMDs"])

follow_schedule.topics = [
    "DataCollection/Act/ACQ_SRV_Schedule",
    "DataCollection/Set/ACQ_SRV_ScheduleClear",
    "IC_Command/Write/Scheduled"
]
# serializes concurrent callback invocations mutating `_sched_cmds`:
follow_schedule._lock = RLock()
316
-
317
def follow_state(client, self, msg):
    # MQTT callback: track the acquisition-server state and trigger the
    # dependent invalidations on (re-)start of a measurement.
    if not msg.payload:
        # empty payload will clear a retained topic
        self._server_state = MqttClient._server_state
        return

    payload = json.loads(msg.payload.decode())
    state = payload["DataElement"]["Value"]
    log.debug(f"[{self}] new server-state: " + str(state))
    # replace the current state with the new element:
    self._server_state.append(state)
    meas_running = (state == "ACQ_Aquire")  # yes, there's a typo, plz keep it :)
    # a retained message means we merely re-connected mid-measurement:
    just_started = (meas_running and not msg.retain)
    if meas_running:
        # signal the relevant thread(s) that we need an update:
        self._calcconzinfo.append(_NOT_INIT)
    if just_started:
        # invalidate the source-file until we get a new one:
        self._sf_filename.append(_NOT_INIT)

follow_state.topics = ["DataCollection/Act/ACQ_SRV_CurrentState"]
338
-
339
def follow_sourcefile(client, self, msg):
    '''MQTT callback tracking the path of the hdf5-file currently written.'''
    if not msg.payload:
        # an empty payload clears a retained topic -> fall back to the default:
        self._sf_filename = MqttClient._sf_filename
        return

    decoded = json.loads(msg.payload.decode())
    new_path = decoded["DataElement"]["Value"]
    log.debug(f"[{self}] new source-file: " + str(new_path))
    # the single-slot deque drops the previous path on append:
    self._sf_filename.append(new_path)

follow_sourcefile.topics = ["DataCollection/Act/ACQ_SRV_SetFullStorageFile"]
352
-
353
def follow_act_set_values(client, self, msg):
    # MQTT callback: mirror every generic '<server>/Act/<parID>' and
    # '<server>/Set/<parID>' topic into `self.act_values`/`self.set_values`.
    if not msg.payload:
        # empty payload will clear a retained topic
        return

    try:
        server, kind, parID = msg.topic.split('/')
        if server == "DataCollection":
            # Note: this topic doesn't strictly follow the convention and is handled separately
            return

        if server == "Sequencer":
            # Note: this is a separate program and will be ignored (has its own AUTO_-numbers etc.)
            return

        if parID == "PTR_CalcConzInfo":
            # another "special" topic handled in 'follow_calc_conz_info' ...
            return

        if parID not in _par_id_info.index:
            log.warning(f"unknown par-ID in [{msg.topic}]")
            return

        payload = json.loads(msg.payload.decode())
        if kind == "Act":
            self.act_values[parID] = _parse_data_element(payload["DataElement"])
        if kind == "Set":
            self.set_values[parID] = _parse_data_element(payload["DataElement"])
    except json.decoder.JSONDecodeError as exc:
        # malformed JSON is fatal -- log and re-raise:
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
        raise
    except KeyError as exc:
        # missing keys (or a 3-part split failure upstream) are logged and skipped:
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
        pass
    except ParsingError as exc:
        # unknown datatype in the DataElement -- logged and skipped:
        log.error(f"while parsing [{parID}] :: {str(exc)}")
        pass

follow_act_set_values.topics = ["+/Act/+", "+/Set/+"]
392
-
393
def follow_cycle(client, self, msg):
    '''MQTT callback tracking the current overall-cycle number.'''
    if not msg.payload:
        # nothing to do on an empty (topic-clearing) payload:
        return

    decoded = json.loads(msg.payload.decode())
    cycle_number = int(decoded["DataElement"]["Value"])
    # the single-slot deque drops the previous cycle number on append:
    self._overallcycle.append(cycle_number)

follow_cycle.topics = ["DataCollection/Act/ACQ_SRV_OverallCycle"]
404
-
405
# collect all follow-functions together:
# (every module-level callable named 'follow_*' defined above becomes a
# subscriber callback, each carrying its own `.topics` attribute)
_subscriber_functions = [fun for name, fun in list(vars().items())
        if callable(fun) and name.startswith('follow_')]


# sentinel marking "value not received yet"; always compared via `is`:
_NOT_INIT = object()
411
-
412
-
413
class MqttClient(MqttClientBase):
    """a simplified client for the Ionicon MQTT API.

    > mq = MqttClient()
    > mq.write('TCP_MCP_B', 3400)
    ValueError()

    """

    # class-level state containers; the follow_*-callbacks append to these.
    # NOTE(review): until an instance attribute shadows them, these are
    # shared by all instances -- confirm single-instance use is intended.
    _sched_cmds = deque([_NOT_INIT], maxlen=None)
    _server_state = deque([_NOT_INIT], maxlen=1)
    _calcconzinfo = deque([_NOT_INIT], maxlen=1)
    _sf_filename = deque([""], maxlen=1)
    _overallcycle = deque([0], maxlen=1)
    act_values = dict()
    set_values = dict()

    # hard upper limits enforced by .write() and .schedule():
    set_value_limit = {
        "TCP_MCP_B": 3200.0,
    }

    @property
    def is_connected(self):
        '''Returns `True` if connection to IoniTOF could be established.'''
        # connected means: broker link up AND the retained state/schedule
        # topics have replaced their `_NOT_INIT` sentinels:
        return (super().is_connected
                and self._server_state[0] is not _NOT_INIT
                and (len(self._sched_cmds) == 0 or self._sched_cmds[0] is not _NOT_INIT))

    @property
    def is_running(self):
        '''Returns `True` if IoniTOF is currently acquiring data.'''
        return self.current_server_state == 'ACQ_Aquire'  # yes, there's a typo, plz keep it :)

    @property
    def current_schedule(self):
        '''Returns a list with the upcoming write commands in ascending order.'''
        if not self.is_connected:
            return []

        current_cycle = self._overallcycle[0]
        # keep only commands scheduled strictly after the current cycle:
        filter_fun = lambda cmd: float(cmd["Schedule"]) > current_cycle
        sorted_fun = lambda cmd: float(cmd["Schedule"])

        return sorted(filter(filter_fun, self._sched_cmds), key=sorted_fun)

    @property
    def current_server_state(self):
        '''Returns the state of the acquisition-server. One of:

        - "ACQ_Idle"
        - "ACQ_JustStarted"
        - "ACQ_Aquire"
        - "ACQ_Stopping"

        or "<unknown>" if there's no connection to IoniTOF.
        '''
        if self.is_connected:
            return self._server_state[0]
        return "<unknown>"

    @property
    def current_sourcefile(self):
        '''Returns the path to the hdf5-file that is currently being written.

        Returns an empty string if no measurement is running.

        raises: TimeoutError if the source-file is not published in time
        '''
        if not self.is_running:
            return ""

        if self._sf_filename[0] is not _NOT_INIT:
            return self._sf_filename[0]

        # Note: '_NOT_INIT' is set by us on start of acquisition, so we'd expect
        # to receive the source-file-topic after a (generous) timeout:
        timeout_s = 15
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            if self._sf_filename[0] is not _NOT_INIT:
                return self._sf_filename[0]

            time.sleep(10e-3)
        else:
            raise TimeoutError(f"[{self}] unable to retrieve source-file after ({timeout_s = })");

    @property
    def current_cycle(self):
        '''Returns the current 'AbsCycle' (/'OverallCycle').'''
        if self.is_running:
            return self._overallcycle[0]
        return 0

    def __init__(self, host='127.0.0.1', port=1883):
        # this sets up the mqtt connection with default callbacks:
        super().__init__(host, port, _subscriber_functions, None, None, None)
        log.debug(f"connection check ({self.is_connected}) :: {self._server_state = } / {self._sched_cmds = }");

    def disconnect(self):
        '''Disconnect from the broker and reset the cached state.'''
        super().disconnect()
        log.debug(f"[{self}] has disconnected")
        # reset internal queues to their defaults:
        # NOTE(review): this re-binds the *class-level* containers without
        # copying or clearing them, so previous contents survive -- confirm.
        self._sched_cmds = MqttClient._sched_cmds
        self._server_state = MqttClient._server_state
        self._calcconzinfo = MqttClient._calcconzinfo
        self._sf_filename = MqttClient._sf_filename
        self._overallcycle = MqttClient._overallcycle
        self.act_values = MqttClient.act_values
        self.set_values = MqttClient.set_values

    def get(self, parID, kind="set"):
        '''Return the last known value for the given `parID`.

        - kind: one of 'set'/'act' (default: 'set')

        A `KeyError` will be raised if the given `parID` is unknown!
        '''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument");

        _lut = self.act_values if kind.lower() == "act" else self.set_values
        is_read_only = ('W' not in _par_id_info.loc[parID].Access)  # may raise KeyError!
        if _lut is self.set_values and is_read_only:
            raise ValueError(f"'{parID}' is read-only, did you mean `kind='act'`?")

        if not parID in _lut:
            # Note: The values should need NO! time to be populated from the MQTT topics,
            # because all topics are published as *retained* by the PTR-server.
            # However, a short timeout is respected before raising a `KeyError`:
            time.sleep(200e-3)
            rv = _lut.get(parID)
            if rv is not None:
                return rv

            # still not found? give some useful hints for the user not to go crazy:
            error_hint = (
                "act" if parID in self.act_values else
                "set" if parID in self.set_values else
                "")
            # NOTE(review): operator precedence makes this `KeyError("")` when
            # there is no hint (the conditional spans the whole concatenation);
            # parentheses around the trailing conditional were likely intended.
            raise KeyError(str(parID) + (' (did you mean `kind="%s"`?)' % error_hint) if error_hint else "")
        return _lut[parID]

    def get_table(self, table_name):
        '''Return the named table from the PTR server's calc-conz-info.

        - table_name: one of 'primary_ions'/'transmission'

        Blocks up to 10 seconds until the info has been published.
        '''
        timeout_s = 10
        started_at = time.monotonic()
        try:
            while time.monotonic() < started_at + timeout_s:
                # confirm change of state:
                if not self._calcconzinfo[0] is _NOT_INIT:
                    return self._calcconzinfo[0].tables[table_name]

                time.sleep(10e-3)
            else:
                raise TimeoutError(f"[{self}] unable to retrieve calc-conz-info from PTR server");
        except KeyError as exc:
            # NOTE(review): `CalcConzInfo.tables` is an *instance* attribute
            # (set in __init__), so this class-level access raises
            # AttributeError instead of producing the intended hint -- verify.
            raise KeyError(str(exc) + f", possible values: {list(CalcConzInfo.tables.keys())}")

    def set(self, parID, new_value, unit='-'):
        '''Set a 'new_value' to 'parID' in the DataCollection.'''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument");

        # NOTE(review): deliberately disabled; everything below the raise is
        # unreachable and apparently kept for reference only:
        raise NotImplementedError("DataCollection/Set, did you mean .write(parID)?")

        topic, qos, retain = "DataCollection/Set/" + str(parID), 1, True
        log.info(f"setting '{parID}' ~> [{new_value}]")
        payload = {
            "Header": _build_header(),
            "DataElement": _build_data_element(new_value, unit),
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def filter_schedule(self, parID):
        '''Returns a list with the upcoming write commands for 'parID' in ascending order.'''
        # NOTE(review): actually returns a generator, not a list.
        return (cmd for cmd in self.current_schedule if cmd["ParaID"] == str(parID))

    def write(self, parID, new_value):
        '''Write a 'new_value' to 'parID' directly.

        raises: KeyError for an unknown parID, ValueError if it is read-only
                or exceeds its `set_value_limit`
        '''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument");

        if not 'W' in _par_id_info.loc[parID].Access:  # may raise KeyError!
            raise ValueError(f"'{parID}' is read-only")

        if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
            # NOTE(review): missing f-prefix -- the placeholders are not filled in:
            raise ValueError("set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

        topic, qos, retain = "IC_Command/Write/Direct", 1, False
        log.info(f"writing '{parID}' ~> [{new_value}]")
        cmd = _build_write_command(parID, new_value)
        payload = {
            "Header": _build_header(),
            "CMDs": [ cmd, ]
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def schedule(self, parID, new_value, future_cycle):
        '''Schedule a 'new_value' to 'parID' for the given 'future_cycle'.

        If 'future_cycle' is in fact in the past, the behaviour is defined by IoniTOF
        (most likely the command is ignored). To be sure, the '.current_cycle' should
        be checked before and after running the '.schedule' command programmatically!
        '''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument");

        if not 'W' in _par_id_info.loc[parID].Access:  # may raise KeyError!
            raise ValueError(f"'{parID}' is read-only")

        if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
            # NOTE(review): missing f-prefix -- the placeholders are not filled in:
            raise ValueError("set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

        if (future_cycle == 0 and not self.is_running):
            # Note: ioniTOF40 doesn't handle scheduling for the 0th cycle!
            if parID == "AME_ActionNumber":
                # a) the action-number will trigger a script for the 0th cycle, so
                #    we *must* be scheduling it! (written now AND scheduled below)
                self.write("AME_ActionNumber", new_value)
            elif parID.startswith("AME_"):
                # b) the AME-numbers cannot (currently) be set (i.e. written), but since
                #    they are inserted just *before* the cycle, this will work just fine:
                future_cycle = 1
            else:
                # c) in all other cases, let's assume the measurement will start soon
                #    and dare to write immediately, skipping the schedule altogether:
                log.debug(f"immediately writing {parID = } @ cycle '0' (measurement stopped)")
                return self.write(parID, new_value)

        if not future_cycle > self.current_cycle:
            log.warn(f"attempting to schedule past cycle, hope you know what you're doing");
            pass  # and at least let's debug it in MQTT browser (see also doc-string above)!

        topic, qos, retain = "IC_Command/Write/Scheduled", 1, False
        log.info(f"scheduling '{parID}' ~> [{new_value}] for cycle ({future_cycle})")
        cmd = _build_write_command(parID, new_value, future_cycle)
        payload = {
            "Header": _build_header(),
            "CMDs": [ cmd, ]
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def schedule_filename(self, path, future_cycle):
        '''Start writing to a new .h5 file with the beginning of 'future_cycle'.'''
        assert str(path), "filename cannot be empty!"
        # try to make sure that IoniTOF accepts the path:
        if self.host == '127.0.0.1':
            os.makedirs(os.path.dirname(path), exist_ok=True)
            try:
                with open(path, 'x'):
                    # NOTE(review): stdlib logging expects %-style args; this
                    # extra positional argument will not be rendered -- verify
                    # against the `_logging` helper's behaviour.
                    log.info("touched new file:", path)
            except FileExistsError as exc:
                log.error(f"new filename '{path}' already exists and will not be scheduled!")
                return

        # IoniTOF expects Windows-style path separators:
        return self.schedule('ACQ_SRV_SetFullStorageFile', path.replace('/', '\\'), future_cycle)

    def start_measurement(self, path=None):
        '''Start a new measurement and block until the change is confirmed.

        If 'path' is not None, write to the given .h5 file.

        raises: TimeoutError (and disconnects) if the start is not confirmed
        '''
        if not path:
            self.write('ACQ_SRV_Start_Meas_Quick', True)
        else:
            self.write('ACQ_SRV_Start_Meas_Record', path.replace('/', '\\'))
        timeout_s = 30
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            if self.is_running:
                break

            time.sleep(10e-3)
        else:
            self.disconnect()
            raise TimeoutError(f"[{self}] error starting measurement");

    def stop_measurement(self, future_cycle=None):
        '''Stop the current measurement and block until the change is confirmed.

        If 'future_cycle' is not None and in the future, schedule the stop command.'''
        if future_cycle is None or not future_cycle > self._overallcycle[0]:
            self.write('ACQ_SRV_Stop_Meas', True)
        else:
            self.schedule('ACQ_SRV_Stop_Meas', True, future_cycle)
        # may need to wait until the scheduled event..
        if future_cycle is not None:
            self.block_until(future_cycle)
        # ..for this timeout to be applicable:
        timeout_s = 30
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            # confirm change of state:
            if not self.is_running:
                break

            time.sleep(10e-3)
        else:
            self.disconnect()
            raise TimeoutError(f"[{self}] error stopping measurement");

    def block_until(self, cycle):
        '''Blocks the current thread until at least 'cycle' has passed or acquisition stopped.

        Returns the actual current cycle.
        '''
        while self.is_running:
            if self._overallcycle[0] >= int(cycle):
                break
            time.sleep(10e-3)
        else:
            # acquisition stopped before reaching 'cycle':
            return 0

        return self._overallcycle[0]

    def iter_specdata(self, timeout_s=None, buffer_size=300):
        '''Returns an iterator over the fullcycle-data as long as it is available.

        * This will wait up to `timeout_s` (or indefinitely if `None`) for a
          measurement to start or raise a TimeoutError (default: None).
        * Elements will be buffered up to a maximum of `buffer_size` cycles (default: 300).
        * Cycles recorded prior to calling `next()` on the iterator may be missed,
          so ideally this should be set up before any measurement is running.
        * [Important]: When the buffer runs full, a `queue.Full` exception will be raised!
          Therefore, the caller should consume the iterator as soon as possible while the
          measurement is running.
        '''
        q = queue.Queue(buffer_size)
        topic = "DataCollection/Act/ACQ_SRV_FullCycleData"
        qos = 2

        def callback(client, self, msg):
            # runs on the MQTT network thread; only buffers, never raises
            try:
                q.put_nowait(_parse_fullcycle(msg.payload, need_add_data=True))
                log.debug(f"received fullcycle, buffer at ({q.qsize()}/{q.maxsize})")
            except queue.Full:
                # DO NOT FAIL INSIDE THE CALLBACK!
                log.error(f"iter_specdata({q.maxsize}): fullcycle buffer overrun!")
                client.unsubscribe(topic)

        if not self.is_connected:
            raise Exception("no connection to MQTT broker")

        # Note: when using a simple generator function like this, the following lines
        # will not be excecuted until the first call to `next` on the iterator!
        # this means, the callback will not yet be executed, the queue not filled
        # and we might miss the first cycles...
        self.client.message_callback_add(topic, callback)
        self.client.subscribe(topic, qos)
        try:
            # Note: Prior to 3.0 on POSIX systems, and for *all versions on Windows*,
            # if block is true and timeout is None, [the q.get()] operation goes into an
            # uninterruptible wait on an underlying lock. This means that no exceptions
            # can occur, and in particular a SIGINT will not trigger a KeyboardInterrupt!
            yield q.get(block=True, timeout=timeout_s)  # waiting for measurement to run...

            while self.is_running or not q.empty():
                if q.full():
                    # re-raise what we swallowed in the callback..
                    raise queue.Full

                if not self.is_connected:
                    # no more data will come, so better prevent a deadlock:
                    break

                try:
                    yield q.get(block=True, timeout=1.0)  # seconds
                except queue.Empty:
                    continue

        except queue.Empty:
            assert timeout_s is not None, "this should never happen"
            # NOTE(review): missing f-prefix -- '{timeout_s}' is not filled in:
            raise TimeoutError("no measurement running after {timeout_s} seconds")

        finally:
            # ...also, when using more than one iterator, the first to finish will
            # unsubscribe and cause all others to stop maybe before the time!
            # all of this might not actually be an issue right now, but
            # TODO :: fix this weird behaviour (can only be done by implementing the
            # iterator-protocol properly using a helper class)!
            self.client.unsubscribe(topic)
            self.client.message_callback_remove(topic)

    # append the binary-format details to the iterator's docstring:
    iter_specdata.__doc__ += _parse_fullcycle.__doc__

    def __repr__(self):
        return f"<{self.__class__.__name__}[{self.host}]>"
797
-
1
+ import os
2
+ import time
3
+ import json
4
+ import queue
5
+ from collections import deque, namedtuple
6
+ from datetime import datetime
7
+ from functools import wraps
8
+ from itertools import cycle, chain, zip_longest
9
+ from threading import Condition, RLock
10
+
11
+ from . import _logging
12
+ from . import _par_id_file
13
+ from .._base import itype, MqttClientBase
14
+
15
+
16
+ log = _logging.getLogger(__name__)
17
+
18
+ __all__ = ['MqttClient', 'MqttClientBase']
19
+
20
+
21
# Load the parameter-ID table shipped with the package; it maps every known
# par-ID to, among others, its 'Access' rights (checked by .get()/.write()).
with open(_par_id_file) as f:
    from pandas import read_csv, isna

    # tab-separated file; the first data row is dropped (presumably a
    # units/placeholder row -- TODO confirm against the shipped id-file):
    _par_id_info = read_csv(f, sep='\t').drop(0).set_index('Name')
    if isna(_par_id_info.at['MPV_1', 'Access']):
        # older id-files lack the Access entry for the MPV parameters;
        # patch them in so the read/write checks below keep working:
        log.warning(f'filling in read-properties still missing in {os.path.basename(_par_id_file)}')
        _par_id_info.at['MPV_1', 'Access'] = 'RW'
        _par_id_info.at['MPV_2', 'Access'] = 'RW'
        _par_id_info.at['MPV_3', 'Access'] = 'RW'
30
+
31
+
32
+
33
+ ## >>>>>>>> adaptor functions <<<<<<<< ##
34
+
35
+ def _build_header():
36
+ ts = datetime.now()
37
+ header = {
38
+ "TimeStamp": {
39
+ "Str": ts.isoformat(),
40
+ "sec": ts.timestamp() + 2082844800, # convert to LabVIEW time
41
+ },
42
+ }
43
+ return header
44
+
45
+ def _build_data_element(value, unit="-"):
46
+ elm = {
47
+ "Datatype": "",
48
+ "Index": -1,
49
+ "Value": str(value),
50
+ "Unit": str(unit),
51
+ }
52
+ if isinstance(value, bool):
53
+ # Note: True is also instance of int! Therefore, we must check it first:
54
+ elm.update({"Datatype": "BOOL", "Value": str(value).lower()})
55
+ elif isinstance(value, str):
56
+ elm.update({"Datatype": "STRING"})
57
+ elif isinstance(value, int):
58
+ elm.update({"Datatype": "I32"})
59
+ elif isinstance(value, float):
60
+ elm.update({"Datatype": "DBL"})
61
+ else:
62
+ raise NotImplemented("unknown datatype")
63
+
64
+ return elm
65
+
66
+ def _build_write_command(parID, value, future_cycle=None):
67
+ cmd = {
68
+ "ParaID": str(parID),
69
+ "Value": str(value),
70
+ "Datatype": "",
71
+ "CMDMode": "Set",
72
+ "Index": -1,
73
+ }
74
+ if future_cycle is not None:
75
+ cmd.update({
76
+ "SchedMode": "OverallCycle",
77
+ "Schedule": str(future_cycle),
78
+ })
79
+ if isinstance(value, bool):
80
+ # Note: True is also instance of int!
81
+ cmd.update({"Datatype": "BOOL", "Value": str(value).lower()})
82
+ elif isinstance(value, str):
83
+ cmd.update({"Datatype": "STRING"})
84
+ elif isinstance(value, int):
85
+ cmd.update({"Datatype": "I32"})
86
+ elif isinstance(value, float):
87
+ cmd.update({"Datatype": "DBL"})
88
+ else:
89
+ raise NotImplemented("unknown datatype")
90
+
91
+ return cmd
92
+
93
+
94
+ ## >>>>>>>> parsing functions <<<<<<<< ##
95
+
96
class ParsingError(Exception):
    """Raised when a payload carries a 'Datatype' this module cannot decode."""
98
+
99
+
100
+ def _parse_data_element(elm):
101
+ '''
102
+ raises: ParsingError, KeyError
103
+ '''
104
+ # make a Python object of a DataElement
105
+ if elm["Datatype"] == "BOOL":
106
+ return bool(elm["Value"])
107
+ elif elm["Datatype"] == "DBL":
108
+ return float(elm["Value"])
109
+ elif elm["Datatype"] == "SGL":
110
+ return float(elm["Value"])
111
+ elif elm["Datatype"] == "I32":
112
+ return int(elm["Value"])
113
+ elif elm["Datatype"] == "I16":
114
+ return int(elm["Value"])
115
+ elif elm["Datatype"] == "STRING":
116
+ return str(elm["Value"])
117
+ raise ParsingError("unknown datatype: " + str(elm["Datatype"]))
118
+
119
def _parse_fullcycle(byte_string, need_add_data=False):
    '''Parses 'timecycle', 'intensity', 'mass_cal' and 'add_data' from bytes.

    Important: the byteorder of the parsed arrays will be big-endian! This
    may be aligned if needed with the `.byteswap()`-method on the array,
    but is not automatically performed to avoid any extra copy.

    @params
    - need_add_data  if `False`, the 'mass_cal' and 'add_data' returned will be None

    Parsing the AddData-cluster is much slower than parsing the intensity-array!
    This may be skipped to improve performance, but is necessary for loading
    the 'mass_cal' anyway. For orientation:

    performance (on a Intel Core i5, 8th Gen Ubuntu Linux):
      < 2 ms when `need_add_data=False` (default)
      6-7 ms when needing to parse the AddData-cluster (else)

    @returns a namedtuple ('timecycle', 'intensity', 'mass_cal', 'add_data')
    '''
    import numpy as np

    # big-endian dtypes -- presumably the flattened-LabVIEW wire format is
    # network byte order (TODO confirm against the IoniTOF docs):
    _f32 = np.dtype(np.float32).newbyteorder('>')
    _f64 = np.dtype(np.float64).newbyteorder('>')
    _i16 = np.dtype(np.int16).newbyteorder('>')
    _i32 = np.dtype(np.int32).newbyteorder('>')
    _i64 = np.dtype(np.int64).newbyteorder('>')
    _chr = np.dtype(np.int8).newbyteorder('>')

    # running read-cursor into `byte_string`, advanced by the rd_* helpers:
    offset = 0

    def rd_single(dtype=_i32):
        # read one scalar at `offset` and advance the cursor
        nonlocal offset
        _arr = np.frombuffer(byte_string, dtype=dtype, count=1, offset=offset)
        offset += _arr.nbytes
        return _arr[0]

    def rd_arr1d(dtype=_f32, count=None):
        # read a 1-d array; a leading i32 length-prefix is consumed when
        # `count` is not given
        nonlocal offset
        if count is None:
            count = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=count, offset=offset)
        offset += arr.nbytes
        return arr

    def rd_arr2d(dtype=_f32):
        # read a 2-d array prefixed by its two i32 dimensions (n rows, m cols)
        nonlocal offset
        n = rd_single()
        m = rd_single()
        arr = np.frombuffer(byte_string, dtype=dtype, count=n*m, offset=offset)
        offset += arr.nbytes
        return arr.reshape((n, m))

    def rd_string():
        # read a length-prefixed byte-string; strip leading NUL padding
        nonlocal offset
        return rd_arr1d(dtype=_chr).tobytes().decode('latin-1').lstrip('\x00')

    # leading timecycle cluster (4 doubles, see `itype.timecycle_t`):
    tc_cluster = rd_arr1d(dtype=_f64, count=4)
    run__, cpx__ = rd_arr1d(dtype=_f64, count=2) # (discarded)
    # SpecData #
    intensity = rd_arr1d(dtype=_f32)
    sum_inty = rd_arr1d(dtype=_f32) # (discarded)
    mon_peaks = rd_arr2d(dtype=_f32) # (discarded)

    if not need_add_data:
        # skip costly parsing of Trace- and Add-Data cluster:
        return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, None, None)

    # TraceData # (as yet discarded -- still has to be read to advance `offset`)
    tc_cluster2 = rd_arr1d(dtype=_f64, count=6)
    twoD_raw = rd_arr2d(dtype=_f32)
    sum_raw = rd_arr1d(dtype=_f32)
    sum_corr = rd_arr1d(dtype=_f32)
    sum_conz = rd_arr1d(dtype=_f32)
    calc_traces = rd_arr1d(dtype=_f32)
    n_calc_trcs = rd_single()
    for i in range(n_calc_trcs):
        calc_names = rd_arr1d(dtype=_chr)
    peak_centrs = rd_arr1d(dtype=_f32)
    # AddData #
    add_data = dict()
    n_add_data = rd_single()
    for i in range(n_add_data):
        grp_name = rd_string()
        descr = []
        for i in range(rd_single()):
            descr.append(rd_string())
        units = []
        for i in range(rd_single()):
            units.append(rd_string())
        data = rd_arr1d(dtype=_f32)
        view = rd_arr1d(dtype=_chr)
        n_lv_times = rd_single()
        offset += 16 * n_lv_times # skipping LabVIEW timestamp
        # zip_longest: `descr`/`units`/`view` may be shorter than `data`;
        # missing positions are filled with None
        add_data[grp_name] = [itype.add_data_item_t(*tup) for tup in zip_longest(data, descr, units, view)]

    # MassCal #
    mc_masses = rd_arr1d(dtype=_f64)
    mc_tbins = rd_arr1d(dtype=_f64)
    cal_paras = rd_arr1d(dtype=_f64)
    segmnt_cal_pars = rd_arr2d(dtype=_f64)
    mcal_mode = rd_single(dtype=_i16)
    mass_cal = itype.masscal_t(mcal_mode, mc_masses, mc_tbins, cal_paras, segmnt_cal_pars)

    return itype.fullcycle_t(itype.timecycle_t(*tc_cluster), intensity, mass_cal, add_data)
224
+
225
+
226
class CalcConzInfo:
    '''Holds the primary-ion and transmission tables published by the PTR
    server on the 'PTR/Act/PTR_CalcConzInfo' topic.
    '''

    def __init__(self):
        # mapping of table-name -> list of `itype.table_setting_t` entries
        self.tables = {
            "primary_ions": list(),
            "transmission": list(),
        }

    @staticmethod
    def load_json(json_string):
        '''Parse a 'PTR_CalcConzInfo' JSON payload into a new `CalcConzInfo`.

        raises: json.JSONDecodeError, KeyError (unexpected schema)
        '''
        cc = CalcConzInfo()
        j = json.loads(json_string)
        delm = j["DataElement"]
        for li in delm["Value"]["PISets"]["PiSets"]:
            if not li["PriIonSetName"]:
                # an unnamed entry marks the end of the used slots:
                log.info(f'loaded ({len(cc.tables["primary_ions"])}) primary-ion settings')
                break

            # NOTE(review): masses are filtered with `x > 0` *before* the
            # float conversion -- assumes the JSON already carries numbers,
            # not strings; confirm against an actual payload
            masses = map(float, filter(lambda x: x > 0, li["PriIonSetMasses"]))
            values = map(float, li["PriIonSetMultiplier"])
            cc.tables["primary_ions"].append(itype.table_setting_t(str(li["PriIonSetName"]), list(zip(masses, values))))

        for li in j["DataElement"]["Value"]["TransSets"]["Transsets"]:
            if not li["Name"]:
                # an unnamed entry marks the end of the used slots:
                log.info(f'loaded ({len(cc.tables["transmission"])}) transmission settings')
                break

            masses = map(float, filter(lambda x: x > 0, li["Mass"]))
            values = map(float, li["Value"])
            # float(li["Voltage"]) # (not used)
            cc.tables["transmission"].append(itype.table_setting_t(str(li["Name"]), list(zip(masses, values))))

        return cc
259
+
260
+
261
+ ## >>>>>>>> callback functions <<<<<<<< ##
262
+
263
def follow_calc_conz_info(client, self, msg):
    '''Cache the tm-/pi-tables once after each acquisition (re)start.'''
    if not msg.payload:
        # an empty payload clears a retained topic -> fall back to the
        # uninitialized class-level default:
        self._calcconzinfo = MqttClient._calcconzinfo
        return

    if self._calcconzinfo[0] is not _NOT_INIT:
        # already initialized for this run -- nothing to do..
        return

    log.debug(f"updating tm-/pi-table from {msg.topic}...")
    decoded = msg.payload.decode('latin-1')
    self._calcconzinfo.append(CalcConzInfo.load_json(decoded))

follow_calc_conz_info.topics = ["PTR/Act/PTR_CalcConzInfo"]
277
+
278
def follow_schedule(client, self, msg):
    '''Mirror IoniTOF's schedule of pending write-commands into `self._sched_cmds`.

    Handles three topics (see `follow_schedule.topics`):
    - ..ScheduleClear: drop all cached commands
    - ..Schedule:      the full schedule as maintained by IoniTOF (retained)
    - IC_Command/..:   freshly added scheduling requests
    '''
    with follow_schedule._lock:
        if msg.topic.endswith("SRV_ScheduleClear"):
            self._sched_cmds.clear()
            return

        if msg.topic.endswith("SRV_Schedule"):
            if not msg.payload:
                # fix: `.warning` instead of the long-deprecated `.warn`:
                log.warning("empty ACQ_SRV_Schedule payload has cleared retained topic")
                self._sched_cmds.clear()
                return

            if msg.retain:
                # Note: we either have received a message that has been
                # retained because of a new connection..
                payload = json.loads(msg.payload.decode())
                self._sched_cmds.clear()
                self._sched_cmds.extend(payload["CMDs"])
            else:
                # ..or the schedule as maintained by IoniTOF has changed,
                # which we handle ourselves below:
                pass

        if msg.topic.startswith("IC_Command"):
            if not msg.payload:
                log.error("empty IC_Command! has topic been cleared?")
                return

            # these are the freshly added scheduling requests:
            payload = json.loads(msg.payload.decode())
            self._sched_cmds.extend(payload["CMDs"])

follow_schedule.topics = [
    "DataCollection/Act/ACQ_SRV_Schedule",
    "DataCollection/Set/ACQ_SRV_ScheduleClear",
    "IC_Command/Write/Scheduled"
]
# serializes concurrent callback invocations mutating `_sched_cmds`:
follow_schedule._lock = RLock()
316
+
317
def follow_state(client, self, msg):
    '''Track the acquisition-server state and (in)validate dependent caches.'''
    if not msg.payload:
        # an empty payload clears a retained topic -> reset to default:
        self._server_state = MqttClient._server_state
        return

    state = json.loads(msg.payload.decode())["DataElement"]["Value"]
    log.debug(f"[{self}] new server-state: " + str(state))
    # replace the current state with the new element (maxlen == 1):
    self._server_state.append(state)
    if state == "ACQ_Aquire":  # yes, there's a typo, plz keep it :)
        # measurement running: signal the relevant thread(s) that the
        # calc-conz-info needs an update:
        self._calcconzinfo.append(_NOT_INIT)
    else:
        # Note: the user-interface in `.current_sourcefile` checks for the
        #  above _server_state and expects an initialized filename if and
        #  only if the server is running! Therefore, we can safely
        #  invalidate the source-file until we get a new one:
        self._sf_filename.append(_NOT_INIT)

follow_state.topics = ["DataCollection/Act/ACQ_SRV_CurrentState"]
340
+
341
def follow_sourcefile(client, self, msg):
    '''Track the path of the .h5 file currently being recorded.'''
    if not msg.payload:
        # an empty payload clears a retained topic -> reset to default:
        self._sf_filename = MqttClient._sf_filename
        return

    new_path = json.loads(msg.payload.decode())["DataElement"]["Value"]
    log.debug(f"[{self}] new source-file: " + str(new_path))
    # replace the current path with the new element (maxlen == 1):
    self._sf_filename.append(new_path)

follow_sourcefile.topics = ["DataCollection/Act/ACQ_SRV_SetFullStorageFile"]
354
+
355
def follow_act_set_values(client, self, msg):
    # Cache every published Act-/Set-value into `self.act_values` /
    # `self.set_values`, keyed by parameter-ID (subscribed on the wildcard
    # topics '+/Act/+' and '+/Set/+' below).
    if not msg.payload:
        # empty payload will clear a retained topic
        return

    try:
        # expected topic layout: "<server>/<Act|Set>/<parID>"
        server, kind, parID = msg.topic.split('/')
        if server == "DataCollection":
            # Note: this topic doesn't strictly follow the convention and is handled separately
            if kind != "Set":
                return

        if server == "Sequencer":
            # Note: this is a separate program and will be ignored (has its own AUTO_-numbers et.c.)
            return

        if parID == "PTR_CalcConzInfo":
            # another "special" topic handled in 'follow_calc_conz_info' ...
            return

        if parID not in _par_id_info.index:
            log.warning(f"unknown par-ID in [{msg.topic}]")
            return

        payload = json.loads(msg.payload.decode())
        if kind == "Act":
            self.act_values[parID] = _parse_data_element(payload["DataElement"])
        if kind == "Set":
            self.set_values[parID] = _parse_data_element(payload["DataElement"])
    except json.decoder.JSONDecodeError as exc:
        # NOTE(review): re-raised inside an MQTT callback -- presumably
        # swallowed/logged by the paho client loop, confirm
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
        raise
    except KeyError as exc:
        # missing field in an otherwise valid payload: log and carry on
        log.error(f"{exc.__class__.__name__}: {exc} :: while processing [{msg.topic}] ({msg.payload})")
        pass
    except ParsingError as exc:
        # unknown 'Datatype': log and carry on
        log.error(f"while parsing [{parID}] :: {str(exc)}")
        pass

follow_act_set_values.topics = ["+/Act/+", "+/Set/+"]
395
+
396
def follow_cycle(client, self, msg):
    '''Track the current overall-cycle number.'''
    if not msg.payload:
        # an empty payload clears a retained topic -- keep the last value
        return

    decoded = json.loads(msg.payload.decode())
    # replace the current timecycle with the new element (maxlen == 1):
    self._overallcycle.append(int(decoded["DataElement"]["Value"]))

follow_cycle.topics = ["DataCollection/Act/ACQ_SRV_OverallCycle"]
407
+
408
# collect all follow-functions together:
# (at module level `vars()` is this module's namespace, so this picks up every
# `follow_*` callback defined above together with its `.topics` attribute)
_subscriber_functions = [fun for name, fun in list(vars().items())
                         if callable(fun) and name.startswith('follow_')]


# sentinel marking "value not yet received" in the deques below
# (looked up lazily by the callbacks, so defining it here is fine):
_NOT_INIT = object()
414
+
415
+
416
class MqttClient(MqttClientBase):
    """a simplified client for the Ionicon MQTT API.

    > mq = MqttClient()
    > mq.write('TCP_MCP_B', 3400)
    ValueError()

    """

    # Note: these are *class-level* defaults shared by all instances until an
    # instance rebinds them (see `.disconnect()`); maxlen=1 keeps only the
    # most recent value:
    _sched_cmds = deque([_NOT_INIT], maxlen=None)
    _server_state = deque([_NOT_INIT], maxlen=1)
    _calcconzinfo = deque([_NOT_INIT], maxlen=1)
    _sf_filename = deque([""], maxlen=1)
    _overallcycle = deque([0], maxlen=1)
    act_values = dict()
    set_values = dict()

    # hard upper limits enforced before any write/schedule command:
    set_value_limit = {
        "TCP_MCP_B": 3200.0,
    }

    @property
    def is_connected(self):
        '''Returns `True` if connection to IoniTOF could be established.'''
        return (super().is_connected
            and self._server_state[0] is not _NOT_INIT
            and (len(self._sched_cmds) == 0 or self._sched_cmds[0] is not _NOT_INIT))

    @property
    def is_running(self):
        '''Returns `True` if IoniTOF is currently acquiring data.'''
        return self.current_server_state == 'ACQ_Aquire'  # yes, there's a typo, plz keep it :)

    @property
    def current_schedule(self):
        '''Returns a list with the upcoming write commands in ascending order.'''
        if not self.is_connected:
            return []

        current_cycle = self._overallcycle[0]
        filter_fun = lambda cmd: float(cmd["Schedule"]) > current_cycle
        sorted_fun = lambda cmd: float(cmd["Schedule"])

        return sorted(filter(filter_fun, self._sched_cmds), key=sorted_fun)

    @property
    def current_server_state(self):
        '''Returns the state of the acquisition-server. One of:

        - "ACQ_Idle"
        - "ACQ_JustStarted"
        - "ACQ_Aquire"
        - "ACQ_Stopping"

        or "<unknown>" if there's no connection to IoniTOF.
        '''
        if self.is_connected:
            return self._server_state[0]
        return "<unknown>"

    @property
    def current_sourcefile(self):
        '''Returns the path to the hdf5-file that is currently being written.

        Returns an empty string if no measurement is running.

        raises TimeoutError if the filename is not published in time.
        '''
        if not self.is_running:
            return ""

        if self._sf_filename[0] is not _NOT_INIT:
            return self._sf_filename[0]

        # Note: '_NOT_INIT' is set by us on start of acquisition, so we'd expect
        #  to receive the source-file-topic after a (generous) timeout:
        timeout_s = 15
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            if self._sf_filename[0] is not _NOT_INIT:
                return self._sf_filename[0]

            time.sleep(10e-3)
        else:
            raise TimeoutError(f"[{self}] unable to retrieve source-file after ({timeout_s = })")

    @property
    def current_cycle(self):
        '''Returns the current 'AbsCycle' (/'OverallCycle').'''
        if self.is_running:
            return self._overallcycle[0]
        return 0

    def __init__(self, host='127.0.0.1', port=1883):
        # this sets up the mqtt connection with default callbacks:
        super().__init__(host, port, _subscriber_functions, None, None, None)
        log.debug(f"connection check ({self.is_connected}) :: {self._server_state = } / {self._sched_cmds = }")

    def disconnect(self):
        '''Disconnect from the broker and reset the cached state.'''
        super().disconnect()
        log.debug(f"[{self}] has disconnected")
        # reset internal queues to their defaults:
        # NOTE(review): this rebinds the *shared* class-level containers
        # instead of fresh copies -- confirm this aliasing is intended
        self._sched_cmds = MqttClient._sched_cmds
        self._server_state = MqttClient._server_state
        self._calcconzinfo = MqttClient._calcconzinfo
        self._sf_filename = MqttClient._sf_filename
        self._overallcycle = MqttClient._overallcycle
        self.act_values = MqttClient.act_values
        self.set_values = MqttClient.set_values

    def get(self, parID, kind="set"):
        '''Return the last known value for the given `parID`.

        - kind: one of 'set'/'act' (default: 'set')

        A `KeyError` will be raised if the given `parID` is unknown!
        '''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument")

        _lut = self.act_values if kind.lower() == "act" else self.set_values
        is_read_only = ('W' not in _par_id_info.loc[parID].Access)  # may raise KeyError!
        if _lut is self.set_values and is_read_only:
            raise ValueError(f"'{parID}' is read-only, did you mean `kind='act'`?")

        if not parID in _lut:
            # Note: The values should need NO! time to be populated from the MQTT topics,
            #  because all topics are published as *retained* by the PTR-server.
            #  However, a short timeout is respected before raising a `KeyError`:
            time.sleep(200e-3)
            rv = _lut.get(parID)
            if rv is not None:
                return rv

            # still not found? give some useful hints for the user not to go crazy:
            error_hint = (
                "act" if parID in self.act_values else
                "set" if parID in self.set_values else
                "")
            # fix: the conditional expression must be parenthesized -- the old
            # precedence made `KeyError("")` when there was no hint, dropping
            # the parID from the message entirely:
            raise KeyError(str(parID) + ((' (did you mean `kind="%s"`?)' % error_hint) if error_hint else ""))

        return _lut[parID]

    def get_table(self, table_name):
        '''Return the named calc-conz table ('primary_ions' or 'transmission').

        Blocks until the info has been received from the PTR server.

        raises TimeoutError | KeyError (unknown `table_name`)
        '''
        timeout_s = 10
        started_at = time.monotonic()
        try:
            while time.monotonic() < started_at + timeout_s:
                # confirm change of state:
                if not self._calcconzinfo[0] is _NOT_INIT:
                    return self._calcconzinfo[0].tables[table_name]

                time.sleep(10e-3)
            else:
                raise TimeoutError(f"[{self}] unable to retrieve calc-conz-info from PTR server")
        except KeyError as exc:
            # fix: `.tables` is an *instance* attribute, so the old
            # `CalcConzInfo.tables` raised AttributeError while reporting;
            # build a throwaway instance to list the valid names:
            raise KeyError(str(exc) + f", possible values: {list(CalcConzInfo().tables.keys())}")

    def set(self, parID, new_value, unit='-'):
        '''Set a 'new_value' to 'parID' in the DataCollection.'''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument")

        raise NotImplementedError("DataCollection/Set, did you mean .write(parID)?")

        # NOTE(review): unreachable reference implementation below, kept on purpose:
        topic, qos, retain = "DataCollection/Set/" + str(parID), 1, True
        log.info(f"setting '{parID}' ~> [{new_value}]")
        payload = {
            "Header": _build_header(),
            "DataElement": _build_data_element(new_value, unit),
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def filter_schedule(self, parID):
        '''Yields the upcoming write commands for 'parID' in ascending order.'''
        return (cmd for cmd in self.current_schedule if cmd["ParaID"] == str(parID))

    def write(self, parID, new_value):
        '''Write a 'new_value' to 'parID' directly.'''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument")

        if not 'W' in _par_id_info.loc[parID].Access:  # may raise KeyError!
            raise ValueError(f"'{parID}' is read-only")

        if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
            # fix: message was missing its f-prefix and never interpolated:
            raise ValueError(f"set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

        topic, qos, retain = "IC_Command/Write/Direct", 1, False
        log.info(f"writing '{parID}' ~> [{new_value}]")
        cmd = _build_write_command(parID, new_value)
        payload = {
            "Header": _build_header(),
            "CMDs": [ cmd, ]
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def schedule(self, parID, new_value, future_cycle):
        '''Schedule a 'new_value' to 'parID' for the given 'future_cycle'.

        If 'future_cycle' is in fact in the past, the behaviour is defined by IoniTOF
        (most likely the command is ignored). To be sure, the '.current_cycle' should
        be checked before and after running the '.schedule' command programmatically!
        '''
        if not self.is_connected:
            raise Exception(f"[{self}] no connection to instrument")

        if not 'W' in _par_id_info.loc[parID].Access:  # may raise KeyError!
            raise ValueError(f"'{parID}' is read-only")

        if parID in __class__.set_value_limit and new_value > __class__.set_value_limit[parID]:
            # fix: message was missing its f-prefix and never interpolated:
            raise ValueError(f"set value limit of {__class__.set_value_limit[parID]} on '{parID}'")

        if (future_cycle == 0 and not self.is_running):
            # Note: ioniTOF40 doesn't handle scheduling for the 0th cycle!
            if parID == "AME_ActionNumber":
                # a) the action-number will trigger a script for the 0th cycle, so
                #    we *must* be scheduling it!
                self.write("AME_ActionNumber", new_value)
            elif parID.startswith("AME_"):
                # b) the AME-numbers cannot (currently) be set (i.e. written), but since
                #    they are inserted just *before* the cycle, this will work just fine:
                future_cycle = 1
            else:
                # c) in all other cases, let's assume the measurement will start soon
                #    and dare to write immediately, skipping the schedule altogether:
                log.debug(f"immediately writing {parID = } @ cycle '0' (measurement stopped)")
                return self.write(parID, new_value)

        if not future_cycle > self.current_cycle:
            # fix: `.warning` instead of the deprecated `.warn`:
            log.warning(f"attempting to schedule past cycle, hope you know what you're doing")
            pass  # and at least let's debug it in MQTT browser (see also doc-string above)!

        topic, qos, retain = "IC_Command/Write/Scheduled", 1, False
        log.info(f"scheduling '{parID}' ~> [{new_value}] for cycle ({future_cycle})")
        cmd = _build_write_command(parID, new_value, future_cycle)
        payload = {
            "Header": _build_header(),
            "CMDs": [ cmd, ]
        }
        return self.publish_with_ack(topic, json.dumps(payload), qos=qos, retain=retain)

    def schedule_filename(self, path, future_cycle):
        '''Start writing to a new .h5 file with the beginning of 'future_cycle'.'''
        assert str(path), "filename cannot be empty!"
        # try to make sure that IoniTOF accepts the path:
        if self.host == '127.0.0.1':
            os.makedirs(os.path.dirname(path), exist_ok=True)
            try:
                with open(path, 'x'):
                    # fix: `path` was passed as a stray positional arg with no
                    # '%s' placeholder, which broke the log formatting:
                    log.info("touched new file: %s", path)
            except FileExistsError as exc:
                log.error(f"new filename '{path}' already exists and will not be scheduled!")
                return

        return self.schedule('ACQ_SRV_SetFullStorageFile', path.replace('/', '\\'), future_cycle)

    def start_measurement(self, path=None):
        '''Start a new measurement and block until the change is confirmed.

        If 'path' is not None, write to the given .h5 file.

        raises TimeoutError if the state change is not confirmed in time.
        '''
        if not path:
            self.write('ACQ_SRV_Start_Meas_Quick', True)
        else:
            self.write('ACQ_SRV_Start_Meas_Record', path.replace('/', '\\'))
        timeout_s = 30
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            if self.is_running:
                break

            time.sleep(10e-3)
        else:
            self.disconnect()
            raise TimeoutError(f"[{self}] error starting measurement")

    def stop_measurement(self, future_cycle=None):
        '''Stop the current measurement and block until the change is confirmed.

        If 'future_cycle' is not None and in the future, schedule the stop command.

        raises TimeoutError if the state change is not confirmed in time.
        '''
        if future_cycle is None or not future_cycle > self._overallcycle[0]:
            self.write('ACQ_SRV_Stop_Meas', True)
        else:
            self.schedule('ACQ_SRV_Stop_Meas', True, future_cycle)
        # may need to wait until the scheduled event..
        if future_cycle is not None:
            self.block_until(future_cycle)
        # ..for this timeout to be applicable:
        timeout_s = 30
        started_at = time.monotonic()
        while time.monotonic() < started_at + timeout_s:
            # confirm change of state:
            if not self.is_running:
                break

            time.sleep(10e-3)
        else:
            self.disconnect()
            raise TimeoutError(f"[{self}] error stopping measurement")

    def block_until(self, cycle):
        '''Blocks the current thread until at least 'cycle' has passed or acquisition stopped.

        Returns the actual current cycle (0 if the measurement stopped).
        '''
        while self.is_running:
            if self._overallcycle[0] >= int(cycle):
                break
            time.sleep(10e-3)
        else:
            return 0

        return self._overallcycle[0]

    def iter_specdata(self, timeout_s=None, buffer_size=300):
        '''Returns an iterator over the fullcycle-data as long as it is available.

        * This will wait up to `timeout_s` (or indefinitely if `None`) for a
          measurement to start or raise a TimeoutError (default: None).
        * Elements will be buffered up to a maximum of `buffer_size` cycles (default: 300).
        * Cycles recorded prior to calling `next()` on the iterator may be missed,
          so ideally this should be set up before any measurement is running.
        * Once the measurement stops, this iterator will raise `StopIteration`.
        * [Important]: When the buffer runs full, a `queue.Full` exception will be raised!
          Therefore, the caller should consume the iterator as soon as possible while the
          measurement is running.
        '''
        q = queue.Queue(buffer_size)
        topic = "DataCollection/Act/ACQ_SRV_FullCycleData"
        qos = 2

        def callback(client, self, msg):
            # runs on the paho network thread -- must never raise!
            try:
                _fc = _parse_fullcycle(msg.payload, need_add_data=True)
                # IoniTOF cycle-indexing starts at 1, while 0 marks idle state:
                if _fc.timecycle.abs_cycle > 0:
                    q.put_nowait(_fc)
                    log.debug(f"received fullcycle, buffer at ({q.qsize()}/{q.maxsize})")
            except queue.Full:
                # DO NOT FAIL INSIDE THE CALLBACK!
                log.error(f"iter_specdata({q.maxsize}): fullcycle buffer overrun!")
                client.unsubscribe(topic)
            except Exception as ex:
                # DO NOT FAIL INSIDE THE CALLBACK!
                log.warning(f"got {ex!r} while parsing {len(msg.payload) = }")

        if not self.is_connected:
            raise Exception("no connection to MQTT broker")

        # Note: when using a simple generator function like this, the following lines
        #  will not be excecuted until the first call to `next` on the iterator!
        #  this means, the callback will not yet be executed, the queue not filled
        #  and we might miss the first cycles...
        self.client.message_callback_add(topic, callback)
        self.client.subscribe(topic, qos)
        try:
            # Note: Prior to 3.0 on POSIX systems, and for *all versions on Windows*,
            #  if block is true and timeout is None, [the q.get()] operation goes into an
            #  uninterruptible wait on an underlying lock. This means that no exceptions
            #  can occur, and in particular a SIGINT will not trigger a KeyboardInterrupt!
            if timeout_s is None and not self.is_running:
                # fix: `.warning` instead of the deprecated `.warn`:
                log.warning(f"waiting indefinitely for measurement to run...")

            yield q.get(block=True, timeout=timeout_s)

            # make double sure that there's more to come..
            _started_at = time.monotonic()
            while timeout_s is None or time.monotonic() < _started_at + timeout_s:
                if self.is_running:
                    break

                time.sleep(10e-3)
            else:
                raise TimeoutError(f"[{self}] received specdata, but measurement won't start")

            while self.is_running or not q.empty():
                if q.full():
                    # re-raise what we swallowed in the callback..
                    raise queue.Full

                if not self.is_connected:
                    # no more data will come, so better prevent a deadlock:
                    break

                try:
                    yield q.get(block=True, timeout=1.0)  # seconds
                except queue.Empty:
                    continue

        except queue.Empty:
            assert timeout_s is not None, "this should never happen"
            raise TimeoutError(f"no measurement running after {timeout_s} seconds")

        finally:
            # ...also, when using more than one iterator, the first to finish will
            #  unsubscribe and cause all others to stop maybe before the time!
            #  all of this might not actually be an issue right now, but
            #  TODO :: fix this weird behaviour (can only be done by implementing the
            #  iterator-protocol properly using a helper class)!
            self.client.unsubscribe(topic)
            self.client.message_callback_remove(topic)

    iter_specdata.__doc__ += _parse_fullcycle.__doc__

    def __repr__(self):
        return f"<{self.__class__.__name__}[{self.host}]>"
820
+