pytrms 0.9.0__tar.gz → 0.9.2__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- {pytrms-0.9.0 → pytrms-0.9.2}/PKG-INFO +3 -3
- {pytrms-0.9.0 → pytrms-0.9.2}/pyproject.toml +5 -3
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/__init__.py +8 -7
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/_base/mqttclient.py +20 -7
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/__init__.py +2 -2
- pytrms-0.9.2/pytrms/clients/db_api.py +183 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/ioniclient.py +5 -21
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/modbus.py +44 -11
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/mqtt.py +40 -16
- pytrms-0.9.2/pytrms/clients/ssevent.py +82 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/compose/composition.py +118 -23
- pytrms-0.9.2/pytrms/helpers.py +120 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/instrument.py +22 -17
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/measurement.py +10 -8
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/peaktable.py +7 -5
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/readers/ionitof_reader.py +7 -16
- pytrms-0.9.0/pytrms/clients/db_api.py +0 -186
- pytrms-0.9.0/pytrms/clients/dirigent.py +0 -169
- pytrms-0.9.0/pytrms/clients/ssevent.py +0 -82
- pytrms-0.9.0/pytrms/helpers.py +0 -6
- pytrms-0.9.0/pytrms/tracebuffer.py +0 -108
- {pytrms-0.9.0 → pytrms-0.9.2}/LICENSE +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/_base/__init__.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/_base/ioniclient.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/_version.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/compose/__init__.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/data/IoniTofPrefs.ini +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/data/ParaIDs.csv +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/plotting/__init__.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/plotting/plotting.py +0 -0
- {pytrms-0.9.0 → pytrms-0.9.2}/pytrms/readers/__init__.py +0 -0
{pytrms-0.9.0 → pytrms-0.9.2}/PKG-INFO

@@ -1,6 +1,6 @@
 Metadata-Version: 2.1
 Name: pytrms
-Version: 0.9.0
+Version: 0.9.2
 Summary: Python bundle for proton-transfer reaction mass-spectrometry (PTR-MS).
 License: GPL-2.0
 Author: Moritz Koenemann
@@ -13,7 +13,7 @@ Classifier: Programming Language :: Python :: 3.11
 Classifier: Programming Language :: Python :: 3.12
 Requires-Dist: h5py (>=3.12.1,<4.0.0)
 Requires-Dist: matplotlib (>=3.9.2,<4.0.0)
-Requires-Dist: paho-mqtt (>=1.6.1,<
+Requires-Dist: paho-mqtt (>=1.6.1,<3.0)
 Requires-Dist: pandas (>=2.2.3,<3.0.0)
-Requires-Dist: pyModbusTCP (>=0.
+Requires-Dist: pyModbusTCP (>=0.1.9)
 Requires-Dist: requests (>=2.32.3,<3.0.0)
{pytrms-0.9.0 → pytrms-0.9.2}/pyproject.toml

@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pytrms"
-version = "0.9.0"
+version = "0.9.2"
 description = "Python bundle for proton-transfer reaction mass-spectrometry (PTR-MS)."
 authors = ["Moritz Koenemann <moritz.koenemann@ionicon.com>"]
 license = "GPL-2.0"
@@ -15,8 +15,10 @@ h5py = "^3.12.1"
 matplotlib = "^3.9.2"
 requests = "^2.32.3"
 pandas = "^2.2.3"
-
-
+
+# we have legacy support for Anaconda-packages!
+pyModbusTCP = ">=0.1.9"
+paho-mqtt = ">=1.6.1,<3.0"

 [tool.poetry.group.test.dependencies]
 pytest = "^8.3.0"
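The loosened constraints above (pyModbusTCP `>=0.1.9`, paho-mqtt `>=1.6.1,<3.0`) keep older Anaconda-packaged builds installable; the client modules further down then branch on the installed version at runtime. A minimal sketch (not part of the diff) of inspecting which versions are actually installed; the printed format is illustrative only:

```python
# Illustrative only, not pytrms code: report the installed versions of the two
# loosened dependencies, the same information the client modules below derive
# from the packages' __version__ attributes.
from importlib.metadata import version

paho_major = int(version("paho-mqtt").split(".")[0])   # 1.x and 2.x are both allowed
modbus_ver = version("pyModbusTCP")                    # anything >= 0.1.9 is allowed

print(f"paho-mqtt {paho_major}.x / pyModbusTCP {modbus_ver}")
```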
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/__init__.py

@@ -1,4 +1,4 @@
-_version = '0.9.0'
+_version = '0.9.2'

 __all__ = ['load', 'connect']

@@ -6,14 +6,16 @@ __all__ = ['load', 'connect']
 def load(path):
     '''Open a datafile for post-analysis or batch processing.

-
+    `path` may be a glob-expression to collect a whole batch.
+
+    returns a `Measurement` instance.
     '''
-
-    from .
+    import glob
+    from .measurement import FinishedMeasurement

-
+    files = glob.glob(path)

-    return
+    return FinishedMeasurement(*files)

 def connect(host=None, method='webapi'):
     '''Connect a client to a running measurement server.
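A minimal usage sketch of the reworked `load()`: the directory and glob pattern below are made up for illustration; any pattern understood by `glob.glob` collects the batch that is handed to `FinishedMeasurement`.

```python
import pytrms

# a single file works just as well as a glob-expression for a whole batch;
# the path and pattern here are hypothetical:
measurement = pytrms.load("examples/data/2024-07-*.h5")
```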
@@ -23,7 +25,6 @@ def connect(host=None, method='webapi'):
     returns an `Instrument` if connected successfully.
     '''
     from .instrument import Instrument
-    from .helpers import PTRConnectionError

     if method.lower() == 'webapi':
         from .clients.ioniclient import IoniClient
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/_base/mqttclient.py

@@ -6,9 +6,8 @@ from collections import deque
 from itertools import cycle
 from threading import Condition, RLock
 from datetime import datetime as dt
-from abc import ABC, abstractmethod

-import paho.mqtt.client
+import paho.mqtt.client

 from .ioniclient import IoniClientBase

@@ -39,7 +38,6 @@ def _on_publish(client, self, mid):
 class MqttClientBase(IoniClientBase):

     @property
-    @abstractmethod
     def is_connected(self):
         '''Returns `True` if connected to the server.

@@ -49,12 +47,27 @@ class MqttClientBase(IoniClientBase):
         return (True
             and self.client.is_connected())

-    def __init__(self, host, subscriber_functions,
+    def __init__(self, host, port, subscriber_functions,
             on_connect, on_subscribe, on_publish,
             connect_timeout_s=10):
-
-
-
+        assert len(subscriber_functions) > 0, "no subscribers: for some unknown reason this causes disconnects"
+        super().__init__(host, port)
+
+        # Note: Version 2.0 of paho-mqtt introduced versioning of the user-callback to fix
+        # some inconsistency in callback arguments and to provide better support for MQTTv5.
+        # VERSION1 of the callback is deprecated, but is still supported in version 2.x.
+        # If you want to upgrade to the newer version of the API callback, you will need
+        # to update your callbacks:
+        paho_version = int(paho.mqtt.__version__.split('.')[0])
+        if paho_version == 1:
+            self.client = paho.mqtt.client.Client(clean_session=True)
+        elif paho_version == 2:
+            self.client = paho.mqtt.client.Client(paho.mqtt.client.CallbackAPIVersion.VERSION1,
+                clean_session=True)
+        else:
+            # see https://eclipse.dev/paho/files/paho.mqtt.python/html/migrations.html
+            raise NotImplementedError("API VERSION2 for MQTTv5 (use paho-mqtt 2.x or implement user callbacks)")
+
         # clean_session is a boolean that determines the client type. If True,
         # the broker will remove all information about this client when it
         # disconnects. If False, the client is a persistent client and
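The constructor above selects the paho-mqtt callback flavour once, at client creation. A standalone sketch of the same switch (a sketch, not copied from pytrms) that keeps the VERSION1 callback signatures working under both paho-mqtt 1.x and 2.x:

```python
import paho.mqtt
import paho.mqtt.client as mqtt

def make_client():
    # paho-mqtt 2.0 added CallbackAPIVersion; pinning VERSION1 keeps the old
    # (deprecated but still supported) callback signatures on 2.x installs.
    major = int(paho.mqtt.__version__.split('.')[0])
    if major == 1:
        return mqtt.Client(clean_session=True)
    if major == 2:
        return mqtt.Client(mqtt.CallbackAPIVersion.VERSION1, clean_session=True)
    raise NotImplementedError(f"untested paho-mqtt major version: {major}")

client = make_client()
client.on_message = lambda c, userdata, msg: print(msg.topic, msg.payload)
```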
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/__init__.py

@@ -7,7 +7,7 @@ assert os.path.exists(_par_id_file), "par-id file not found: please re-install P

 import logging as _logging

-_logging.TRACE =
+_logging.TRACE = 5  # even more verbose than logging.DEBUG

 def enable_extended_logging(log_level=_logging.DEBUG):
     '''make output of http-requests more talkative.
@@ -22,7 +22,7 @@ def enable_extended_logging(log_level=_logging.DEBUG):
     requests_log.setLevel(log_level)
     requests_log.propagate = True

-    if log_level
+    if log_level <= _logging.TRACE:
         # Enabling debugging at http.client level (requests->urllib3->http.client)
         # you will see the REQUEST, including HEADERS and DATA, and RESPONSE with
         # HEADERS but without DATA. the only thing missing will be the response.body,
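`_logging.TRACE = 5` simply defines a numeric level below `DEBUG` (10); passing a level at or below it to `enable_extended_logging()` switches on the `http.client` wire-dump shown above. An illustrative sketch (not from the package) of registering and emitting such a level with the standard `logging` module:

```python
import logging

TRACE = 5                            # logging.DEBUG is 10, so TRACE is even chattier
logging.addLevelName(TRACE, "TRACE")

logging.basicConfig(level=TRACE)
log = logging.getLogger("example")
log.log(TRACE, "very verbose wire-level message")
```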
pytrms-0.9.2/pytrms/clients/db_api.py

@@ -0,0 +1,183 @@
+import os
+import json
+
+import requests
+
+from . import _logging
+from .ssevent import SSEventListener
+from .._base import IoniClientBase
+
+log = _logging.getLogger(__name__)
+
+# TODO :: sowas waer auch ganz cool: die DBAPI bietes sich geradezu an,
+# da mehr object-oriented zu arbeiten:
+#   currentVariable = get_component(currentComponentNameAction, ds)
+#   currentVariable.save_value({'value': currentValue})
+
+class IoniConnect(IoniClientBase):
+
+    @property
+    def is_connected(self):
+        '''Returns `True` if connection to IoniTOF could be established.'''
+        try:
+            self.get("/api/status")
+            return True
+        except:
+            return False
+
+    @property
+    def is_running(self):
+        '''Returns `True` if IoniTOF is currently acquiring data.'''
+        raise NotImplementedError("is_running")
+
+    def connect(self, timeout_s):
+        pass
+
+    def disconnect(self):
+        pass
+
+    def __init__(self, host='127.0.0.1', port=5066, session=None):
+        super().__init__(host, port)
+        self.url = f"http://{self.host}:{self.port}"
+        if session is None:
+            session = requests.sessions.Session()
+        self.session = session
+        # ??
+        self.current_avg_endpoint = None
+        self.comp_dict = dict()
+
+    def get(self, endpoint, **kwargs):
+        return self._get_object(endpoint, **kwargs).json()
+
+    def post(self, endpoint, data, **kwargs):
+        return self._create_object(endpoint, data, 'post', **kwargs).headers.get('Location')
+
+    def put(self, endpoint, data, **kwargs):
+        return self._create_object(endpoint, data, 'put', **kwargs).headers.get('Location')
+
+    def upload(self, endpoint, filename):
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        with open(filename) as f:
+            # Note (important!): this is a "form-data" entry, where the server
+            # expects the "name" to be 'file' and rejects it otherwise:
+            name = 'file'
+            r = self.session.post(self.url + endpoint, files=[(name, (filename, f, ''))])
+        r.raise_for_status()
+
+        return r
+
+    def _get_object(self, endpoint, **kwargs):
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        if 'headers' not in kwargs:
+            kwargs['headers'] = {'content-type': 'application/hal+json'}
+        elif 'content-type' not in (k.lower() for k in kwargs['headers']):
+            kwargs['headers'].update({'content-type': 'application/hal+json'})
+        r = self.session.request('get', self.url + endpoint, **kwargs)
+        r.raise_for_status()
+
+        return r
+
+    def _create_object(self, endpoint, data, method='post', **kwargs):
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        if not isinstance(data, str):
+            data = json.dumps(data, ensure_ascii=False)  # default is `True`, escapes Umlaute!
+        if 'headers' not in kwargs:
+            kwargs['headers'] = {'content-type': 'application/hal+json'}
+        elif 'content-type' not in (k.lower() for k in kwargs['headers']):
+            kwargs['headers'].update({'content-type': 'application/hal+json'})
+        r = self.session.request(method, self.url + endpoint, data=data, **kwargs)
+        if not r.ok:
+            log.error(f"POST {endpoint}\n{data}\n\nreturned [{r.status_code}]: {r.content}")
+        r.raise_for_status()
+
+        return r
+
+    def sync(self, peaktable):
+        """Compare and upload any differences in `peaktable` to the database."""
+        from pytrms.peaktable import Peak, PeakTable
+        from operator import attrgetter
+
+        # Note: a `Peak` is a hashable object that serves as a key that
+        # distinguishes between peaks as defined by PyTRMS:
+        make_key = lambda peak: Peak(center=peak['center'], label=peak['name'], shift=peak['shift'])
+
+        if isinstance(peaktable, str):
+            log.info(f"loading peaktable '{peaktable}'...")
+            peaktable = PeakTable.from_file(peaktable)
+
+        # get the PyTRMS- and IoniConnect-peaks on the same page:
+        conv = {
+            'name': attrgetter('label'),
+            'center': attrgetter('center'),
+            'kRate': attrgetter('k_rate'),
+            'low': lambda p: p.borders[0],
+            'high': lambda p: p.borders[1],
+            'shift': attrgetter('shift'),
+            'multiplier': attrgetter('multiplier'),
+        }
+        # normalize the input argument and create a hashable set:
+        updates = dict()
+        for peak in peaktable:
+            payload = {k: conv[k](peak) for k in conv}
+            updates[make_key(payload)] = {'payload': payload}
+
+        log.info(f"fetching current peaktable from the server...")
+        # create a comparable collection of peaks already on the database by
+        # reducing the keys in the response to what we actually want to update:
+        db_peaks = {make_key(p): {
+            'payload': {k: p[k] for k in conv.keys()},
+            'self': p['_links']['self'],
+            'parent': p['_links'].get('parent'),
+        } for p in self.get('/api/peaks')['_embedded']['peaks']}
+
+        to_update = updates.keys() & db_peaks.keys()
+        to_upload = updates.keys() - db_peaks.keys()
+        updated = 0
+        for key in sorted(to_update):
+            # check if an existing peak needs an update
+            if db_peaks[key]['payload'] == updates[key]['payload']:
+                # nothing to do..
+                log.debug(f"up-to-date: {key}")
+                continue
+
+            self.put(db_peaks[key]['self']['href'], updates[key]['payload'])
+            log.info(f"updated: {key}")
+            updated += 1
+
+        if len(to_upload):
+            # Note: POSTing the embedded-collection is *miles faster*
+            # than doing separate requests for each peak!
+            payload = {'_embedded': {'peaks': [updates[key]['payload'] for key in sorted(to_upload)]}}
+            self.post('/api/peaks', payload)
+            for key in sorted(to_upload): log.info(f"added new: {key}")
+
+        # Note: this disregards the peak-parent-relationship, but in
+        # order to implement this correctly, one would need to check
+        # if the parent-peak with a specific 'parentID' is already
+        # uploaded and search it.. there's an endpoint
+        # 'LINK /api/peaks/{parentID} Location: /api/peaks/{childID}'
+        # to link a child to its parent, but it remains complicated.
+        # TODO :: maybe later implement parent-peaks!?
+
+        return {
+            'added': len(to_upload),
+            'updated': updated,
+            'up-to-date': len(to_update) - updated,
+        }
+
+    def iter_events(self, event_re=r".*"):
+        """Follow the server-sent-events (SSE) on the DB-API.
+
+        `event_re` a regular expression to filter events (default: matches everything)
+
+        Note: This will block until a matching event is received.
+        Especially, it cannot be cancelled by KeyboardInterrupt (due to the `requests`
+        stream-implementation), unless the server sends a keep-alive at regular
+        intervals (as every well-behaved server should be doing)!
+        """
+        yield from SSEventListener(event_re, host_url=self.url, endpoint="/api/events",
+                session=self.session)
+
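A hedged usage sketch of the new `IoniConnect` client; the peaktable filename and the event pattern are assumptions for illustration, while the endpoints and return shapes follow the file above.

```python
from pytrms.clients.db_api import IoniConnect

api = IoniConnect(host='127.0.0.1', port=5066)
print(api.is_connected)              # GETs /api/status under the hood

# compare a local peaktable against /api/peaks and upload the difference
# ('my_peaks.txt' is a hypothetical file readable by PeakTable.from_file):
report = api.sync('my_peaks.txt')
print(report)                        # {'added': ..., 'updated': ..., 'up-to-date': ...}

# block on server-sent events; the regular expression is just an example:
for ev in api.iter_events(r".*"):
    print(ev.event, ev.data)
    break
```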
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/ioniclient.py

@@ -18,17 +18,17 @@ class IoniClient:
     Access the Ionicon WebAPI.

     Usage:
-
-
+    > client = IoniClient()
+    > client.get('TPS_Pull_H')
     {'TPS_Pull_H': 123.45, ... }

-
+    > client.set('TPS_Pull_H', 42)
     {'TPS_Pull_H': 42.0, ... }

-
+    > client.start_measurement()
     ACK

-
+    > client.host, client.port
     ('localhost', 8002)

     '''
@@ -85,19 +85,3 @@ class IoniClient:

     def stop_measurement(self):
         return self.set('ACQ_SRV_Stop_Meas', 1)
-
-
-if __name__ == '__main__':
-    import sys
-    client = IoniClient()
-
-    if len(sys.argv) == 2:
-        print(client.get(sys.argv[1]))
-    elif len(sys.argv) == 3:
-        print(client.set(sys.argv[1], sys.argv[2]))
-    else:
-        print(f"""\
-usage:
-    python {sys.argv[0]} <varname> [<value>]
-""")
-
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/modbus.py

@@ -9,7 +9,7 @@ from collections import namedtuple
 from functools import lru_cache
 from itertools import tee

-
+import pyModbusTCP.client

 from . import _par_id_file
 from .._base.ioniclient import IoniClientBase
@@ -19,9 +19,28 @@ log = logging.getLogger(__name__)
 __all__ = ['IoniconModbus']


+def _patch_is_open():
+    # Note: the .is_open and .timeout attributes were changed
+    # from a function to a property!
+    #
+    # 0.2.0 2022-06-05
+    #
+    # - ModbusClient: parameters are now properties instead of methods (more intuitive).
+    #
+    # from the [changelog](https://github.com/sourceperl/pyModbusTCP/blob/master/CHANGES):
+    major, minor, patch = pyModbusTCP.__version__.split('.')
+    if int(minor) < 2:
+        return lambda mc: mc.is_open()
+    else:
+        return lambda mc: mc.is_open
+
+_is_open = _patch_is_open()
+
 with open(_par_id_file) as f:
     it = iter(f)
-    assert next(it).startswith('ID\tName'), "Modbus parameter file is corrupt: "
+    assert next(it).startswith('ID\tName'), ("Modbus parameter file is corrupt: "
+        + f.name
+        + "\n\ntry re-installing the PyTRMS python package to fix it!")
     _id_to_descr = {int(id_): name for id_, name, *_ in (line.strip().split('\t') for line in it)}

 # look-up-table for c_structs (see docstring of struct-module for more info).
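The `_patch_is_open()` helper above absorbs the pyModbusTCP 0.2.0 change from `is_open()` (method) to `is_open` (property). A standalone sketch of the same idea, assuming only that `pyModbusTCP.__version__` is importable, which the module above also relies on:

```python
import pyModbusTCP
from pyModbusTCP.client import ModbusClient

def make_is_open():
    # pre-0.2.0: is_open is a method, 0.2.0+: a plain property
    minor = int(pyModbusTCP.__version__.split('.')[1])
    if minor < 2:
        return lambda mc: mc.is_open()
    return lambda mc: mc.is_open

is_open = make_is_open()
mc = ModbusClient(host='127.0.0.1', port=502, auto_open=False)
print(is_open(mc))   # False until mc.open() has succeeded
```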
@@ -59,10 +78,10 @@ def _unpack(registers, format='>f'):
     representation, respectively.

     >>> _unpack([17448, 0], 'float')
-    672.
+    672.0

     >>> _unpack([17446, 32768], 'float')
-    666.
+    666.0

     >>> _unpack([16875, 61191, 54426, 37896], 'double')
     3749199524.83057
@@ -132,7 +151,7 @@ class IoniconModbus(IoniClientBase):

     @property
     def is_connected(self):
-        if not self.mc
+        if not _is_open(self.mc):
             return False

         # wait for the IoniTOF alive-counter to change (1 second max)...
@@ -175,7 +194,18 @@ class IoniconModbus(IoniClientBase):

     def __init__(self, host='localhost', port=502):
         super().__init__(host, port)
-
+        # Note: we patch the behaviour such, that it behaves like pre-0.2
+        # (from the time of development of this module), BUT we skip the
+        # auto_close-feature for the sake of speed:
+        #
+        # 0.2.0 2022-06-05
+        #
+        # - ModbusClient: now TCP auto open mode is active by default (auto_open=True, auto_close=False).
+        #
+        # from the [changelog](https://github.com/sourceperl/pyModbusTCP/blob/master/CHANGES)
+        self.mc = pyModbusTCP.client.ModbusClient(host=self.host, port=self.port,
+                auto_open = False, auto_close = False
+                )
         # try connect immediately:
         try:
             self.connect()
@@ -185,8 +215,11 @@ class IoniconModbus(IoniClientBase):

     def connect(self, timeout_s=10):
         log.info(f"[{self}] connecting to Modbus server...")
-
-        self.mc.
+        # Note: .timeout-attribute changed to a property with 0.2.0 (see comments above)
+        if callable(self.mc.timeout):
+            self.mc.timeout(timeout_s)
+        else:
+            self.mc.timeout = timeout_s
         if not self.mc.open():
             raise TimeoutError(f"[{self}] no connection to modbus socket")

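A short usage sketch of the patched Modbus client; the host address is an example and only the members visible in the hunks above (`is_connected`, `connect()`, `disconnect()`) are used.

```python
from pytrms.clients.modbus import IoniconModbus

mb = IoniconModbus(host='192.168.127.100', port=502)  # the constructor already tries to connect
if not mb.is_connected:
    mb.connect(timeout_s=5)   # works with both the old timeout() method and the new property

print("IoniTOF reachable:", mb.is_connected)
mb.disconnect()
```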
@@ -201,7 +234,7 @@ class IoniconModbus(IoniClientBase):
             raise TimeoutError(f"[{self}] no connection to IoniTOF");

     def disconnect(self):
-        if self.mc
+        if _is_open(self.mc):
             self.mc.close()

     @property
@@ -452,9 +485,9 @@ class IoniconModbus(IoniClientBase):
         _read = self.mc.read_holding_registers if is_holding_register else self.mc.read_input_registers

         register = _read(addr, n_bytes)
-        if register is None and self.mc
+        if register is None and _is_open(self.mc):
             raise IOError(f"unable to read ({n_bytes}) registers at [{addr}] from connection")
-        elif register is None and not self.mc
+        elif register is None and not _is_open(self.mc):
             raise IOError("trying to read from closed Modbus-connection")

         return _unpack(register, c_format)
{pytrms-0.9.0 → pytrms-0.9.2}/pytrms/clients/mqtt.py

@@ -325,9 +325,14 @@ def follow_state(client, self, msg):
     log.debug(f"[{self}] new server-state: " + str(state))
     # replace the current state with the new element:
     self._server_state.append(state)
-
-
-
+    meas_running = (state == "ACQ_Aquire")  # yes, there's a typo, plz keep it :)
+    just_started = (meas_running and not msg.retain)
+    if meas_running:
+        # signal the relevant thread(s) that we need an update:
+        self._calcconzinfo.append(_NOT_INIT)
+    if just_started:
+        # invalidate the source-file until we get a new one:
+        self._sf_filename.append(_NOT_INIT)

 follow_state.topics = ["DataCollection/Act/ACQ_SRV_CurrentState"]

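The new lines in `follow_state()` rely on MQTT retained messages: the broker replays the retained copy of the state topic on every subscribe, so only a non-retained `"ACQ_Aquire"` marks an acquisition that started just now. A standalone sketch of that check (assuming a plain-string payload; the real payload parsing happens earlier in `follow_state` and is not shown in this hunk):

```python
import paho.mqtt.client as mqtt

def on_message(client, userdata, msg):
    state = msg.payload.decode()                 # assumption: plain-string payload
    running = (state == "ACQ_Aquire")            # spelling as published by IoniTOF
    just_started = running and not msg.retain    # retained replay == old news
    print(f"{state=} {running=} {just_started=}")

client = mqtt.Client(clean_session=True)         # paho-mqtt 1.x style constructor
client.on_message = on_message
client.connect("127.0.0.1", 1883)
client.subscribe("DataCollection/Act/ACQ_SRV_CurrentState")
client.loop_forever()
```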
@@ -356,6 +361,10 @@ def follow_act_set_values(client, self, msg):
         # Note: this topic doesn't strictly follow the convention and is handled separately
         return

+    if server == "Sequencer":
+        # Note: this is a separate program and will be ignored (has its own AUTO_-numbers et.c.)
+        return
+
     if parID == "PTR_CalcConzInfo":
         # another "special" topic handled in 'follow_calc_conz_info' ...
         return
@@ -404,8 +413,8 @@ _NOT_INIT = object()
 class MqttClient(MqttClientBase):
     """a simplified client for the Ionicon MQTT API.

-
-
+    > mq = MqttClient()
+    > mq.write('TCP_MCP_B', 3400)
     ValueError()

     """
@@ -463,11 +472,27 @@ class MqttClient(MqttClientBase):

     @property
     def current_sourcefile(self):
-        '''Returns the path to the hdf5-file that is currently
+        '''Returns the path to the hdf5-file that is currently being written.

-
+        Returns an empty string if no measurement is running.
         '''
-
+        if not self.is_running:
+            return ""
+
+        if self._sf_filename[0] is not _NOT_INIT:
+            return self._sf_filename[0]
+
+        # Note: '_NOT_INIT' is set by us on start of acquisition, so we'd expect
+        # to receive the source-file-topic after a (generous) timeout:
+        timeout_s = 15
+        started_at = time.monotonic()
+        while time.monotonic() < started_at + timeout_s:
+            if self._sf_filename[0] is not _NOT_INIT:
+                return self._sf_filename[0]
+
+            time.sleep(10e-3)
+        else:
+            raise TimeoutError(f"[{self}] unable to retrieve source-file after ({timeout_s = })");

     @property
     def current_cycle(self):
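A hedged usage sketch of the reworked `current_sourcefile`: it returns an empty string while idle, the path of the hdf5 file being written during a run, or raises `TimeoutError` if the retained source-file topic never shows up within 15 s.

```python
from pytrms.clients.mqtt import MqttClient

mq = MqttClient(host='127.0.0.1', port=1883)
if mq.is_running:
    print("currently writing:", mq.current_sourcefile)
else:
    print("idle, sourcefile is", repr(mq.current_sourcefile))   # -> ''
```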
@@ -476,9 +501,9 @@ class MqttClient(MqttClientBase):
             return self._overallcycle[0]
         return 0

-    def __init__(self, host='127.0.0.1'):
+    def __init__(self, host='127.0.0.1', port=1883):
         # this sets up the mqtt connection with default callbacks:
-        super().__init__(host, _subscriber_functions, None, None, None)
+        super().__init__(host, port, _subscriber_functions, None, None, None)
         log.debug(f"connection check ({self.is_connected}) :: {self._server_state = } / {self._sched_cmds = }");

     def disconnect(self):
@@ -508,12 +533,10 @@ class MqttClient(MqttClientBase):
         if _lut is self.set_values and is_read_only:
             raise ValueError(f"'{parID}' is read-only, did you mean `kind='act'`?")

-
-
-
-
-            return _lut[parID]
-        except KeyError as exc:
+        if not parID in _lut:
+            # Note: The values should need NO! time to be populated from the MQTT topics,
+            # because all topics are published as *retained* by the PTR-server.
+            # However, a short timeout is respected before raising a `KeyError`:
             time.sleep(200e-3)
             rv = _lut.get(parID)
             if rv is not None:
@@ -525,6 +548,7 @@ class MqttClient(MqttClientBase):
                     "set" if parID in self.set_values else
                     "")
             raise KeyError(str(parID) + (' (did you mean `kind="%s"`?)' % error_hint) if error_hint else "")
+        return _lut[parID]

     def get_table(self, table_name):
         timeout_s = 10
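The rewritten lookup above replaces a try/except around `_lut[parID]` with an explicit membership check plus a 200 ms grace period, since all topics are retained and should already be cached. A standalone sketch of that pattern (plain dict instead of the client's internal look-up tables):

```python
import time

def lookup(cache: dict, key, grace_s=0.2):
    # retained topics normally populate the cache immediately; wait briefly
    # before giving up with a KeyError, exactly once per missing key.
    if key not in cache:
        time.sleep(grace_s)
        if cache.get(key) is None:
            raise KeyError(key)
    return cache[key]

values = {'TPS_Pull_H': 123.45}
print(lookup(values, 'TPS_Pull_H'))   # -> 123.45
```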
pytrms-0.9.2/pytrms/clients/ssevent.py

@@ -0,0 +1,82 @@
+import re
+from collections import namedtuple
+from collections.abc import Iterable
+
+import requests
+
+from . import _logging
+
+log = _logging.getLogger(__name__)
+
+_event_rv = namedtuple('ssevent', ['event', 'data'])
+
+class SSEventListener(Iterable):
+
+    @staticmethod
+    def _line_stream(response):
+        # Note: using .iter_content() seems to yield results faster than .iter_lines()
+        line = ''
+        for bite in response.iter_content(chunk_size=1, decode_unicode=True):
+            line += bite
+            if bite == '\n':
+                yield line.strip()
+                line = ''
+
+    def __init__(self, event_re=None, host_url='http://127.0.0.1:5066',
+            endpoint='/api/events', session=None):
+        self.uri = host_url + endpoint
+        if session is not None:
+            self._get = session.get
+        else:
+            self._get = requests.get
+        self._connect_response = None
+        self.subscriptions = set()
+        if event_re is not None:
+            self.subscribe(event_re)
+
+    def subscribe(self, event_re):
+        """Listen for events matching the given string or regular expression."""
+        self.subscriptions.add(re.compile(event_re))
+        if self._connect_response is None:
+            r = self._get(self.uri, headers={'accept': 'text/event-stream'}, stream=True)
+            if not r.ok:
+                log.error(f"no connection to {self.uri} (got [{r.status_code}])")
+            r.raise_for_status()
+
+            self._connect_response = r
+
+    def unsubscribe(self, event_re):
+        """Stop listening for certain events."""
+        self.subscriptions.remove(re.compile(event_re))
+        if not len(self.subscriptions):
+            log.debug(f"closing connection to {self.uri}")
+            self._connect_response.close()
+            self._connect_response = None
+
+    def __iter__(self):
+        if self._connect_response is None:
+            raise Exception("call .subscribe() first to listen for events")
+
+        event = msg = ''
+        for line in self._line_stream(self._connect_response):  # blocks...
+            if not line:
+                # an empty line concludes an event
+                if event and any(re.match(sub, event) for sub in self.subscriptions):
+                    yield _event_rv(event, msg)
+
+                # Note: any further empty lines are ignored (may be used as keep-alive),
+                # but in either case clear event and msg to rearm for the next event:
+                event = msg = ''
+                continue
+
+            key, val = line.split(':', maxsplit=1)
+            if not key:
+                # this is a comment, starting with a colon ':' ...
+                log.log(_logging.TRACE, "sse:" + val)
+            elif key == 'event':
+                event = val.lstrip()
+            elif key == 'data':
+                msg += val.lstrip()
+            else:
+                log.warning(f"unknown SSE-key <{key}> in stream")
+
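A minimal usage sketch of the new `SSEventListener`; the subscription pattern is an example, real event names depend on the IoniConnect server.

```python
from pytrms.clients.ssevent import SSEventListener

listener = SSEventListener(host_url='http://127.0.0.1:5066', endpoint='/api/events')
listener.subscribe(r".*")            # opens the streaming connection

for ev in listener:                  # blocks until a matching event arrives
    print(ev.event, ev.data)
    break

listener.unsubscribe(r".*")          # last subscription gone -> connection closed
```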