pytrms 0.9.7__tar.gz → 0.9.8__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (31)
  1. {pytrms-0.9.7 → pytrms-0.9.8}/PKG-INFO +1 -1
  2. {pytrms-0.9.7 → pytrms-0.9.8}/pyproject.toml +2 -2
  3. pytrms-0.9.8/pytrms/__init__.py +86 -0
  4. pytrms-0.9.8/pytrms/clients/__init__.py +9 -0
  5. pytrms-0.9.8/pytrms/clients/db_api.py +400 -0
  6. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/dummy.py +5 -4
  7. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/mqtt.py +2 -8
  8. pytrms-0.9.8/pytrms/clients/ssevent.py +111 -0
  9. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/data/ParaIDs.csv +3 -3
  10. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/instrument.py +67 -26
  11. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/peaktable.py +3 -4
  12. pytrms-0.9.7/pytrms/__init__.py +0 -42
  13. pytrms-0.9.7/pytrms/clients/__init__.py +0 -33
  14. pytrms-0.9.7/pytrms/clients/db_api.py +0 -213
  15. pytrms-0.9.7/pytrms/clients/ssevent.py +0 -82
  16. {pytrms-0.9.7 → pytrms-0.9.8}/LICENSE +0 -0
  17. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/_base/__init__.py +0 -0
  18. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/_base/ioniclient.py +0 -0
  19. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/_base/mqttclient.py +0 -0
  20. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/_version.py +0 -0
  21. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/ioniclient.py +0 -0
  22. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/modbus.py +0 -0
  23. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/compose/__init__.py +0 -0
  24. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/compose/composition.py +0 -0
  25. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/data/IoniTofPrefs.ini +0 -0
  26. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/helpers.py +0 -0
  27. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/measurement.py +0 -0
  28. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/plotting/__init__.py +0 -0
  29. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/plotting/plotting.py +0 -0
  30. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/readers/__init__.py +0 -0
  31. {pytrms-0.9.7 → pytrms-0.9.8}/pytrms/readers/ionitof_reader.py +0 -0
{pytrms-0.9.7 → pytrms-0.9.8}/PKG-INFO
@@ -1,6 +1,6 @@
 Metadata-Version: 2.3
 Name: pytrms
-Version: 0.9.7
+Version: 0.9.8
 Summary: Python bundle for proton-transfer reaction mass-spectrometry (PTR-MS).
 License: GPL-2.0
 Author: Moritz Koenemann
{pytrms-0.9.7 → pytrms-0.9.8}/pyproject.toml
@@ -1,6 +1,6 @@
 [tool.poetry]
 name = "pytrms"
-version = "0.9.7"
+version = "0.9.8"
 description = "Python bundle for proton-transfer reaction mass-spectrometry (PTR-MS)."
 authors = ["Moritz Koenemann <moritz.koenemann@ionicon.com>"]
 license = "GPL-2.0"
@@ -22,9 +22,9 @@ paho-mqtt = ">=1.6.1,<3.0"
 
 [tool.poetry.group.test.dependencies]
 pytest = "^8.3.0"
+testcontainers = "^4.13.0"
 
 [tool.poetry.group.dev.dependencies]
-Sphinx = "^8.0.0"
 ipykernel = "^6.29.0"
 
 [build-system]
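
The test group now pulls in testcontainers, which suggests the suite spins up the DB-API in a throwaway container for integration tests. A minimal pytest fixture sketch under that assumption (the image name 'ionicon/db-api' and the container port are illustrative, not taken from this release):

import pytest
from testcontainers.core.container import DockerContainer

@pytest.fixture(scope='session')
def db_api_address():
    # hypothetical image name; substitute whatever actually serves the DB-API:
    with DockerContainer('ionicon/db-api').with_exposed_ports(5066) as c:
        # the container port gets mapped to a random free port on the host:
        yield c.get_container_host_ip(), int(c.get_exposed_port(5066))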
pytrms-0.9.8/pytrms/__init__.py
@@ -0,0 +1,86 @@
+_version = '0.9.8'
+
+import logging
+from functools import wraps
+
+_logging_getLogger = logging.getLogger
+
+@wraps(_logging_getLogger)
+def getLoggerWithAnnouncement(name=None):
+    # patch the (global) logger to print its own name
+    # (useful for turning individual loggers on/off)
+    # WARNING: this will patch every instance of the
+    # logging-module in every import after pytrms is
+    # imported! don't be overwhelmingly fancy with this!
+    rv = _logging_getLogger(name)
+    if name is not None:
+        rv.debug(f"acquired logger for '{name}'")
+
+    return rv
+
+logging.getLogger = getLoggerWithAnnouncement
+logging.TRACE = 5  # even more verbose than logging.DEBUG
+
+__all__ = ['load', 'connect']
+
+
+def enable_extended_logging(log_level=logging.DEBUG):
+    '''make output of http-requests more talkative.
+
+    set 'log_level=logging.TRACE' for highest verbosity!
+    '''
+    if log_level <= logging.DEBUG:
+        # enable logging of http request urls on the library that is
+        # underlying the 'requests'-package:
+        logging.warning(f"enabling logging-output on 'urllib3' ({log_level = })")
+        requests_log = logging.getLogger("urllib3")
+        requests_log.setLevel(log_level)
+        requests_log.propagate = True
+
+    if log_level <= logging.TRACE:
+        # enabling debugging at http.client level (requests->urllib3->http.client)
+        # will show the REQUEST, including HEADERS and DATA, and the RESPONSE with
+        # HEADERS but without DATA. the only thing missing will be the response body,
+        # which is not logged.
+        logging.warning(f"enabling logging-output on 'HTTPConnection' ({log_level = })")
+        from http.client import HTTPConnection
+        HTTPConnection.debuglevel = 1
+
+
+def load(path):
+    '''Open a datafile for post-analysis or batch processing.
+
+    `path` may be a glob-expression to collect a whole batch.
+
+    returns a `Measurement` instance.
+    '''
+    import glob
+    from .measurement import FinishedMeasurement
+
+    files = glob.glob(path)
+
+    return FinishedMeasurement(*files)
+
+
+def connect(host='localhost', port=None, method='mqtt'):
+    '''Connect a client to a running measurement server.
+
+    'method' is the preferred connection, either 'mqtt' (default), 'webapi' or 'modbus'.
+
+    returns an `Instrument` if connected successfully.
+    '''
+    from .instrument import Instrument
+
+    if method.lower() == 'mqtt':
+        from .clients.mqtt import MqttClient as _client
+    elif method.lower() == 'webapi':
+        from .clients.ioniclient import IoniClient as _client
+    elif method.lower() == 'modbus':
+        from .clients.modbus import IoniconModbus as _client
+    else:
+        raise NotImplementedError(str(method))
+
+    backend = _client(host, port) if port is not None else _client(host)
+
+    return Instrument(backend)
+
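
With the new top-level entry points in place, typical usage reads like this (a sketch; the file pattern and port are illustrative):

import logging
import pytrms

# post-analysis: `load` accepts a glob-expression and collects a batch
meas = pytrms.load('2025-*.h5')

# live operation: `connect` picks the backend via `method`
inst = pytrms.connect('localhost')                           # MQTT (default)
inst = pytrms.connect('localhost', port=5066, method='webapi')

# make http-traffic visible at the new TRACE level:
pytrms.enable_extended_logging(log_level=logging.TRACE)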
pytrms-0.9.8/pytrms/clients/__init__.py
@@ -0,0 +1,9 @@
+import os
+
+from .. import enable_extended_logging
+
+_root = os.path.dirname(__file__)
+_par_id_file = os.path.abspath(os.path.join(_root, '..', 'data', 'ParaIDs.csv'))
+assert os.path.exists(_par_id_file), "par-id file not found: please re-install PyTRMS package"
+
+
pytrms-0.9.8/pytrms/clients/db_api.py
@@ -0,0 +1,400 @@
+import os
+import time
+import json
+import logging
+from collections import namedtuple
+import urllib3.util
+
+import requests
+import requests.adapters
+import requests.exceptions
+
+from .ssevent import SSEventListener
+from .._base import _IoniClientBase
+
+log = logging.getLogger(__name__)
+
+_unsafe = namedtuple('http_response', ['status_code', 'href'])
+
+__all__ = ['IoniConnect']
+
+
+class IoniConnect(_IoniClientBase):
+
+    # Note: this retry-policy is specifically designed for the
+    # SQLite Error 5: 'database locked', which may take potentially
+    # minutes to resolve itself! Therefore, it is extra generous
+    # and backs off up to `3.0 * 2^4 = 48 sec` between retries for
+    # a total of ~1 1/2 minutes (plus database timeout). But giving
+    # up on retrying here would mean *losing all data* in the queue!
+    # ==>> We would rather crash on a `queue.Full` exception! <<==
+    _retry_policy = urllib3.util.Retry(
+        # this configures policies on each cause for errors individually...
+        total=None,  # max. retries (takes precedence). `None`: turned off
+        connect=0, read=0, redirect=0,  # (all turned off, see docs for details)
+        other=0,  # "other" errors include timeout (set to 27 seconds)
+        # configure the retries on specific status-codes...
+        status=5,  # how many times to retry on bad status codes
+        raise_on_status=True,  # `True`: do not return a 429 status code
+        status_forcelist=[429],  # integer status-codes to retry on
+        allowed_methods=None,  # `None`: retry on all (possibly non-idempotent) verbs
+        # this configures backoff between retries...
+        backoff_factor=3.0,  # back off *after* first try in seconds (x 2^n_retries)
+        respect_retry_after_header=False,  # would override `backoff_factor`, turn off!
+    )
+
+    @property
+    def is_connected(self):
+        '''Returns `True` if a connection to IoniTOF could be established.'''
+        try:
+            assert self.session is not None, "not connected"
+            self.get("/api/status")
+            return True
+        except Exception:
+            return False
+
+    @property
+    def is_running(self):
+        '''Returns `True` if IoniTOF is currently acquiring data.'''
+        try:
+            assert self.session is not None, "not connected"
+            self.get_location("/api/measurements/current")
+            return True
+        except (AssertionError, requests.exceptions.HTTPError):
+            return False
+
+    def connect(self, timeout_s=10):
+        self.session = requests.sessions.Session()
+        self.session.mount('http://', self._http_adapter)
+        self.session.mount('https://', self._http_adapter)
+        started_at = time.monotonic()
+        while timeout_s is None or time.monotonic() < started_at + timeout_s:
+            try:
+                self.current_meas_loc = self.get_location("/api/measurements/current")
+                break
+            except requests.exceptions.HTTPError:
+                # OK, no measurement running..
+                self.current_meas_loc = ''
+                break
+            except Exception:
+                pass
+
+            time.sleep(1.0)
+        else:
+            self.session = self.current_meas_loc = None
+            raise TimeoutError(f"no connection to '{self.url}'")
+
+    def disconnect(self):
+        if self.session is not None:
+            del self.session
+        self.session = None
+        self.current_meas_loc = None
+
+    def start_measurement(self, path=None):
+        '''Start a new measurement and block until the change is confirmed.
+
+        If 'path' is not None, start from the given (recipe-)directory.
+        '''
+        assert not self.is_running, "measurement already running @ " + str(self.current_meas_loc)
+
+        payload = {}
+        if path is not None:
+            assert os.path.isdir(path), "must point to a (recipe-)directory: " + str(path)
+            payload |= { "recipeDirectory": str(path) }
+
+        self.current_meas_loc = self.post("/api/measurements", payload).href
+        self.put(self.current_meas_loc, { "isRunning": True })
+
+        return self.current_meas_loc
+
+    def stop_measurement(self, future_cycle=None):
+        '''Stop the current measurement and block until the change is confirmed.
+
+        If 'future_cycle' is not None and in the future, schedule the stop command.
+        '''
+        loc = self.current_meas_loc or self.get_location("/api/measurements/current")
+        self.patch(loc, { "isRunning": False })
+        self.current_meas_loc = ''
+
+    def __init__(self, host='127.0.0.1', port=5066):
+        super().__init__(host, port)
+        self.url = f"http://{self.host}:{self.port}"
+        self._http_adapter = requests.adapters.HTTPAdapter(max_retries=self._retry_policy)
+        self.session = None
+        self.current_meas_loc = None
+        try:
+            self.connect(timeout_s=3.3)
+        except TimeoutError:
+            log.warning("no connection! make sure the DB-API is running and try again")
+
+    def get(self, endpoint, **kwargs):
+        """Make a GET request to `endpoint` and parse JSON if applicable."""
+        try:
+            r = self._fetch_object(endpoint, 'get', **kwargs)
+            if 'json' in r.headers.get('content-type', ''):
+                return r.json()
+            if 'text' in r.headers.get('content-type', ''):
+                return r.text
+            else:
+                log.warning(f"unexpected 'content-type: {r.headers['content-type']}'")
+                log.info(f"did you mean to use `{type(self).__name__}.download(..)` instead?")
+                return r.content
+
+        except requests.exceptions.HTTPError as e:
+            if e.response.status_code == 410:  # Gone
+                log.debug(f"nothing there at '{endpoint}' 0_o ?!")
+                return None
+            raise
+
+    def get_location(self, endpoint, **kwargs):
+        """Returns the actual location that `endpoint` points to (may be a redirect)."""
+        r = self._fetch_object(endpoint, 'get', **(kwargs | { "allow_redirects": False }))
+        return r.headers.get('Location', r.request.path_url)
+
+    def post(self, endpoint, data, **kwargs):
+        """Append to the collection at `endpoint` the object defined by `data`."""
+        r = self._create_object(endpoint, data, 'post', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', ''))  # no default location known!
+
+    def put(self, endpoint, data, **kwargs):
+        """Replace the entire object at `endpoint` with `data`."""
+        r = self._create_object(endpoint, data, 'put', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def patch(self, endpoint, data, **kwargs):
+        """Change parts of the object at `endpoint` with fields in `data`."""
+        r = self._create_object(endpoint, data, 'patch', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def delete(self, endpoint, **kwargs):
+        """Attempt to delete the object at `endpoint`."""
+        r = self._fetch_object(endpoint, 'delete', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def link(self, parent_ep, child_ep, **kwargs):
+        """Make the object at `parent_e[nd]p[oint]` refer to `child_e[nd]p[oint]`."""
+        r = self._make_link(parent_ep, child_ep, sever=False, **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def unlink(self, parent_ep, child_ep, **kwargs):
+        """Destroy the reference from `parent_e[nd]p[oint]` to `child_e[nd]p[oint]`."""
+        r = self._make_link(parent_ep, child_ep, sever=True, **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def upload(self, endpoint, filename):
+        """Upload the file at `filename` to `endpoint`."""
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        with open(filename, 'rb') as f:
+            # Note (important!): this is a "form-data" entry, where the server
+            # expects the "name" to be 'file' and rejects it otherwise:
+            name = 'file'
+            r = self._create_object(endpoint, None, 'post',
+                    # Note: the requests library will set the content-type automatically
+                    # and also add a randomly generated "boundary" to separate files:
+                    #headers={'content-type': 'multipart/form-data'},  No!
+                    files=[(name, (filename, f, ''))])
+            r.raise_for_status()
+
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def download(self, endpoint, out_file='.'):
+        """Download from `endpoint` into `out_file` (may be a directory).
+
+        Returns:
+            status_code, actual_filename
+        """
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+
+        out_file = os.path.abspath(out_file)
+
+        content_type = 'application/octet-stream'
+        r = self._fetch_object(endpoint, 'get', stream=True, headers={'accept': content_type})
+        assert r.headers['content-type'] == content_type, "unexpected content-type"
+
+        content_dispo = r.headers['content-disposition'].split('; ')
+        # ['attachment',
+        #  'filename=2025_10_06__13_23_32.h5',
+        #  "filename*=UTF-8''2025_10_06__13_23_32.h5"]
+        filename = next(
+            (dispo.split('=')[1] for dispo in content_dispo if dispo.startswith("filename=")),
+            None)
+        if os.path.isdir(out_file):
+            assert filename, "no out_file given and server didn't supply filename"
+            out_file = os.path.join(out_file, filename)
+
+        with open(out_file, mode='xb') as f:
+            # chunk_size must be of type int or None. A value of None will
+            # function differently depending on the value of `stream`.
+            # stream=True will read data as it arrives in whatever size the
+            # chunks are received. If stream=False, data is returned as
+            # a single chunk.
+            for chunk in r.iter_content(chunk_size=None):
+                f.write(chunk)
+        r.close()
+
+        return _unsafe(r.status_code, out_file)
+
+    def _fetch_object(self, endpoint, method='get', **kwargs):
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        if 'headers' not in kwargs:
+            kwargs['headers'] = {'accept': 'application/json'}
+        elif 'accept' not in (k.lower() for k in kwargs['headers']):
+            kwargs['headers'].update({'accept': 'application/json'})
+        if 'timeout' not in kwargs:
+            # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
+            kwargs['timeout'] = (6.06, 27)
+        r = self.session.request(method, self.url + endpoint, **kwargs)
+        r.raise_for_status()
+
+        return r
+
+    def _make_link(self, parent_href, child_href, *, sever=False, **kwargs):
+        verb = "LINK" if not sever else "UNLINK"
+        if 'timeout' not in kwargs:
+            # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
+            kwargs['timeout'] = (6.06, 27)
+        r = self.session.request(verb, self.url + parent_href,
+                                 headers={"location": child_href}, **kwargs)
+        r.raise_for_status()
+
+        return r
+
+    def _create_object(self, endpoint, data, method='post', **kwargs):
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
+        if data is not None:
+            if not isinstance(data, str):
+                # Note: default is `ensure_ascii=True`, but this escapes Umlaute!
+                data = json.dumps(data, ensure_ascii=False)
+        if 'headers' not in kwargs:
+            kwargs['headers'] = {'content-type': 'application/json'}
+        elif 'content-type' not in (k.lower() for k in kwargs['headers']):
+            kwargs['headers'].update({'content-type': 'application/json'})
+        if 'timeout' not in kwargs:
+            # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
+            kwargs['timeout'] = (6.06, 27)
+        r = self.session.request(method, self.url + endpoint, data=data, **kwargs)
+        if not r.ok:
+            log.error(f"{method.upper()} {endpoint}\n{data}\n\nreturned [{r.status_code}]: {r.content}")
+        r.raise_for_status()
+
+        return r
+
+    def sync(self, peaktable):
+        """Compare and upload any differences in `peaktable` to the database."""
+        from pytrms.peaktable import Peak, PeakTable
+        from operator import attrgetter
+
+        # Note: the DB-API distinguishes between peaks with
+        # different center *and* name, while the PyTRMS 'Peak'
+        # only distinguishes by center, so this is our key:
+        make_key = lambda p_info: (p_info['center'], p_info['name'])
+
+        if isinstance(peaktable, str):
+            log.info(f"loading peaktable '{peaktable}'...")
+            peaktable = PeakTable.from_file(peaktable)
+
+        # get the PyTRMS- and IoniConnect-peaks on the same page:
+        conv = {
+            'name': attrgetter('label'),
+            'center': attrgetter('center'),
+            'kRate': attrgetter('k_rate'),
+            'low': lambda p: p.borders[0],
+            'high': lambda p: p.borders[1],
+            'shift': attrgetter('shift'),
+            'multiplier': attrgetter('multiplier'),
+            'resolution': attrgetter('resolution'),
+        }
+        # normalize the input argument and create a hashable set:
+        updates = dict()
+        for peak in peaktable:
+            payload = {k: conv[k](peak) for k in conv}
+            updates[make_key(payload)] = {'payload': payload}
+
+        log.info("fetching current peaktable from the server...")
+        pt_server = self.get('/api/peaks')['_embedded']['peaks']
+        # create a comparable collection of the peaks already in the database by
+        # reducing the keys in the response to what we actually want to update:
+        db_peaks = {make_key(p): {
+            'payload': {k: p[k] for k in conv.keys()},
+            'self': p['_links']['self'],
+            'parent': p['_links'].get('parent'),
+        } for p in pt_server}
+
+        to_update = updates.keys() & db_peaks.keys()
+        to_upload = updates.keys() - db_peaks.keys()
+        updated = up_to_date = 0
+        for key in sorted(to_update):
+            # check if an existing peak needs an update
+            if db_peaks[key]['payload'] == updates[key]['payload']:
+                # nothing to do..
+                log.debug(f"up-to-date: {key}")
+                up_to_date += 1
+            else:
+                self.put(db_peaks[key]['self']['href'], updates[key]['payload'])
+                log.info(f"updated: {key}")
+                updated += 1
+
+        if len(to_upload):
+            # Note: POSTing the embedded-collection is *miles faster*
+            # than doing separate requests for each peak!
+            payload = {
+                '_embedded': {
+                    'peaks': [updates[key]['payload']
+                              for key in sorted(to_upload)]
+                }
+            }
+            self.post('/api/peaks', payload)
+            for key in sorted(to_upload):
+                log.info(f"added new: {key}")
+            # Note: we need the updated peaktable to learn about
+            # the href (id) assigned to newly added peaks:
+            pt_server = self.get('/api/peaks')['_embedded']['peaks']
+
+        log.info("repairing fitpeak~>nominal links...")
+        peak2href = {
+            Peak(p["center"], label=p["name"]): p["_links"]["self"]["href"]
+            for p in pt_server
+        }
+        to_link = set((peak2href[fitted], peak2href[fitted.parent])
+                      for fitted in peaktable.fitted)
+
+        is_link = set((child["_links"]["self"]["href"], child["_links"]["parent"]["href"])
+                      for child in pt_server if "parent" in child["_links"])
+
+        for child_href, parent_href in is_link & to_link:
+            log.debug(f"keep link {parent_href} <~> {child_href}")
+
+        for child_href, parent_href in to_link - is_link:
+            log.debug(f"make link {parent_href} ~>> {child_href}")
+            self.link(parent_href, child_href)
+
+        for child_href, parent_href in is_link - to_link:
+            log.debug(f"break link {parent_href} ~x~ {child_href}")
+            self.unlink(parent_href, child_href)
+
+        return {
+            'added': len(to_upload),
+            'updated': updated,
+            'up-to-date': up_to_date,
+            'linked': len(to_link - is_link),
+            'unlinked': len(is_link - to_link),
+        }
+
+    def iter_events(self, event_re=r".*"):
+        """Follow the server-sent-events (SSE) on the DB-API.
+
+        `event_re` is a regular expression to filter events (default: matches everything).
+
+        Note: this will block until a matching event is received.
+        In particular, it cannot be cancelled by KeyboardInterrupt (due to the `requests`
+        stream-implementation), unless the server sends a keep-alive at regular
+        intervals (as every well-behaved server should be doing)!
+        """
+        yield from SSEventListener(event_re, host_url=self.url, endpoint="/api/events",
+                                   session=self.session)
+
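
The new client in action, as a sketch against the methods above (the peaktable filename and the event filter are illustrative):

from pytrms.clients.db_api import IoniConnect

client = IoniConnect('127.0.0.1', 5066)  # tries to connect for ~3 s
if not client.is_connected:
    client.connect(timeout_s=30)         # retry with a longer timeout

# reconcile a local peaktable with the database and report the differences:
report = client.sync('peaktable.csv')    # hypothetical filename
print(report)  # {'added': ..., 'updated': ..., 'up-to-date': ..., 'linked': ..., 'unlinked': ...}

# start a measurement, follow the SSE stream, stop again:
client.start_measurement()
for event in client.iter_events(r'measurement'):
    print(event)  # blocks until a matching event arrives
    break
client.stop_measurement()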
{pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/dummy.py
@@ -1,7 +1,8 @@
-from . import _logging
+import logging
+
 from .._base import _IoniClientBase
 
-log = _logging.getLogger(__name__)
+log = logging.getLogger(__name__)
 
 
 class IoniDummy(_IoniClientBase):
@@ -21,7 +22,7 @@ class IoniDummy(_IoniClientBase):
 
     __is_running = False
 
-    def connect(self, timeout_s):
+    def connect(self, timeout_s=0):
         log.info(f'pretending to connect to server')
         self.__is_connected = True
@@ -37,7 +38,7 @@ class IoniDummy(_IoniClientBase):
         log.info(f'pretending to stop measurement ({future_cycle = })')
         self.__is_running = False
 
-    def __init__(self, host='localhost', port=5687):
+    def __init__(self, host='localhost', port=1234):
         super().__init__(host, port)
         self.connect()
 
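
The dummy client mirrors the same interface without any I/O, so it can stand in for an instrument in tests. A sketch, assuming the usual `_IoniClientBase` methods (`start_measurement` is not shown in this diff):

from pytrms.clients.dummy import IoniDummy

dummy = IoniDummy()        # 'connects' immediately; the port is fake anyway
dummy.start_measurement()  # only logs 'pretending to ...'
dummy.stop_measurement()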
{pytrms-0.9.7 → pytrms-0.9.8}/pytrms/clients/mqtt.py
@@ -2,18 +2,18 @@ import os
 import time
 import json
 import queue
+import logging
 from collections import deque, namedtuple
 from datetime import datetime
 from functools import wraps
 from itertools import cycle, chain, zip_longest
 from threading import Condition, RLock
 
-from . import _logging
 from . import _par_id_file
 from .._base import itype, _MqttClientBase, _IoniClientBase
 
 
-log = _logging.getLogger(__name__)
+log = logging.getLogger(__name__)
 
 __all__ = ['MqttClient']
 
@@ -22,12 +22,6 @@ with open(_par_id_file) as f:
     from pandas import read_csv, isna
 
     _par_id_info = read_csv(f, sep='\t').drop(0).set_index('Name').fillna('')
-    if isna(_par_id_info.at['MPV_1', 'Access']):
-        log.warning(f'filling in read-properties still missing in {os.path.basename(_par_id_file)}')
-        _par_id_info.at['MPV_1', 'Access'] = 'RW'
-        _par_id_info.at['MPV_2', 'Access'] = 'RW'
-        _par_id_info.at['MPV_3', 'Access'] = 'RW'
-
 
 
 ## >>>>>>>> adaptor functions <<<<<<<< ##
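
Both client modules can now use the stdlib logging directly, because pytrms/__init__.py (above) patches logging.getLogger to announce every logger by name. That makes a single chatty module easy to spot and silence:

import logging
import pytrms  # importing pytrms installs the announcing getLogger wrapper

logging.basicConfig(level=logging.DEBUG)
# each logger acquired from here on reports "acquired logger for '<name>'";
# turn an individual module down without touching the rest:
logging.getLogger('pytrms.clients.mqtt').setLevel(logging.WARNING)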