pytrms 0.9.6__py3-none-any.whl → 0.9.8__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
pytrms/__init__.py CHANGED
@@ -1,8 +1,52 @@
-_version = '0.9.6'
+_version = '0.9.8'
+
+import logging
+from functools import wraps
+
+_logging_getLogger = logging.getLogger
+
+@wraps(_logging_getLogger)
+def getLoggerWithAnnouncement(name=None):
+    # patch the (global) logger to print its own name
+    # (useful for turning individual loggers on/off)
+    # WARNING: this will patch every instance of the
+    # logging-module in every import after pytrms is
+    # imported! don't be overwhelmingly fancy with this!
+    rv = _logging_getLogger(name)
+    if name is not None:
+        rv.debug(f"acquired logger for '{name}'")
+
+    return rv
+
+logging.getLogger = getLoggerWithAnnouncement
+logging.TRACE = 5  # even more verbose than logging.DEBUG
 
 __all__ = ['load', 'connect']
 
 
+def enable_extended_logging(log_level=logging.DEBUG):
+    '''make output of http-requests more talkative.
+
+    set 'log_level=logging.TRACE' for highest verbosity!
+    '''
+    if log_level <= logging.DEBUG:
+        # enable logging of http request urls on the library
+        # underlying the 'requests'-package:
+        logging.warning(f"enabling logging-output on 'urllib3' ({log_level = })")
+        requests_log = logging.getLogger("urllib3")
+        requests_log.setLevel(log_level)
+        requests_log.propagate = True
+
+    if log_level <= logging.TRACE:
+        # enabling debugging at http.client level (requests->urllib3->http.client)
+        # shows the REQUEST, including HEADERS and DATA, and the RESPONSE with
+        # HEADERS but without DATA; the response body is not logged.
+        logging.warning(f"enabling logging-output on 'HTTPConnection' ({log_level = })")
+        from http.client import HTTPConnection
+        HTTPConnection.debuglevel = 1
+
+
 def load(path):
     '''Open a datafile for post-analysis or batch processing.
 
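A quick usage sketch of the logging hooks introduced above (only `enable_extended_logging` and the patched-in `logging.TRACE` come from this hunk; the rest is illustrative):

    import logging
    import pytrms

    # route log records somewhere visible first:
    logging.basicConfig(level=logging.DEBUG)

    # DEBUG level logs the request urls via 'urllib3'...
    pytrms.enable_extended_logging(log_level=logging.DEBUG)

    # ...and the TRACE level (= 5) additionally dumps http.client
    # request/response headers via HTTPConnection.debuglevel:
    pytrms.enable_extended_logging(log_level=logging.TRACE)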
@@ -32,7 +76,7 @@ def connect(host='localhost', port=None, method='mqtt'):
     elif method.lower() == 'webapi':
         from .clients.ioniclient import IoniClient as _client
     elif method.lower() == 'modbus':
-        from .modbus import IoniconModbus as _client
+        from .clients.modbus import IoniconModbus as _client
     else:
         raise NotImplementedError(str(method))
 
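A sketch of the fixed dispatch; `connect(host, port, method)` is the signature shown in the hunk header, the port value is a placeholder:

    import pytrms

    # since 0.9.8 this resolves to pytrms.clients.modbus.IoniconModbus:
    client = pytrms.connect('localhost', port=502, method='modbus')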
pytrms/_base/__init__.py CHANGED
@@ -1,7 +1,7 @@
 from collections import namedtuple
 
-from .mqttclient import MqttClientBase
-from .ioniclient import IoniClientBase
+from .mqttclient import MqttClientBase as _MqttClientBase
+from .ioniclient import IoniClientBase as _IoniClientBase
 
 class itype:
 
pytrms/_base/ioniclient.py CHANGED
@@ -1,6 +1,10 @@
 from abc import ABC, abstractmethod
 
+
 class IoniClientBase(ABC):
+    '''Abstract base-class that defines the common interface for clients.
+
+    '''
 
     @property
     @abstractmethod
@@ -22,6 +26,22 @@ class IoniClientBase(ABC):
     def disconnect(self):
         pass
 
+    @abstractmethod
+    def start_measurement(self, path=None):
+        '''Start a new measurement and block until the change is confirmed.
+
+        If 'path' is not None, write to the given .h5 file.
+        '''
+        pass
+
+    @abstractmethod
+    def stop_measurement(self, future_cycle=None):
+        '''Stop the current measurement and block until the change is confirmed.
+
+        If 'future_cycle' is not None and in the future, schedule the stop command.
+        '''
+        pass
+
     def __init__(self, host, port):
         # Note: circumvent (potentially sluggish) Windows DNS lookup:
         self.host = '127.0.0.1' if host == 'localhost' else str(host)
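Sketch of a concrete client under the extended interface; the subclass below is purely illustrative and assumes only the abstract members visible in this diff (there may be more):

    from pytrms._base import _IoniClientBase

    class DummyClient(_IoniClientBase):
        '''Minimal stand-in that satisfies the methods shown above.'''

        def __init__(self, host='localhost', port=8002):
            super().__init__(host, port)
            self._running = False

        @property
        def is_connected(self):
            return True

        @property
        def is_running(self):
            return self._running

        def connect(self, timeout_s=10):
            pass

        def disconnect(self):
            pass

        def start_measurement(self, path=None):
            self._running = True

        def stop_measurement(self, future_cycle=None):
            self._running = False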
pytrms/_base/mqttclient.py CHANGED
@@ -9,8 +9,6 @@ from datetime import datetime as dt
 
 import paho.mqtt.client
 
-from .ioniclient import IoniClientBase
-
 log = logging.getLogger(__name__)
 
 __all__ = ['MqttClientBase']
@@ -35,7 +33,11 @@ def _on_publish(client, self, mid):
     log.debug(f"[{self}] published {mid = }")
 
 
-class MqttClientBase(IoniClientBase):
+class MqttClientBase:
+    """Mix-in class that supplies basic MQTT-callback functions.
+
+    Implements part of the `IoniClientBase` interface.
+    """
 
     @property
     def is_connected(self):
@@ -50,8 +52,11 @@ class MqttClientBase(IoniClientBase):
     def __init__(self, host, port, subscriber_functions,
                  on_connect, on_subscribe, on_publish,
                  connect_timeout_s=10):
+        # Note: circumvent (potentially sluggish) Windows DNS lookup:
+        self.host = '127.0.0.1' if host == 'localhost' else str(host)
+        self.port = int(port)
+
         assert len(subscriber_functions) > 0, "no subscribers: for some unknown reason this causes disconnects"
-        super().__init__(host, port)
 
         # Note: Version 2.0 of paho-mqtt introduced versioning of the user-callback to fix
         # some inconsistency in callback arguments and to provide better support for MQTTv5.
@@ -90,7 +95,7 @@ class MqttClientBase(IoniClientBase):
         try:
             self.connect(connect_timeout_s)
         except TimeoutError as exc:
-            log.warn(f"{exc} (retry connecting when the Instrument is set up)")
+            log.warning(f"{exc} (retry connecting when the Instrument is set up)")
 
     def connect(self, timeout_s=10):
         log.info(f"[{self}] connecting to MQTT broker...")
@@ -117,3 +122,6 @@ class MqttClientBase(IoniClientBase):
         self.client.loop_stop()
         self.client.disconnect()
 
+    def __repr__(self):
+        return f"<{self.__class__.__name__} @ {self.host}[:{self.port}]>"
+
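With the inheritance cut, a concrete MQTT client is evidently meant to combine the mix-in with the abstract base; schematically (class name and method bodies are illustrative, not part of the package):

    from pytrms._base import _MqttClientBase, _IoniClientBase

    class MyMqttClient(_MqttClientBase, _IoniClientBase):
        # the mix-in supplies is_connected, connect/disconnect and the
        # paho-mqtt callback plumbing; the ABC contributes the contract,
        # so only the remaining abstract methods are left to implement:
        def start_measurement(self, path=None):
            ...

        def stop_measurement(self, future_cycle=None):
            ...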
pytrms/clients/__init__.py CHANGED
@@ -1,33 +1,9 @@
 import os
 
+from .. import enable_extended_logging
+
 _root = os.path.dirname(__file__)
 _par_id_file = os.path.abspath(os.path.join(_root, '..', 'data', 'ParaIDs.csv'))
 assert os.path.exists(_par_id_file), "par-id file not found: please re-install PyTRMS package"
 
 
-import logging as _logging
-
-_logging.TRACE = 5  # even more verbose than logging.DEBUG
-
-def enable_extended_logging(log_level=_logging.DEBUG):
-    '''make output of http-requests more talkative.
-
-    set 'log_level=logging.TRACE' (defined as 0 in pytrms.__init__) for highest verbosity!
-    '''
-    if log_level <= _logging.DEBUG:
-        # enable logging of http request urls on the library, that is
-        # underlying the 'requests'-package:
-        _logging.warn(f"enabling logging-output on 'urllib3' ({log_level = })")
-        requests_log = _logging.getLogger("urllib3")
-        requests_log.setLevel(log_level)
-        requests_log.propagate = True
-
-    if log_level <= _logging.TRACE:
-        # Enabling debugging at http.client level (requests->urllib3->http.client)
-        # you will see the REQUEST, including HEADERS and DATA, and RESPONSE with
-        # HEADERS but without DATA. the only thing missing will be the response.body,
-        # which is not logged.
-        _logging.warn(f"enabling logging-output on 'HTTPConnection' ({log_level = })")
-        from http.client import HTTPConnection
-        HTTPConnection.debuglevel = 1
-
pytrms/clients/db_api.py CHANGED
@@ -1,25 +1,53 @@
 import os
+import time
 import json
+import logging
+from collections import namedtuple
+import urllib3.util
 
 import requests
+import requests.adapters
+import requests.exceptions
 
-from . import _logging
 from .ssevent import SSEventListener
-from .._base import IoniClientBase
-
-log = _logging.getLogger(__name__)
-
-# TODO :: sowas waer auch ganz cool: die DBAPI bietes sich geradezu an,
-# da mehr object-oriented zu arbeiten:
-#     currentVariable = get_component(currentComponentNameAction, ds)
-#     currentVariable.save_value({'value': currentValue})
-
-class IoniConnect(IoniClientBase):
+from .._base import _IoniClientBase
+
+log = logging.getLogger(__name__)
+
+_unsafe = namedtuple('http_response', ['status_code', 'href'])
+
+__all__ = ['IoniConnect']
+
+
+class IoniConnect(_IoniClientBase):
+
+    # Note: this retry-policy is specifically designed for the
+    # SQLite Error 5: 'database locked', which may take potentially
+    # minutes to resolve itself! Therefore, it is extra generous
+    # and backs off up to `3.0 * 2^4 = 48 sec` between retries for
+    # a total of ~1 1/2 minutes (plus database timeout). But giving
+    # up on retrying here would mean *losing all data* in the queue!
+    # ==>> We would rather crash on a `queue.full` exception! <<==
+    _retry_policy = urllib3.util.Retry(
+        # this configures policies on each cause for errors individually...
+        total=None,  # max. retries (takes precedence). `None`: turned off
+        connect=0, read=0, redirect=0,  # (all turned off, see docs for details)
+        other=0,  # "other" errors include timeout (set to 27 seconds)
+        # configure the retries on specific status-codes...
+        status=5,  # how many times to retry on bad status codes
+        raise_on_status=True,  # `True`: do not return a 429 status code
+        status_forcelist=[429],  # integer status-codes to retry on
+        allowed_methods=None,  # `None`: retry on all (possibly not idempotent) verbs
+        # this configures backoff between retries...
+        backoff_factor=3.0,  # back off *after* first try in seconds (x 2^n_retries)
+        respect_retry_after_header=False,  # would override `backoff_factor`, turn off!
+    )
 
     @property
     def is_connected(self):
         '''Returns `True` if connection to IoniTOF could be established.'''
         try:
+            assert self.session is not None, "not connected"
             self.get("/api/status")
             return True
         except:
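As a cross-check of the retry-policy comment, urllib3 sleeps roughly `backoff_factor * 2**(n - 1)` seconds before the n-th retry (whether the very first retry sleeps at all depends on the urllib3 version):

    # worked check of the backoff arithmetic claimed in the comment above:
    backoff_factor = 3.0
    status_retries = 5

    sleeps = [backoff_factor * 2 ** (n - 1) for n in range(1, status_retries + 1)]
    print(sleeps)       # [3.0, 6.0, 12.0, 24.0, 48.0] -> caps out at 48 sec
    print(sum(sleeps))  # 93.0 sec, i.e. the advertised ~1 1/2 minutes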
@@ -28,69 +56,223 @@ class IoniConnect(IoniClientBase):
     @property
     def is_running(self):
         '''Returns `True` if IoniTOF is currently acquiring data.'''
-        raise NotImplementedError("is_running")
+        try:
+            assert self.session is not None, "not connected"
+            self.get_location("/api/measurements/current")
+            return True
+        except (AssertionError, requests.exceptions.HTTPError):
+            return False
 
-    def connect(self, timeout_s):
-        pass
+    def connect(self, timeout_s=10):
+        self.session = requests.sessions.Session()
+        self.session.mount('http://', self._http_adapter)
+        self.session.mount('https://', self._http_adapter)
+        started_at = time.monotonic()
+        while timeout_s is None or time.monotonic() < started_at + timeout_s:
+            try:
+                self.current_meas_loc = self.get_location("/api/measurements/current")
+                break
+            except requests.exceptions.HTTPError:
+                # OK, no measurement running..
+                self.current_meas_loc = ''
+                break
+            except Exception:
+                pass
+
+            time.sleep(1.0)
+        else:
+            self.session = self.current_meas_loc = None
+            raise TimeoutError(f"no connection to '{self.url}'")
 
     def disconnect(self):
-        pass
+        if self.session is not None:
+            del self.session
+        self.session = None
+        self.current_meas_loc = None
+
+    def start_measurement(self, path=None):
+        '''Start a new measurement and block until the change is confirmed.
+
+        If 'path' is not None, write to the given .h5 file.
+        '''
+        assert not self.is_running, "measurement already running @ " + str(self.current_meas_loc)
+
+        payload = {}
+        if path is not None:
+            assert os.path.isdir(path), "must point to a (recipe-)directory: " + str(path)
+            payload |= { "recipeDirectory": str(path) }
+
+        self.current_meas_loc = self.post("/api/measurements", payload).href
+        self.put(self.current_meas_loc, { "isRunning": True })
 
-    def __init__(self, host='127.0.0.1', port=5066, session=None):
+        return self.current_meas_loc
+
+    def stop_measurement(self, future_cycle=None):
+        '''Stop the current measurement and block until the change is confirmed.
+
+        If 'future_cycle' is not None and in the future, schedule the stop command.
+        '''
+        loc = self.current_meas_loc or self.get_location("/api/measurements/current")
+        self.patch(loc, { "isRunning": False })
+        self.current_meas_loc = ''
+
+    def __init__(self, host='127.0.0.1', port=5066):
         super().__init__(host, port)
         self.url = f"http://{self.host}:{self.port}"
-        if session is None:
-            session = requests.sessions.Session()
-        self.session = session
-        # ??
-        self.current_avg_endpoint = None
-        self.comp_dict = dict()
+        self._http_adapter = requests.adapters.HTTPAdapter(max_retries=self._retry_policy)
+        self.session = None
+        self.current_meas_loc = None
+        try:
+            self.connect(timeout_s=3.3)
+        except TimeoutError:
+            log.warning("no connection! make sure the DB-API is running and try again")
 
     def get(self, endpoint, **kwargs):
-        return self._get_object(endpoint, **kwargs).json()
+        """Make a GET request to `endpoint` and parse JSON if applicable."""
+        try:
+            r = self._fetch_object(endpoint, 'get', **kwargs)
+            if 'json' in r.headers.get('content-type', ''):
+                return r.json()
+            if 'text' in r.headers.get('content-type', ''):
+                return r.text
+            else:
+                log.warning(f"unexpected 'content-type: {r.headers['content-type']}'")
+                log.info(f"did you mean to use `{type(self).__name__}.download(..)` instead?")
+                return r.content
+
+        except requests.exceptions.HTTPError as e:
+            if e.response.status_code == 410:  # Gone
+                log.debug(f"nothing there at '{endpoint}' 0_o ?!")
+                return None
+            raise
+
+    def get_location(self, endpoint, **kwargs):
+        """Returns the actual location that `endpoint` points to (may be a redirect)."""
+        r = self._fetch_object(endpoint, 'get', **(kwargs | { "allow_redirects": False }))
+        return r.headers.get('Location', r.request.path_url)
 
     def post(self, endpoint, data, **kwargs):
-        return self._create_object(endpoint, data, 'post', **kwargs).headers.get('Location')
+        """Append to the collection at `endpoint` the object defined by `data`."""
+        r = self._create_object(endpoint, data, 'post', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', ''))  # no default location known!
 
     def put(self, endpoint, data, **kwargs):
-        return self._create_object(endpoint, data, 'put', **kwargs).headers.get('Location')
+        """Replace the entire object at `endpoint` with `data`."""
+        r = self._create_object(endpoint, data, 'put', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def patch(self, endpoint, data, **kwargs):
+        """Change parts of the object at `endpoint` with fields in `data`."""
+        r = self._create_object(endpoint, data, 'patch', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def delete(self, endpoint, **kwargs):
+        """Attempt to delete the object at `endpoint`."""
+        r = self._fetch_object(endpoint, 'delete', **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def link(self, parent_ep, child_ep, **kwargs):
+        """Make the object at `parent_e[nd]p[oint]` refer to `child_e[nd]p[oint]`."""
+        r = self._make_link(parent_ep, child_ep, sever=False, **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def unlink(self, parent_ep, child_ep, **kwargs):
+        """Destroy the reference from `parent_e[nd]p[oint]` to `child_e[nd]p[oint]`."""
+        r = self._make_link(parent_ep, child_ep, sever=True, **kwargs)
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
 
     def upload(self, endpoint, filename):
+        """Upload the file at `filename` to `endpoint`."""
         if not endpoint.startswith('/'):
             endpoint = '/' + endpoint
-        with open(filename) as f:
+        with open(filename, 'rb') as f:
             # Note (important!): this is a "form-data" entry, where the server
             # expects the "name" to be 'file' and rejects it otherwise:
             name = 'file'
-            r = self.session.post(self.url + endpoint, files=[(name, (filename, f, ''))])
+            r = self._create_object(endpoint, None, 'post',
+                # Note: the requests library will set the content-type automatically
+                # and also add a randomly generated "boundary" to separate files:
+                #headers={'content-type': 'multipart/form-data'}, No!
+                files=[(name, (filename, f, ''))])
             r.raise_for_status()
 
-        return r
+        return _unsafe(r.status_code, r.headers.get('Location', r.request.path_url))
+
+    def download(self, endpoint, out_file='.'):
+        """Download from `endpoint` into `out_file` (may be a directory).
+
+        Returns:
+            status_code, actual_filename
+        """
+        if not endpoint.startswith('/'):
+            endpoint = '/' + endpoint
 
-    def _get_object(self, endpoint, **kwargs):
+        out_file = os.path.abspath(out_file)
+
+        content_type = 'application/octet-stream'
+        r = self._fetch_object(endpoint, 'get', stream=True, headers={'accept': content_type})
+        assert r.headers['content-type'] == content_type, "unexpected content-type"
+
+        content_dispo = r.headers['content-disposition'].split('; ')
+        # ['attachment',
+        #  'filename=2025_10_06__13_23_32.h5',
+        #  "filename*=UTF-8''2025_10_06__13_23_32.h5"]
+        filename = next(
+            (dispo.split('=')[1] for dispo in content_dispo if dispo.startswith("filename="))
+            , None)
+        if os.path.isdir(out_file):
+            assert filename, "no out_file given and server didn't supply filename"
+            out_file = os.path.join(out_file, filename)
+
+        with open(out_file, mode='xb') as f:
+            # chunk_size must be of type int or None. A value of None will
+            # function differently depending on the value of `stream`.
+            # stream=True will read data as it arrives in whatever size the
+            # chunks are received. If stream=False, data is returned as
+            # a single chunk.
+            for chunk in r.iter_content(chunk_size=None):
+                f.write(chunk)
+        r.close()
+
+        return _unsafe(r.status_code, out_file)
+
+    def _fetch_object(self, endpoint, method='get', **kwargs):
         if not endpoint.startswith('/'):
             endpoint = '/' + endpoint
         if 'headers' not in kwargs:
-            kwargs['headers'] = {'content-type': 'application/hal+json'}
-        elif 'content-type' not in (k.lower() for k in kwargs['headers']):
-            kwargs['headers'].update({'content-type': 'application/hal+json'})
+            kwargs['headers'] = {'accept': 'application/json'}
+        elif 'accept' not in (k.lower() for k in kwargs['headers']):
+            kwargs['headers'].update({'accept': 'application/json'})
+        if 'timeout' not in kwargs:
+            # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
+            kwargs['timeout'] = (6.06, 27)
+        r = self.session.request(method, self.url + endpoint, **kwargs)
+        r.raise_for_status()
+
+        return r
+
+    def _make_link(self, parent_href, child_href, *, sever=False, **kwargs):
+        verb = "LINK" if not sever else "UNLINK"
         if 'timeout' not in kwargs:
             # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
             kwargs['timeout'] = (6.06, 27)
-        r = self.session.request('get', self.url + endpoint, **kwargs)
+        r = self.session.request(verb, self.url + parent_href,
+                                 headers={"location": child_href}, **kwargs)
         r.raise_for_status()
-
+
         return r
 
     def _create_object(self, endpoint, data, method='post', **kwargs):
         if not endpoint.startswith('/'):
             endpoint = '/' + endpoint
-        if not isinstance(data, str):
-            data = json.dumps(data, ensure_ascii=False)  # default is `True`, escapes Umlaute!
-        if 'headers' not in kwargs:
-            kwargs['headers'] = {'content-type': 'application/hal+json'}
-        elif 'content-type' not in (k.lower() for k in kwargs['headers']):
-            kwargs['headers'].update({'content-type': 'application/hal+json'})
+        if data is not None:
+            if not isinstance(data, str):
+                # Note: default is `ensure_ascii=True`, but this escapes Umlaute!
+                data = json.dumps(data, ensure_ascii=False)
+            if 'headers' not in kwargs:
+                kwargs['headers'] = {'content-type': 'application/json'}
+            elif 'content-type' not in (k.lower() for k in kwargs['headers']):
+                kwargs['headers'].update({'content-type': 'application/json'})
         if 'timeout' not in kwargs:
             # https://requests.readthedocs.io/en/latest/user/advanced/#timeouts
             kwargs['timeout'] = (6.06, 27)
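Taken together, the verb helpers allow straightforward scripting against a running DB-API instance; a hedged sketch (only endpoints that appear in this diff are used):

    from pytrms.clients.db_api import IoniConnect

    api = IoniConnect('127.0.0.1', port=5066)  # the constructor already tries to connect (~3.3 s)

    print(api.get('/api/status'))              # parsed JSON, text or raw bytes
    if not api.is_running:
        loc = api.start_measurement()          # POST /api/measurements, then PUT isRunning=True
        print('measurement started at', loc)
        api.stop_measurement()                 # PATCH isRunning=False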
@@ -106,9 +288,10 @@ class IoniConnect(IoniClientBase):
         from pytrms.peaktable import Peak, PeakTable
         from operator import attrgetter
 
-        # Note: a `Peak` is a hashable object that serves as a key that
-        # distinguishes between peaks as defined by PyTRMS:
-        make_key = lambda peak: Peak(center=peak['center'], label=peak['name'], shift=peak['shift'])
+        # Note: the DB-API distinguishes between peaks with
+        # different center *and* name, while the PyTRMS 'Peak'
+        # only distinguishes by center, so this is our key:
+        make_key = lambda p_info: (p_info['center'], p_info['name'])
 
         if isinstance(peaktable, str):
             log.info(f"loading peaktable '{peaktable}'...")
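To see why the key changed: with the tuple key, two fit peaks that share a center but differ in name no longer collide (the values below are made up):

    make_key = lambda p_info: (p_info['center'], p_info['name'])

    p1 = {'center': 59.049, 'name': 'acetone',  'shift': 0.0}
    p2 = {'center': 59.049, 'name': 'propanal', 'shift': 0.0}

    assert make_key(p1) != make_key(p2)  # distinct keys for the DB-API;
    # per the note above, the former Peak-based key compared by center
    # only and would have treated these as the same peak.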
@@ -132,58 +315,74 @@ class IoniConnect(IoniClientBase):
             updates[make_key(payload)] = {'payload': payload}
 
         log.info(f"fetching current peaktable from the server...")
+        pt_server = self.get('/api/peaks')['_embedded']['peaks']
         # create a comparable collection of peaks already on the database by
         # reducing the keys in the response to what we actually want to update:
         db_peaks = {make_key(p): {
-            'payload': {k: p[k] for k in conv.keys()},
-            'self': p['_links']['self'],
-            'parent': p['_links'].get('parent'),
+                'payload': {k: p[k] for k in conv.keys()},
+                'self': p['_links']['self'],
+                'parent': p['_links'].get('parent'),
             } for p in self.get('/api/peaks')['_embedded']['peaks']}
 
         to_update = updates.keys() & db_peaks.keys()
         to_upload = updates.keys() - db_peaks.keys()
-        updated = 0
+        updated = up_to_date = 0
         for key in sorted(to_update):
             # check if an existing peak needs an update
             if db_peaks[key]['payload'] == updates[key]['payload']:
                 # nothing to do..
                 log.debug(f"up-to-date: {key}")
-                continue
-
-            self.put(db_peaks[key]['self']['href'], updates[key]['payload'])
-            log.info(f"updated: {key}")
-            updated += 1
+                up_to_date += 1
+            else:
+                self.put(db_peaks[key]['self']['href'], updates[key]['payload'])
+                log.info(f"updated: {key}")
+                updated += 1
 
         if len(to_upload):
             # Note: POSTing the embedded-collection is *miles faster*
             # than doing separate requests for each peak!
-            payload = {'_embedded': {'peaks': [updates[key]['payload'] for key in sorted(to_upload)]}}
+            payload = {
+                '_embedded': {
+                    'peaks': [updates[key]['payload']
+                              for key in sorted(to_upload)]
+                }
+            }
             self.post('/api/peaks', payload)
             for key in sorted(to_upload):
                 log.info(f"added new: {key}")
+            # Note: we need the updated peaktable to learn about
+            # the href (id) assigned to newly added peaks:
+            pt_server = self.get('/api/peaks')['_embedded']['peaks']
+
+        log.info("repairing fitpeak~>nominal links...")
+        peak2href = {
+            Peak(p["center"], label=p["name"]): p["_links"]["self"]["href"]
+            for p in pt_server
+        }
+        to_link = set((peak2href[fitted], peak2href[fitted.parent])
+                      for fitted in peaktable.fitted)
 
-        if len(peaktable.fitted):
-            # Note: until now, we disregarded the peak-parent-relationship, so
-            # make another request to the updated peak-table from the server...
-            db_peaks = {make_key(p): {
-                'payload': {k: p[k] for k in conv.keys()},
-                'self': p['_links']['self'],
-                'parent': p['_links'].get('parent'),
-                } for p in self.get('/api/peaks')['_embedded']['peaks']}
+        is_link = set((child["_links"]["self"]["href"], child["_links"]["parent"]["href"])
+                      for child in pt_server if "parent" in child["_links"])
+
+        for child_href, parent_href in is_link & to_link:
+            log.debug(f"keep link {parent_href} <~> {child_href}")
+            pass
+
+        for child_href, parent_href in to_link - is_link:
+            log.debug(f"make link {parent_href} ~>> {child_href}")
+            self.link(parent_href, child_href)
 
-            for fitted in peaktable.fitted:
-                fitted_href = db_peaks[fitted]["self"]["href"]
-                parent_href = db_peaks[fitted.parent]["self"]["href"]
-                r = self.session.request('link', self.url + parent_href, headers={"location": fitted_href})
-                if not r.ok:
-                    log.error(f"LINK {parent_href} to Location: {fitted_href} failed\n\n[{r.status_code}]: {r.content}")
-                r.raise_for_status()
-                log.debug(f"linked parent {parent_href} ~> {fitted_href}")
+        for child_href, parent_href in is_link - to_link:
+            log.debug(f'break link {parent_href} ~x~ {child_href}')
+            self.unlink(parent_href, child_href)
 
         return {
             'added': len(to_upload),
             'updated': updated,
-            'up-to-date': len(to_update) - updated,
+            'up-to-date': up_to_date,
+            'linked': len(to_link - is_link),
+            'unlinked': len(is_link - to_link),
         }
 
     def iter_events(self, event_re=r".*"):
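The new link-repair step is plain set reconciliation over (child, parent) href pairs; a self-contained illustration with dummy hrefs:

    # desired pairs, derived from the local peaktable:
    to_link = {('/api/peaks/7', '/api/peaks/1'), ('/api/peaks/8', '/api/peaks/2')}
    # pairs currently stored on the server:
    is_link = {('/api/peaks/7', '/api/peaks/1'), ('/api/peaks/9', '/api/peaks/3')}

    keep  = is_link & to_link   # already correct -> left untouched
    make  = to_link - is_link   # missing on the server -> LINK verb
    sever = is_link - to_link   # stale on the server -> UNLINK verb

    print(sorted(make))    # [('/api/peaks/8', '/api/peaks/2')]
    print(sorted(sever))   # [('/api/peaks/9', '/api/peaks/3')]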