hat-event 0.9.27__cp310.cp311.cp312.cp313-abi3-win_amd64.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (73)
  1. hat/event/__init__.py +1 -0
  2. hat/event/adminer/__init__.py +18 -0
  3. hat/event/adminer/client.py +124 -0
  4. hat/event/adminer/common.py +27 -0
  5. hat/event/adminer/server.py +111 -0
  6. hat/event/backends/__init__.py +0 -0
  7. hat/event/backends/dummy.py +49 -0
  8. hat/event/backends/lmdb/__init__.py +9 -0
  9. hat/event/backends/lmdb/backend.py +319 -0
  10. hat/event/backends/lmdb/common.py +277 -0
  11. hat/event/backends/lmdb/conditions.py +102 -0
  12. hat/event/backends/lmdb/convert/__init__.py +0 -0
  13. hat/event/backends/lmdb/convert/__main__.py +8 -0
  14. hat/event/backends/lmdb/convert/convert_v06_to_v07.py +213 -0
  15. hat/event/backends/lmdb/convert/convert_v07_to_v09.py +175 -0
  16. hat/event/backends/lmdb/convert/main.py +88 -0
  17. hat/event/backends/lmdb/convert/v06.py +216 -0
  18. hat/event/backends/lmdb/convert/v07.py +508 -0
  19. hat/event/backends/lmdb/convert/v09.py +50 -0
  20. hat/event/backends/lmdb/convert/version.py +63 -0
  21. hat/event/backends/lmdb/environment.py +100 -0
  22. hat/event/backends/lmdb/latestdb.py +116 -0
  23. hat/event/backends/lmdb/manager/__init__.py +0 -0
  24. hat/event/backends/lmdb/manager/__main__.py +8 -0
  25. hat/event/backends/lmdb/manager/common.py +45 -0
  26. hat/event/backends/lmdb/manager/copy.py +92 -0
  27. hat/event/backends/lmdb/manager/main.py +34 -0
  28. hat/event/backends/lmdb/manager/query.py +215 -0
  29. hat/event/backends/lmdb/refdb.py +234 -0
  30. hat/event/backends/lmdb/systemdb.py +102 -0
  31. hat/event/backends/lmdb/timeseriesdb.py +486 -0
  32. hat/event/backends/memory.py +178 -0
  33. hat/event/common/__init__.py +144 -0
  34. hat/event/common/backend.py +91 -0
  35. hat/event/common/collection/__init__.py +8 -0
  36. hat/event/common/collection/common.py +28 -0
  37. hat/event/common/collection/list.py +19 -0
  38. hat/event/common/collection/tree.py +62 -0
  39. hat/event/common/common.py +176 -0
  40. hat/event/common/encoder.py +305 -0
  41. hat/event/common/json_schema_repo.json +1 -0
  42. hat/event/common/matches.py +44 -0
  43. hat/event/common/module.py +142 -0
  44. hat/event/common/sbs_repo.json +1 -0
  45. hat/event/common/subscription/__init__.py +22 -0
  46. hat/event/common/subscription/_csubscription.abi3.pyd +0 -0
  47. hat/event/common/subscription/common.py +145 -0
  48. hat/event/common/subscription/csubscription.py +47 -0
  49. hat/event/common/subscription/pysubscription.py +97 -0
  50. hat/event/component.py +284 -0
  51. hat/event/eventer/__init__.py +28 -0
  52. hat/event/eventer/client.py +260 -0
  53. hat/event/eventer/common.py +27 -0
  54. hat/event/eventer/server.py +286 -0
  55. hat/event/manager/__init__.py +0 -0
  56. hat/event/manager/__main__.py +8 -0
  57. hat/event/manager/common.py +48 -0
  58. hat/event/manager/main.py +387 -0
  59. hat/event/server/__init__.py +0 -0
  60. hat/event/server/__main__.py +8 -0
  61. hat/event/server/adminer_server.py +43 -0
  62. hat/event/server/engine.py +216 -0
  63. hat/event/server/engine_runner.py +127 -0
  64. hat/event/server/eventer_client.py +205 -0
  65. hat/event/server/eventer_client_runner.py +152 -0
  66. hat/event/server/eventer_server.py +119 -0
  67. hat/event/server/main.py +84 -0
  68. hat/event/server/main_runner.py +212 -0
  69. hat_event-0.9.27.dist-info/LICENSE +202 -0
  70. hat_event-0.9.27.dist-info/METADATA +108 -0
  71. hat_event-0.9.27.dist-info/RECORD +73 -0
  72. hat_event-0.9.27.dist-info/WHEEL +7 -0
  73. hat_event-0.9.27.dist-info/entry_points.txt +5 -0
hat/event/__init__.py ADDED
@@ -0,0 +1 @@
+ """Event Server and communication libraries"""
hat/event/adminer/__init__.py ADDED
@@ -0,0 +1,18 @@
+ """Event adminer communication protocol"""
+
+ from hat.event.adminer.client import (AdminerError,
+                                        connect,
+                                        Client)
+ from hat.event.adminer.server import (GetLogConfCb,
+                                        SetLogConfCb,
+                                        listen,
+                                        Server)
+
+
+ __all__ = ['AdminerError',
+            'connect',
+            'Client',
+            'GetLogConfCb',
+            'SetLogConfCb',
+            'listen',
+            'Server']
hat/event/adminer/client.py ADDED
@@ -0,0 +1,124 @@
+ import asyncio
+ import logging
+
+ from hat import aio
+ from hat import json
+ from hat.drivers import chatter
+ from hat.drivers import tcp
+
+ from hat.event.eventer import common
+
+
+ mlog: logging.Logger = logging.getLogger(__name__)
+ """Module logger"""
+
+
+ class AdminerError(Exception):
+     """Errors reported by Event Adminer Server"""
+
+
+ async def connect(addr: tcp.Address,
+                   **kwargs
+                   ) -> 'Client':
+     """Connect to Event Adminer Server
+
+     Additional arguments are passed to `hat.drivers.chatter.connect` coroutine.
+
+     """
+     client = Client()
+     client._loop = asyncio.get_running_loop()
+     client._conv_msg_type_futures = {}
+
+     client._conn = await chatter.connect(addr, **kwargs)
+
+     try:
+         client.async_group.spawn(client._receive_loop)
+
+     except BaseException:
+         await aio.uncancellable(client.async_close())
+         raise
+
+     return client
+
+
+ class Client(aio.Resource):
+     """Event adminer client
+
+     For creating new client see `connect` coroutine.
+
+     """
+
+     @property
+     def async_group(self) -> aio.Group:
+         """Async group"""
+         return self._conn.async_group
+
+     async def get_log_conf(self) -> json.Data:
+         """Get logging configuration"""
+         data = await self._send(
+             req_msg_type='HatEventAdminer.MsgGetLogConfReq',
+             req_msg_data=None,
+             res_msg_type='HatEventAdminer.MsgGetLogConfRes')
+
+         return json.decode(data)
+
+     async def set_log_conf(self, conf: json.Data):
+         """Set logging configuration"""
+         await self._send(req_msg_type='HatEventAdminer.MsgSetLogConfReq',
+                          req_msg_data=json.encode(conf),
+                          res_msg_type='HatEventAdminer.MsgSetLogConfRes')
+
+     async def _send(self, req_msg_type, req_msg_data, res_msg_type):
+         conv = await common.send_msg(
+             conn=self._conn,
+             msg_type=req_msg_type,
+             msg_data=req_msg_data,
+             last=False)
+
+         if not self.is_open:
+             raise ConnectionError()
+
+         future = self._loop.create_future()
+         self._conv_msg_type_futures[conv] = res_msg_type, future
+
+         try:
+             return await future
+
+         finally:
+             self._conv_msg_type_futures.pop(conv, None)
+
+     async def _receive_loop(self):
+         mlog.debug("starting receive loop")
+         try:
+             while True:
+                 mlog.debug("waiting for incoming message")
+                 msg, msg_type, msg_data = await common.receive_msg(self._conn)
+
+                 mlog.debug(f"received message {msg_type}")
+
+                 res_msg_type, future = self._conv_msg_type_futures.get(
+                     msg.conv, (None, None))
+                 if not future or future.done():
+                     continue
+
+                 if res_msg_type != msg_type:
+                     raise Exception('invalid response message type')
+
+                 if msg_data[0] == 'error':
+                     future.set_exception(AdminerError(msg_data[1]))
+                 else:
+                     future.set_result(msg_data[1])
+
+         except ConnectionError:
+             pass
+
+         except Exception as e:
+             mlog.error("receive loop error: %s", e, exc_info=e)
+
+         finally:
+             mlog.debug("stopping receive loop")
+             self.close()
+
+             for _, future in self._conv_msg_type_futures.values():
+                 if not future.done():
+                     future.set_exception(ConnectionError())
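The request/response flow above pairs each chatter conversation with a pending future. A minimal usage sketch of this client, assuming a reachable Event Adminer Server; the address and port below are placeholders:

```python
import asyncio

from hat.drivers import tcp

from hat.event import adminer


async def main():
    # placeholder address; point this at a running Event Adminer Server
    client = await adminer.connect(tcp.Address('127.0.0.1', 23016))
    try:
        conf = await client.get_log_conf()  # decoded JSON logging conf
        await client.set_log_conf(conf)     # round-trip the same conf
    finally:
        await client.async_close()


asyncio.run(main())
```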
hat/event/adminer/common.py ADDED
@@ -0,0 +1,27 @@
+ from hat.event.common import *  # NOQA
+
+ import typing
+
+ from hat import sbs
+ from hat.drivers import chatter
+
+ from hat.event.common import sbs_repo
+
+
+ MsgType: typing.TypeAlias = str
+
+
+ async def send_msg(conn: chatter.Connection,
+                    msg_type: MsgType,
+                    msg_data: sbs.Data,
+                    **kwargs
+                    ) -> chatter.Conversation:
+     msg = sbs_repo.encode(msg_type, msg_data)
+     return await conn.send(chatter.Data(msg_type, msg), **kwargs)
+
+
+ async def receive_msg(conn: chatter.Connection
+                       ) -> tuple[chatter.Msg, MsgType, sbs.Data]:
+     msg = await conn.receive()
+     msg_data = sbs_repo.decode(msg.data.type, msg.data.data)
+     return msg, msg.data.type, msg_data
hat/event/adminer/server.py ADDED
@@ -0,0 +1,111 @@
+ import logging
+ import typing
+
+ from hat import aio
+ from hat import json
+ from hat.drivers import chatter
+ from hat.drivers import tcp
+
+ from hat.event.eventer import common
+
+
+ mlog: logging.Logger = logging.getLogger(__name__)
+ """Module logger"""
+
+ GetLogConfCb: typing.TypeAlias = aio.AsyncCallable[[None], json.Data]
+ """Get logging configuration callback"""
+
+ SetLogConfCb: typing.TypeAlias = aio.AsyncCallable[[json.Data], None]
+ """Set logging configuration callback"""
+
+
+ async def listen(addr: tcp.Address,
+                  *,
+                  get_log_conf_cb: GetLogConfCb | None = None,
+                  set_log_conf_cb: SetLogConfCb | None = None,
+                  **kwargs
+                  ) -> 'Server':
+     """Create listening Event Adminer Server instance"""
+     server = Server()
+     server._get_log_conf_cb = get_log_conf_cb
+     server._set_log_conf_cb = set_log_conf_cb
+
+     server._srv = await chatter.listen(server._connection_loop, addr, **kwargs)
+     mlog.debug("listening on %s", addr)
+
+     return server
+
+
+ class Server(aio.Resource):
+
+     @property
+     def async_group(self) -> aio.Group:
+         """Async group"""
+         return self._srv.async_group
+
+     async def _connection_loop(self, conn):
+         mlog.debug("starting connection loop")
+         try:
+             while True:
+                 mlog.debug("waiting for incoming messages")
+                 msg, msg_type, msg_data = await common.receive_msg(conn)
+
+                 mlog.debug(f"received message {msg_type}")
+
+                 if msg_type == 'HatEventAdminer.MsgGetLogConfReq':
+                     await self._process_msg_get_log_conf(
+                         conn=conn,
+                         conv=msg.conv,
+                         req_msg_data=msg_data)
+
+                 elif msg_type == 'HatEventAdminer.MsgSetLogConfReq':
+                     await self._process_msg_set_log_conf(
+                         conn=conn,
+                         conv=msg.conv,
+                         req_msg_data=msg_data)
+
+                 else:
+                     raise Exception('unsupported message type')
+
+         except ConnectionError:
+             pass
+
+         except Exception as e:
+             mlog.error("connection loop error: %s", e, exc_info=e)
+
+         finally:
+             mlog.debug("stopping connection loop")
+             conn.close()
+
+     async def _process_msg_get_log_conf(self, conn, conv, req_msg_data):
+         try:
+             if not self._get_log_conf_cb:
+                 raise Exception('not implemented')
+
+             result = await aio.call(self._get_log_conf_cb)
+
+             res_msg_data = 'success', json.encode(result)
+
+         except Exception as e:
+             res_msg_data = 'error', str(e)
+
+         await common.send_msg(
+             conn, 'HatEventAdminer.MsgGetLogConfRes', res_msg_data,
+             conv=conv)
+
+     async def _process_msg_set_log_conf(self, conn, conv, req_msg_data):
+         try:
+             if not self._set_log_conf_cb:
+                 raise Exception('not implemented')
+
+             conf = json.decode(req_msg_data)
+             await aio.call(self._set_log_conf_cb, conf)
+
+             res_msg_data = 'success', None
+
+         except Exception as e:
+             res_msg_data = 'error', str(e)
+
+         await common.send_msg(
+             conn, 'HatEventAdminer.MsgSetLogConfRes', res_msg_data,
+             conv=conv)
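A matching server-side sketch: `listen` wires the two optional callbacks into the request handlers above, and a missing callback surfaces to clients as `AdminerError('not implemented')`. The in-memory conf and the port are illustrative:

```python
import asyncio

from hat.drivers import tcp

from hat.event import adminer


async def main():
    log_conf = {'version': 1}  # arbitrary JSON-serializable state

    async def on_get_log_conf():
        return log_conf

    async def on_set_log_conf(conf):
        log_conf.clear()
        log_conf.update(conf)

    server = await adminer.listen(tcp.Address('127.0.0.1', 23016),
                                  get_log_conf_cb=on_get_log_conf,
                                  set_log_conf_cb=on_set_log_conf)
    try:
        await server.wait_closing()  # serve until closed
    finally:
        await server.async_close()


asyncio.run(main())
```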
hat/event/backends/__init__.py ADDED
File without changes
hat/event/backends/dummy.py ADDED
@@ -0,0 +1,49 @@
+ """Dummy backend
+
+ Simple backend which returns constant values where:
+
+ * `DummyBackend.get_last_event_id` returns session and instance ``0``
+ * `DummyBackend.register` returns input arguments
+ * `DummyBackend.query` returns ``QueryResult([], False)``
+
+ Registered and flushed events callbacks are called on every register.
+
+ """
+
+ from hat import aio
+
+ from hat.event import common
+
+
+ class DummyBackend(common.Backend):
+
+     def __init__(self, conf, registered_events_cb, flushed_events_cb):
+         self._registered_events_cb = registered_events_cb
+         self._flushed_events_cb = flushed_events_cb
+         self._async_group = aio.Group()
+
+     @property
+     def async_group(self):
+         return self._async_group
+
+     async def get_last_event_id(self, server_id):
+         return common.EventId(server_id, 0, 0)
+
+     async def register(self, events):
+         if self._registered_events_cb:
+             await aio.call(self._registered_events_cb, events)
+
+         if self._flushed_events_cb:
+             await aio.call(self._flushed_events_cb, events)
+
+         return events
+
+     async def query(self, params):
+         return common.QueryResult(events=[],
+                                   more_follows=False)
+
+     async def flush(self):
+         pass
+
+
+ info = common.BackendInfo(DummyBackend)
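A short sketch exercising the constant behavior promised by the module docstring; the asserts mirror the documented return values:

```python
import asyncio

from hat.event import common
from hat.event.backends.dummy import DummyBackend


async def main():
    async def on_flushed(events):
        print('flushed', len(events), 'events')

    backend = DummyBackend(conf=None,
                           registered_events_cb=None,
                           flushed_events_cb=on_flushed)

    # session and instance are always 0
    assert await backend.get_last_event_id(server_id=1) == \
        common.EventId(1, 0, 0)

    # query always returns an empty result
    result = await backend.query(None)
    assert list(result.events) == [] and not result.more_follows

    # register echoes its input and fires the callbacks
    assert await backend.register([]) == []

    await backend.async_close()


asyncio.run(main())
```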
hat/event/backends/lmdb/__init__.py ADDED
@@ -0,0 +1,9 @@
+ """LMDB backend"""
+
+ from hat.event.backends.lmdb import common
+ from hat.event.backends.lmdb.backend import create
+
+
+ info = common.BackendInfo(create=create,
+                           json_schema_id='hat-event://backends/lmdb.yaml',
+                           json_schema_repo=common.json_schema_repo)
hat/event/backends/lmdb/backend.py ADDED
@@ -0,0 +1,319 @@
+ from collections.abc import Collection
+ from pathlib import Path
+ import asyncio
+ import collections
+ import contextlib
+ import logging
+ import typing
+
+ from hat import aio
+ from hat import json
+
+ from hat.event.backends.lmdb import common
+ from hat.event.backends.lmdb import environment
+ from hat.event.backends.lmdb import latestdb
+ from hat.event.backends.lmdb import refdb
+ from hat.event.backends.lmdb import systemdb
+ from hat.event.backends.lmdb import timeseriesdb
+ from hat.event.backends.lmdb.conditions import Conditions
+
+
+ mlog = logging.getLogger(__name__)
+
+ cleanup_max_results = 1024
+
+ flush_queue_size = 4096
+
+ max_registered_count = 1024 * 256
+
+ version = '0.9'
+
+
+ class Databases(typing.NamedTuple):
+     system: systemdb.SystemDb
+     latest: latestdb.LatestDb
+     timeseries: timeseriesdb.TimeseriesDb
+     ref: refdb.RefDb
+
+
+ class Changes(typing.NamedTuple):
+     system: systemdb.Changes
+     latest: latestdb.Changes
+     timeseries: timeseriesdb.Changes
+     ref: refdb.Changes
+
+
+ async def create(conf: json.Data,
+                  registered_events_cb: common.BackendRegisteredEventsCb | None,
+                  flushed_events_cb: common.BackendFlushedEventsCb | None
+                  ) -> 'LmdbBackend':
+     backend = LmdbBackend()
+     backend._registered_events_cb = registered_events_cb
+     backend._flushed_events_cb = flushed_events_cb
+     backend._conditions = Conditions(conf['conditions'])
+     backend._loop = asyncio.get_running_loop()
+     backend._flush_queue = aio.Queue(flush_queue_size)
+     backend._registered_count = 0
+     backend._registered_queue = collections.deque()
+     backend._async_group = aio.Group()
+
+     backend._env = await environment.create(Path(conf['db_path']))
+     backend.async_group.spawn(aio.call_on_done, backend._env.wait_closing(),
+                               backend.close)
+
+     try:
+         latest_subscription = common.create_subscription(
+             tuple(i) for i in conf['latest']['subscriptions'])
+
+         timeseries_partitions = (
+             timeseriesdb.Partition(
+                 order_by=common.OrderBy[i['order_by']],
+                 subscription=common.create_subscription(
+                     tuple(event_type) for event_type in i['subscriptions']),
+                 limit=(
+                     timeseriesdb.Limit(
+                         min_entries=i['limit'].get('min_entries'),
+                         max_entries=i['limit'].get('max_entries'),
+                         duration=i['limit'].get('duration'),
+                         size=i['limit'].get('size'))
+                     if 'limit' in i else None))
+             for i in conf['timeseries'])
+
+         backend._dbs = await backend._env.execute(
+             _ext_create_dbs, backend._env, conf['identifier'],
+             backend._conditions, latest_subscription, timeseries_partitions)
+
+         backend.async_group.spawn(backend._flush_loop, conf['flush_period'])
+         backend.async_group.spawn(backend._cleanup_loop,
+                                   conf['cleanup_period'])
+
+     except BaseException:
+         await aio.uncancellable(backend._env.async_close())
+         raise
+
+     return backend
+
+
+ def _ext_create_dbs(env, identifier, conditions, latest_subscription,
+                     timeseries_partitions):
+     with env.ext_begin(write=True) as txn:
+         system_db = systemdb.ext_create(env, txn, version, identifier)
+         latest_db = latestdb.ext_create(env, txn, conditions,
+                                         latest_subscription)
+         timeseries_db = timeseriesdb.ext_create(env, txn, conditions,
+                                                 timeseries_partitions)
+         ref_db = refdb.RefDb(env)
+
+     return Databases(system=system_db,
+                      latest=latest_db,
+                      timeseries=timeseries_db,
+                      ref=ref_db)
+
+
+ class LmdbBackend(common.Backend):
+
+     @property
+     def async_group(self) -> aio.Group:
+         return self._async_group
+
+     async def get_last_event_id(self,
+                                 server_id: int
+                                 ) -> common.EventId:
+         if not self.is_open:
+             raise common.BackendClosedError()
+
+         return self._dbs.system.get_last_event_id(server_id)
+
+     async def register(self,
+                        events: Collection[common.Event]
+                        ) -> Collection[common.Event] | None:
+         if not self.is_open:
+             raise common.BackendClosedError()
+
+         for event in events:
+             server_id = event.id.server
+
+             last_event_id = self._dbs.system.get_last_event_id(server_id)
+             last_timestamp = self._dbs.system.get_last_timestamp(server_id)
+
+             if last_event_id >= event.id:
+                 mlog.warning("event registration skipped: invalid event id")
+                 continue
+
+             if last_timestamp > event.timestamp:
+                 mlog.warning("event registration skipped: invalid timestamp")
+                 continue
+
+             if not self._conditions.matches(event):
+                 mlog.warning("event registration skipped: invalid conditions")
+                 continue
+
+             refs = collections.deque()
+
+             latest_result = self._dbs.latest.add(event)
+
+             if latest_result.added_ref:
+                 refs.append(latest_result.added_ref)
+
+             if latest_result.removed_ref:
+                 self._dbs.ref.remove(*latest_result.removed_ref)
+
+             refs.extend(self._dbs.timeseries.add(event))
+
+             if not refs:
+                 continue
+
+             self._dbs.ref.add(event, refs)
+             self._dbs.system.set_last_event_id(event.id)
+             self._dbs.system.set_last_timestamp(server_id, event.timestamp)
+
+         self._registered_queue.append(events)
+         self._registered_count += len(events)
+
+         if self._registered_count > max_registered_count:
+             await self._flush_queue.put(None)
+
+         if self._registered_events_cb:
+             await aio.call(self._registered_events_cb, events)
+
+         return events
+
+     async def query(self,
+                     params: common.QueryParams
+                     ) -> common.QueryResult:
+         if not self.is_open:
+             raise common.BackendClosedError()
+
+         if isinstance(params, common.QueryLatestParams):
+             return self._dbs.latest.query(params)
+
+         if isinstance(params, common.QueryTimeseriesParams):
+             return await self._dbs.timeseries.query(params)
+
+         if isinstance(params, common.QueryServerParams):
+             return await self._dbs.ref.query(params)
+
+         raise ValueError('unsupported params type')
+
+     async def flush(self):
+         try:
+             future = self._loop.create_future()
+             await self._flush_queue.put(future)
+             await future
+
+         except aio.QueueClosedError:
+             raise common.BackendClosedError()
+
+     async def _flush_loop(self, flush_period):
+         futures = collections.deque()
+
+         async def cleanup():
+             with contextlib.suppress(Exception):
+                 await self._flush()
+
+             await self._env.async_close()
+
+         try:
+             while True:
+                 try:
+                     future = await aio.wait_for(self._flush_queue.get(),
+                                                 flush_period)
+                     futures.append(future)
+
+                 except asyncio.TimeoutError:
+                     pass
+
+                 except aio.CancelledWithResultError as e:
+                     if e.result:
+                         futures.append(e.result)
+
+                     raise
+
+                 while not self._flush_queue.empty():
+                     futures.append(self._flush_queue.get_nowait())
+
+                 await aio.uncancellable(self._flush())
+
+                 while futures:
+                     future = futures.popleft()
+                     if future and not future.done():
+                         future.set_result(None)
+
+         except Exception as e:
+             mlog.error('backend flush error: %s', e, exc_info=e)
+
+         finally:
+             self.close()
+             self._flush_queue.close()
+
+             while not self._flush_queue.empty():
+                 futures.append(self._flush_queue.get_nowait())
+
+             for future in futures:
+                 if future and not future.done():
+                     future.set_exception(common.BackendClosedError())
+
+             await aio.uncancellable(cleanup())
+
+     async def _cleanup_loop(self, cleanup_period):
+         try:
+             while True:
+                 await asyncio.sleep(0)
+
+                 repeat = await self._env.execute(_ext_cleanup, self._env,
+                                                  self._dbs, common.now())
+                 if repeat:
+                     continue
+
+                 await asyncio.sleep(cleanup_period)
+
+         except Exception as e:
+             mlog.error('backend cleanup error: %s', e, exc_info=e)
+
+         finally:
+             self.close()
+
+     async def _flush(self):
+         if not self._env.is_open:
+             return
+
+         self._registered_count = 0
+         registered_queue, self._registered_queue = (self._registered_queue,
+                                                     collections.deque())
+
+         changes = Changes(system=self._dbs.system.create_changes(),
+                           latest=self._dbs.latest.create_changes(),
+                           timeseries=self._dbs.timeseries.create_changes(),
+                           ref=self._dbs.ref.create_changes())
+
+         # TODO lock period between create_changes and locking executor
+         #      (timeseries and ref must write changes before new queries are
+         #      allowed)
+
+         await self._env.execute(_ext_flush, self._env, self._dbs, changes)
+
+         if not self._flushed_events_cb:
+             return
+
+         while registered_queue:
+             events = registered_queue.popleft()
+             await aio.call(self._flushed_events_cb, events)
+
+
+ def _ext_flush(env, dbs, changes):
+     with env.ext_begin(write=True) as txn:
+         dbs.system.ext_write(txn, changes.system)
+         dbs.latest.ext_write(txn, changes.latest)
+         dbs.timeseries.ext_write(txn, changes.timeseries)
+         dbs.ref.ext_write(txn, changes.ref)
+
+
+ def _ext_cleanup(env, dbs, now):
+     with env.ext_begin(write=True) as txn:
+         result = dbs.timeseries.ext_cleanup(txn, now, cleanup_max_results)
+         if not result:
+             return False
+
+         dbs.ref.ext_cleanup(txn, result)
+
+     return len(result) >= cleanup_max_results
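For reference, the configuration keys consumed by `create` above, collected into one sketch. Every value is illustrative (the authoritative shape is the `hat-event://backends/lmdb.yaml` JSON schema registered in `__init__.py`), and `'TIMESTAMP'` assumes a member of `common.OrderBy` with that name:

```python
conf = {
    'db_path': '/var/lib/hat/event.db',    # opened via environment.create
    'identifier': 'event-server-1',        # stored by systemdb
    'flush_period': 1,                     # seconds between flushes
    'cleanup_period': 30,                  # seconds between cleanup passes
    'conditions': [],                      # parsed by Conditions(...)
    'latest': {'subscriptions': [['*']]},  # event types kept in latestdb
    'timeseries': [{'order_by': 'TIMESTAMP',
                    'subscriptions': [['*']],
                    # 'limit' is optional, and each of its keys may be omitted
                    'limit': {'max_entries': 100000,
                              'duration': 3600}}]}
```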