hat-event 0.9.27__cp310.cp311.cp312.cp313-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hat/event/__init__.py +1 -0
- hat/event/adminer/__init__.py +18 -0
- hat/event/adminer/client.py +124 -0
- hat/event/adminer/common.py +27 -0
- hat/event/adminer/server.py +111 -0
- hat/event/backends/__init__.py +0 -0
- hat/event/backends/dummy.py +49 -0
- hat/event/backends/lmdb/__init__.py +9 -0
- hat/event/backends/lmdb/backend.py +319 -0
- hat/event/backends/lmdb/common.py +277 -0
- hat/event/backends/lmdb/conditions.py +102 -0
- hat/event/backends/lmdb/convert/__init__.py +0 -0
- hat/event/backends/lmdb/convert/__main__.py +8 -0
- hat/event/backends/lmdb/convert/convert_v06_to_v07.py +213 -0
- hat/event/backends/lmdb/convert/convert_v07_to_v09.py +175 -0
- hat/event/backends/lmdb/convert/main.py +88 -0
- hat/event/backends/lmdb/convert/v06.py +216 -0
- hat/event/backends/lmdb/convert/v07.py +508 -0
- hat/event/backends/lmdb/convert/v09.py +50 -0
- hat/event/backends/lmdb/convert/version.py +63 -0
- hat/event/backends/lmdb/environment.py +100 -0
- hat/event/backends/lmdb/latestdb.py +116 -0
- hat/event/backends/lmdb/manager/__init__.py +0 -0
- hat/event/backends/lmdb/manager/__main__.py +8 -0
- hat/event/backends/lmdb/manager/common.py +45 -0
- hat/event/backends/lmdb/manager/copy.py +92 -0
- hat/event/backends/lmdb/manager/main.py +34 -0
- hat/event/backends/lmdb/manager/query.py +215 -0
- hat/event/backends/lmdb/refdb.py +234 -0
- hat/event/backends/lmdb/systemdb.py +102 -0
- hat/event/backends/lmdb/timeseriesdb.py +486 -0
- hat/event/backends/memory.py +178 -0
- hat/event/common/__init__.py +144 -0
- hat/event/common/backend.py +91 -0
- hat/event/common/collection/__init__.py +8 -0
- hat/event/common/collection/common.py +28 -0
- hat/event/common/collection/list.py +19 -0
- hat/event/common/collection/tree.py +62 -0
- hat/event/common/common.py +176 -0
- hat/event/common/encoder.py +305 -0
- hat/event/common/json_schema_repo.json +1 -0
- hat/event/common/matches.py +44 -0
- hat/event/common/module.py +142 -0
- hat/event/common/sbs_repo.json +1 -0
- hat/event/common/subscription/__init__.py +22 -0
- hat/event/common/subscription/_csubscription.abi3.pyd +0 -0
- hat/event/common/subscription/common.py +145 -0
- hat/event/common/subscription/csubscription.py +47 -0
- hat/event/common/subscription/pysubscription.py +97 -0
- hat/event/component.py +284 -0
- hat/event/eventer/__init__.py +28 -0
- hat/event/eventer/client.py +260 -0
- hat/event/eventer/common.py +27 -0
- hat/event/eventer/server.py +286 -0
- hat/event/manager/__init__.py +0 -0
- hat/event/manager/__main__.py +8 -0
- hat/event/manager/common.py +48 -0
- hat/event/manager/main.py +387 -0
- hat/event/server/__init__.py +0 -0
- hat/event/server/__main__.py +8 -0
- hat/event/server/adminer_server.py +43 -0
- hat/event/server/engine.py +216 -0
- hat/event/server/engine_runner.py +127 -0
- hat/event/server/eventer_client.py +205 -0
- hat/event/server/eventer_client_runner.py +152 -0
- hat/event/server/eventer_server.py +119 -0
- hat/event/server/main.py +84 -0
- hat/event/server/main_runner.py +212 -0
- hat_event-0.9.27.dist-info/LICENSE +202 -0
- hat_event-0.9.27.dist-info/METADATA +108 -0
- hat_event-0.9.27.dist-info/RECORD +73 -0
- hat_event-0.9.27.dist-info/WHEEL +7 -0
- hat_event-0.9.27.dist-info/entry_points.txt +5 -0
|
@@ -0,0 +1,277 @@
|
|
|
1
|
+
from hat.event.common import * # NOQA
|
|
2
|
+
|
|
3
|
+
from pathlib import Path
|
|
4
|
+
import enum
|
|
5
|
+
import itertools
|
|
6
|
+
import platform
|
|
7
|
+
import struct
|
|
8
|
+
import sys
|
|
9
|
+
import typing
|
|
10
|
+
|
|
11
|
+
import lmdb
|
|
12
|
+
|
|
13
|
+
from hat import json
|
|
14
|
+
from hat import util
|
|
15
|
+
|
|
16
|
+
from hat.event.common import (ServerId, Event, EventId, EventType,
|
|
17
|
+
Timestamp, EventPayloadBinary,
|
|
18
|
+
sbs_repo, event_to_sbs, event_from_sbs)
|
|
19
|
+
|
|
20
|
+
|
|
21
|
+
# LMDB memory-map size limit: 1 TiB on 64-bit platforms, 2 GiB otherwise
default_max_size: int = (1024 * 1024 * 1024 * 1024
                         if platform.architecture()[0] == '64bit'
                         else 2 * 1024 * 1024 * 1024)


# type variables parametrizing DbDef key/value codecs
DbKey = typing.TypeVar('DbKey')
DbValue = typing.TypeVar('DbValue')
|
|
28
|
+
|
|
29
|
+
|
|
30
|
+
class DbType(enum.Enum):
    """Identifiers of the named LMDB sub-databases.

    The integer values of ``LATEST_DATA`` and ``TIMESERIES_DATA`` are also
    used as single-byte tags when encoding event references
    (see ``_encode_event_ref``).
    """
    SYSTEM_SETTINGS = 0
    SYSTEM_LAST_EVENT_ID = 1
    SYSTEM_LAST_TIMESTAMP = 2
    REF = 3
    LATEST_DATA = 4
    LATEST_TYPE = 5
    TIMESERIES_DATA = 6
    TIMESERIES_PARTITION = 7
    TIMESERIES_COUNT = 8
|
|
40
|
+
|
|
41
|
+
|
|
42
|
+
# typing.NamedTuple can be combined with typing.Generic only since
# Python 3.11 - on older versions DbDef is a plain (non-generic) named
# tuple and create_db_def ignores its type arguments
if sys.version_info[:2] >= (3, 11):

    class DbDef(typing.NamedTuple, typing.Generic[DbKey, DbValue]):
        """Encoder/decoder bundle for a single sub-database."""
        encode_key: typing.Callable[[DbKey], util.Bytes]
        decode_key: typing.Callable[[util.Bytes], DbKey]
        encode_value: typing.Callable[[DbValue], util.Bytes]
        decode_value: typing.Callable[[util.Bytes], DbValue]

    def create_db_def(key_type: typing.Type,
                      value_type: typing.Type
                      ) -> typing.Type[DbDef]:
        """Return DbDef specialized for `key_type`/`value_type`."""
        return DbDef[key_type, value_type]

else:

    class DbDef(typing.NamedTuple):
        """Encoder/decoder bundle for a single sub-database."""
        encode_key: typing.Callable[[DbKey], util.Bytes]
        decode_key: typing.Callable[[util.Bytes], DbKey]
        encode_value: typing.Callable[[DbValue], util.Bytes]
        decode_value: typing.Callable[[util.Bytes], DbValue]

    def create_db_def(key_type: typing.Type,
                      value_type: typing.Type
                      ) -> typing.Type[DbDef]:
        """Return DbDef (type parameters ignored before Python 3.11)."""
        return DbDef
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
# numeric reference assigned to an event type (LATEST_* databases)
EventTypeRef: typing.TypeAlias = int
# numeric identifier of a timeseries partition
PartitionId: typing.TypeAlias = int
TimeseriesKey: typing.TypeAlias = tuple[PartitionId, Timestamp, EventId]


class SettingsId(enum.Enum):
    """Keys of the SYSTEM_SETTINGS database."""
    VERSION = 0
    IDENTIFIER = 1


class LatestEventRef(typing.NamedTuple):
    # key into the LATEST_DATA database
    key: EventTypeRef


class TimeseriesEventRef(typing.NamedTuple):
    # key into the TIMESERIES_DATA database
    key: TimeseriesKey


EventRef: typing.TypeAlias = LatestEventRef | TimeseriesEventRef
|
|
88
|
+
|
|
89
|
+
|
|
90
|
+
# key/value codec definition for each sub-database - the helper
# encoders/decoders are referenced through lambdas because they are
# defined later in this module (direct references at this point would
# raise NameError during import)
db_defs = {
    DbType.SYSTEM_SETTINGS: create_db_def(SettingsId, json.Data)(
        encode_key=lambda key: _encode_uint(key.value),
        decode_key=lambda key_bytes: SettingsId(_decode_uint(key_bytes)),
        encode_value=lambda value: _encode_json(value),
        decode_value=lambda value_bytes: _decode_json(value_bytes)),

    DbType.SYSTEM_LAST_EVENT_ID: create_db_def(ServerId, EventId)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_event_id(value),
        decode_value=lambda value_bytes: _decode_event_id(value_bytes)),

    DbType.SYSTEM_LAST_TIMESTAMP: create_db_def(ServerId, Timestamp)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_timestamp(value),
        decode_value=lambda value_bytes: _decode_timestamp(value_bytes)),

    DbType.REF: create_db_def(EventId, set[EventRef])(
        encode_key=lambda key: _encode_event_id(key),
        decode_key=lambda key_bytes: _decode_event_id(key_bytes),
        encode_value=lambda value: bytes(_encode_event_refs(value)),
        decode_value=lambda value_bytes: set(_decode_event_refs(value_bytes))),

    DbType.LATEST_DATA: create_db_def(EventTypeRef, Event)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_event(value),
        decode_value=lambda value_bytes: _decode_event(value_bytes)),

    # event types are stored as JSON lists and restored as tuples
    DbType.LATEST_TYPE: create_db_def(EventTypeRef, EventType)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_json(list(value)),
        decode_value=lambda value_bytes: tuple(_decode_json(value_bytes))),

    DbType.TIMESERIES_DATA: create_db_def(TimeseriesKey, Event)(
        encode_key=lambda key: _encode_timeseries_key(key),
        decode_key=lambda key_bytes: _decode_timeseries_key(key_bytes),
        encode_value=lambda value: _encode_event(value),
        decode_value=lambda value_bytes: _decode_event(value_bytes)),

    DbType.TIMESERIES_PARTITION: create_db_def(PartitionId, json.Data)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_json(value),
        decode_value=lambda value_bytes: _decode_json(value_bytes)),

    DbType.TIMESERIES_COUNT: create_db_def(PartitionId, int)(
        encode_key=lambda key: _encode_uint(key),
        decode_key=lambda key_bytes: _decode_uint(key_bytes),
        encode_value=lambda value: _encode_uint(value),
        decode_value=lambda value_bytes: _decode_uint(value_bytes))}
|
|
144
|
+
|
|
145
|
+
|
|
146
|
+
def ext_create_env(path: Path,
                   max_size: int = default_max_size,
                   readonly: bool = False
                   ) -> lmdb.Environment:
    """Open (creating if needed) the single-file LMDB environment at `path`."""
    options = {'map_size': max_size,
               'subdir': False,
               'max_dbs': len(DbType),
               'readonly': readonly}
    return lmdb.Environment(str(path), **options)


def ext_open_db(env: lmdb.Environment,
                db_type: DbType,
                create: bool = True
                ) -> lmdb._Database:
    """Open the named sub-database associated with `db_type`."""
    db_name = db_type.name.encode('utf-8')
    return env.open_db(db_name, create=create)
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def _encode_uint(value):
|
|
165
|
+
return struct.pack(">Q", value)
|
|
166
|
+
|
|
167
|
+
|
|
168
|
+
def _decode_uint(value_bytes):
|
|
169
|
+
return struct.unpack(">Q", value_bytes)[0]
|
|
170
|
+
|
|
171
|
+
|
|
172
|
+
def _encode_event_id(event_id):
|
|
173
|
+
return struct.pack(">QQQ", event_id.server, event_id.session,
|
|
174
|
+
event_id.instance)
|
|
175
|
+
|
|
176
|
+
|
|
177
|
+
def _decode_event_id(event_id_bytes):
|
|
178
|
+
server_id, session_id, instance_id = struct.unpack(">QQQ", event_id_bytes)
|
|
179
|
+
return EventId(server=server_id,
|
|
180
|
+
session=session_id,
|
|
181
|
+
instance=instance_id)
|
|
182
|
+
|
|
183
|
+
|
|
184
|
+
def _encode_timestamp(timestamp):
|
|
185
|
+
return struct.pack(">QI", timestamp.s + (1 << 63), timestamp.us)
|
|
186
|
+
|
|
187
|
+
|
|
188
|
+
def _decode_timestamp(timestamp_bytes):
|
|
189
|
+
s, us = struct.unpack(">QI", timestamp_bytes)
|
|
190
|
+
return Timestamp(s - (1 << 63), us)
|
|
191
|
+
|
|
192
|
+
|
|
193
|
+
def _encode_event_ref(ref):
    """Encode event reference as a single db-type tag byte followed by key."""
    if isinstance(ref, LatestEventRef):
        db_type = DbType.LATEST_DATA

    elif isinstance(ref, TimeseriesEventRef):
        db_type = DbType.TIMESERIES_DATA

    else:
        raise ValueError('unsupported event reference')

    encoded_key = db_defs[db_type].encode_key(ref.key)
    return bytes([db_type.value]) + encoded_key


def _decode_event_ref(ref_bytes):
    """Inverse of `_encode_event_ref`."""
    db_type = DbType(ref_bytes[0])
    key = db_defs[db_type].decode_key(ref_bytes[1:])

    if db_type is DbType.LATEST_DATA:
        return LatestEventRef(key)

    if db_type is DbType.TIMESERIES_DATA:
        return TimeseriesEventRef(key)

    raise ValueError('unsupported database type')
|
|
217
|
+
|
|
218
|
+
|
|
219
|
+
def _encode_event_refs(refs):
    """Concatenate encodings of all event references into one bytes object."""
    return b''.join(_encode_event_ref(ref) for ref in refs)


def _decode_event_refs(refs_bytes):
    """Yield event references from a concatenated encoding.

    Encoded sizes: 1 tag byte + 8 key bytes for LATEST_DATA;
    1 tag byte + 44 key bytes for TIMESERIES_DATA (8 partition id +
    12 timestamp + 24 event id).
    """
    key_sizes = {DbType.LATEST_DATA: 8,
                 DbType.TIMESERIES_DATA: 44}

    while refs_bytes:
        db_type = DbType(refs_bytes[0])

        if db_type not in key_sizes:
            raise ValueError('unsupported event reference')

        ref_size = key_sizes[db_type] + 1
        yield _decode_event_ref(refs_bytes[:ref_size])
        refs_bytes = refs_bytes[ref_size:]
|
|
240
|
+
|
|
241
|
+
|
|
242
|
+
def _encode_timeseries_key(key):
    """Encode (partition id, timestamp, event id) as 44 bytes."""
    partition_id, timestamp, event_id = key
    return (_encode_uint(partition_id) +
            _encode_timestamp(timestamp) +
            _encode_event_id(event_id))


def _decode_timeseries_key(key_bytes):
    """Inverse of `_encode_timeseries_key`."""
    return (_decode_uint(key_bytes[:8]),
            _decode_timestamp(key_bytes[8:20]),
            _decode_event_id(key_bytes[20:]))
|
|
254
|
+
|
|
255
|
+
|
|
256
|
+
def _encode_event(event):
    """Serialize event using the `HatEventer.Event` SBS schema."""
    return sbs_repo.encode('HatEventer.Event', event_to_sbs(event))


def _decode_event(event_bytes):
    """Deserialize event; binary payload data is copied into `bytes`."""
    event_sbs = sbs_repo.decode('HatEventer.Event', event_bytes)
    event = event_from_sbs(event_sbs)

    if not isinstance(event.payload, EventPayloadBinary):
        return event

    # copy payload data into an independent bytes object (decoded data
    # may be a view over a shared/borrowed buffer - TODO confirm)
    payload = event.payload._replace(data=bytes(event.payload.data))
    return event._replace(payload=payload)
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
def _encode_json(data):
    """Serialize JSON-compatible data as UTF-8 bytes."""
    text = json.encode(data)
    return text.encode('utf-8')


def _decode_json(data_bytes):
    """Parse JSON from a UTF-8 encoded bytes-like object."""
    return json.decode(bytes(data_bytes).decode('utf-8'))
|
|
@@ -0,0 +1,102 @@
|
|
|
1
|
+
from hat import json
|
|
2
|
+
|
|
3
|
+
from hat.event.backends.lmdb import common
|
|
4
|
+
|
|
5
|
+
|
|
6
|
+
class Conditions:
    """Collection of subscription-scoped conditions.

    Each configuration item pairs a subscription with a condition; the
    condition is only consulted for events whose type matches the
    subscription.
    """

    def __init__(self, conf: json.Data):
        self._conditions = []
        for item in conf:
            subscription = common.create_subscription(
                tuple(event_type) for event_type in item['subscriptions'])
            condition = _create_condition(item['condition'])
            self._conditions.append((subscription, condition))

    def matches(self, event: common.Event) -> bool:
        """Return ``False`` iff some applicable condition rejects `event`."""
        return all(condition.matches(event)
                   for subscription, condition in self._conditions
                   if subscription.matches(event.type))
|
|
24
|
+
|
|
25
|
+
|
|
26
|
+
def _create_condition(conf):
    """Instantiate the condition implementation selected by ``conf['type']``."""
    condition_classes = {'all': _AllCondition,
                         'any': _AnyCondition,
                         'json': _JsonCondition}

    condition_class = condition_classes.get(conf['type'])
    if condition_class is None:
        raise ValueError('unsupported condition type')

    return condition_class(conf)
|
|
37
|
+
|
|
38
|
+
|
|
39
|
+
class _AllCondition:
|
|
40
|
+
|
|
41
|
+
def __init__(self, conf):
|
|
42
|
+
self._conditions = [_create_condition(i) for i in conf['conditions']]
|
|
43
|
+
|
|
44
|
+
def matches(self, event):
|
|
45
|
+
return all(condition.matches(event) for condition in self._conditions)
|
|
46
|
+
|
|
47
|
+
|
|
48
|
+
class _AnyCondition:
|
|
49
|
+
|
|
50
|
+
def __init__(self, conf):
|
|
51
|
+
self._conditions = [_create_condition(i) for i in conf['conditions']]
|
|
52
|
+
|
|
53
|
+
def matches(self, event):
|
|
54
|
+
return any(condition.matches(event) for condition in self._conditions)
|
|
55
|
+
|
|
56
|
+
|
|
57
|
+
class _JsonCondition:
    """Condition on JSON event payload data.

    Optionally constrains the JSON type ('null', 'boolean', 'string',
    'number', 'array', 'object') and/or exact value of the data found at
    ``conf['data_path']``. Events without a JSON payload never match.
    """

    def __init__(self, conf):
        self._conf = conf

    def matches(self, event):
        if not isinstance(event.payload, common.EventPayloadJson):
            return False

        data_path = self._conf.get('data_path', [])
        data = json.get(event.payload.data, data_path)

        if 'data_type' in self._conf:
            if not _matches_data_type(data, self._conf['data_type']):
                return False

        if 'data_value' in self._conf:
            if self._conf['data_value'] != data:
                return False

        return True


def _matches_data_type(data, data_type):
    """Check `data` against a JSON type name; unknown names always pass."""
    if data_type == 'null':
        return data is None

    if data_type == 'boolean':
        return isinstance(data, bool)

    if data_type == 'string':
        return isinstance(data, str)

    if data_type == 'number':
        # bool is a subclass of int and is explicitly excluded
        return (isinstance(data, float) or
                (isinstance(data, int) and not isinstance(data, bool)))

    if data_type == 'array':
        return isinstance(data, list)

    if data_type == 'object':
        return isinstance(data, dict)

    return True
|
|
File without changes
|
|
@@ -0,0 +1,213 @@
|
|
|
1
|
+
from pathlib import Path
|
|
2
|
+
import itertools
|
|
3
|
+
|
|
4
|
+
from hat.event.backends.lmdb.convert import v06
|
|
5
|
+
from hat.event.backends.lmdb.convert import v07
|
|
6
|
+
|
|
7
|
+
|
|
8
|
+
def convert(src_path: Path,
            dst_path: Path):
    """Convert v0.6 event database at `src_path` into v0.7 at `dst_path`."""
    with v06.create_env(src_path) as src_env:
        src_system_db = src_env.open_db(b'system')
        server_id = _get_server_id(src_env, src_system_db)

        # ensure a usable server id (presumably v0.7 requires server > 0;
        # see _convert_event_id which substitutes it for non-positive ids)
        if server_id < 1:
            server_id = 1

        with v07.create_env(dst_path) as dst_env:
            dst_system_db = v07.open_db(dst_env, v07.DbType.SYSTEM)
            dst_ref_db = v07.open_db(dst_env, v07.DbType.REF)

            # convert 'latest' databases first, then 'ordered' databases;
            # both update the shared system and ref databases
            _convert_latest(src_env=src_env,
                            dst_env=dst_env,
                            dst_system_db=dst_system_db,
                            dst_ref_db=dst_ref_db,
                            server_id=server_id)

            _convert_ordered(src_env=src_env,
                             dst_env=dst_env,
                             dst_system_db=dst_system_db,
                             dst_ref_db=dst_ref_db,
                             server_id=server_id)
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
def _convert_latest(src_env, dst_env, dst_system_db, dst_ref_db, server_id):
    """Copy the source 'latest' database into v0.7 LATEST_DATA/LATEST_TYPE.

    Assigns sequential event type references starting from 1, records a
    LatestEventRef for each converted event and finally stores the
    greatest converted event id in the system database.
    """
    src_latest_db = src_env.open_db(b'latest')

    dst_latest_data_db = v07.open_db(dst_env, v07.DbType.LATEST_DATA)
    dst_latest_type_db = v07.open_db(dst_env, v07.DbType.LATEST_TYPE)

    # event with the greatest event id seen so far
    latest_event = None
    # event type references are allocated sequentially from 1
    next_event_type_ref = itertools.count(1)

    with src_env.begin(db=src_latest_db, buffers=True) as src_txn:
        for _, src_encoded_value in src_txn.cursor():
            src_event = v06.decode_event(src_encoded_value)
            dst_event = _convert_event(src_event, server_id)

            if not latest_event or latest_event.event_id < dst_event.event_id:
                latest_event = dst_event

            event_type_ref = next(next_event_type_ref)

            # NOTE(review): each put runs in its own short write
            # transaction while the read transaction stays open
            with dst_env.begin(db=dst_latest_data_db, write=True) as dst_txn:
                dst_txn.put(
                    v07.encode_latest_data_db_key(
                        event_type_ref),
                    v07.encode_latest_data_db_value(
                        dst_event))

            with dst_env.begin(db=dst_latest_type_db, write=True) as dst_txn:
                dst_txn.put(
                    v07.encode_latest_type_db_key(
                        event_type_ref),
                    v07.encode_latest_type_db_value(
                        dst_event.event_type))

            _update_ref_db(dst_env=dst_env,
                           dst_ref_db=dst_ref_db,
                           dst_event=dst_event,
                           event_ref=v07.LatestEventRef(event_type_ref))

    if latest_event:
        _update_system_db(dst_env=dst_env,
                          dst_system_db=dst_system_db,
                          server_id=server_id,
                          latest_event=latest_event)
|
|
77
|
+
|
|
78
|
+
|
|
79
|
+
def _convert_ordered(src_env, dst_env, dst_system_db, dst_ref_db, server_id):
    """Copy source 'ordered_*' databases into v0.7 ORDERED_* databases.

    Converts event data (recording an OrderedEventRef per event and the
    greatest converted event id), then copies partition definitions and
    partition event counts unchanged.
    """
    src_ordered_data_db = src_env.open_db(b'ordered_data')
    src_ordered_partition_db = src_env.open_db(b'ordered_partition')
    src_ordered_count_db = src_env.open_db(b'ordered_count')

    dst_ordered_data_db = v07.open_db(dst_env, v07.DbType.ORDERED_DATA)
    dst_ordered_partition_db = v07.open_db(
        dst_env, v07.DbType.ORDERED_PARTITION)
    dst_ordered_count_db = v07.open_db(dst_env, v07.DbType.ORDERED_COUNT)

    # event with the greatest event id seen so far
    latest_event = None

    with src_env.begin(db=src_ordered_data_db, buffers=True) as src_txn:
        for src_encoded_key, src_encoded_value in src_txn.cursor():
            # source key is (partition id, timestamp, instance id);
            # the trailing id is discarded - v0.7 keys use the event id
            partition_id, src_timestamp, _ = v06.decode_uint_timestamp_uint(
                src_encoded_key)
            src_event = v06.decode_event(src_encoded_value)
            dst_timestamp = _convert_timestamp(src_timestamp)
            dst_event = _convert_event(src_event, server_id)

            if not latest_event or latest_event.event_id < dst_event.event_id:
                latest_event = dst_event

            dst_key = partition_id, dst_timestamp, dst_event.event_id

            with dst_env.begin(db=dst_ordered_data_db, write=True) as dst_txn:
                dst_txn.put(
                    v07.encode_ordered_data_db_key(
                        dst_key),
                    v07.encode_ordered_data_db_value(
                        dst_event))

            _update_ref_db(dst_env=dst_env,
                           dst_ref_db=dst_ref_db,
                           dst_event=dst_event,
                           event_ref=v07.OrderedEventRef(dst_key))

    if latest_event:
        _update_system_db(dst_env=dst_env,
                          dst_system_db=dst_system_db,
                          server_id=server_id,
                          latest_event=latest_event)

    # partition definitions are copied as-is (re-encoded for v0.7)
    with src_env.begin(db=src_ordered_partition_db, buffers=True) as src_txn:
        for src_encoded_key, src_encoded_value in src_txn.cursor():
            partition_id = v06.decode_uint(src_encoded_key)
            partition_data = v06.decode_json(src_encoded_value)

            with dst_env.begin(db=dst_ordered_partition_db,
                               write=True) as dst_txn:
                dst_txn.put(
                    v07.encode_ordered_partition_db_key(
                        partition_id),
                    v07.encode_ordered_partition_db_value(
                        partition_data))

    # per-partition event counts are copied as-is (re-encoded for v0.7)
    with src_env.begin(db=src_ordered_count_db, buffers=True) as src_txn:
        for src_encoded_key, src_encoded_value in src_txn.cursor():
            partition_id = v06.decode_uint(src_encoded_key)
            count = v06.decode_json(src_encoded_value)

            with dst_env.begin(db=dst_ordered_count_db,
                               write=True) as dst_txn:
                dst_txn.put(
                    v07.encode_ordered_count_db_key(
                        partition_id),
                    v07.encode_ordered_count_db_value(
                        count))
|
|
147
|
+
|
|
148
|
+
|
|
149
|
+
def _update_ref_db(dst_env, dst_ref_db, dst_event, event_ref):
    """Add `event_ref` to the set of references stored for the event's id."""
    with dst_env.begin(db=dst_ref_db,
                       write=True,
                       buffers=True) as txn:
        encoded_key = v07.encode_ref_db_key(dst_event.event_id)
        existing = txn.get(encoded_key)

        # read-modify-write of the reference set within one transaction
        refs = v07.decode_ref_db_value(existing) if existing else set()
        refs.add(event_ref)

        txn.put(encoded_key, v07.encode_ref_db_value(refs))
|
|
162
|
+
|
|
163
|
+
|
|
164
|
+
def _update_system_db(dst_env, dst_system_db, server_id, latest_event):
    """Store the latest (event id, timestamp) for `server_id` if newer."""
    with dst_env.begin(db=dst_system_db,
                       write=True,
                       buffers=True) as txn:
        encoded_key = v07.encode_system_db_key(server_id)
        existing = txn.get(encoded_key)

        if existing:
            stored_event_id, _ = v07.decode_system_db_value(existing)
        else:
            # sentinel id smaller than any converted event id
            stored_event_id = v07.EventId(server_id, 0, 0)

        if latest_event.event_id > stored_event_id:
            encoded_value = v07.encode_system_db_value(
                (latest_event.event_id, latest_event.timestamp))
            txn.put(encoded_key, encoded_value)
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
def _get_server_id(src_env, src_system_db):
    """Read the server identifier stored in the source system database."""
    with src_env.begin(db=src_system_db, buffers=True) as txn:
        # decode inside the transaction: with buffers=True the returned
        # value may be a buffer that is only valid while txn is open
        encoded_server_id = txn.get(b'server_id')
        return v06.decode_uint(encoded_server_id)
|
|
185
|
+
|
|
186
|
+
|
|
187
|
+
def _convert_event(src_event, server_id):
    """Map a v0.6 event to a v0.7 event, substituting `server_id` if unset."""
    source_timestamp = (_convert_timestamp(src_event.source_timestamp)
                        if src_event.source_timestamp else None)
    payload = (_convert_event_payload(src_event.payload)
               if src_event.payload else None)
    return v07.Event(
        event_id=_convert_event_id(src_event.event_id, server_id),
        event_type=src_event.event_type,
        timestamp=_convert_timestamp(src_event.timestamp),
        source_timestamp=source_timestamp,
        payload=payload)


def _convert_event_id(src_event_id, server_id):
    """Map a v0.6 event id to a v0.7 event id."""
    server = (src_event_id.server if src_event_id.server > 0
              else server_id)
    # session id is filled from the instance id (presumably v0.6 event
    # ids carry no session component - TODO confirm)
    return v07.EventId(server=server,
                       session=src_event_id.instance,
                       instance=src_event_id.instance)


def _convert_event_payload(src_event_payload):
    """Map a v0.6 payload to a v0.7 payload (type mapped by name)."""
    payload_type = v07.EventPayloadType[src_event_payload.type.name]
    return v07.EventPayload(type=payload_type,
                            data=src_event_payload.data)


def _convert_timestamp(src_timestamp):
    """Map a v0.6 timestamp to a v0.7 timestamp (same fields)."""
    return v07.Timestamp(s=src_timestamp.s,
                         us=src_timestamp.us)
|