hat-event 0.9.27__cp310.cp311.cp312.cp313-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hat/event/__init__.py +1 -0
- hat/event/adminer/__init__.py +18 -0
- hat/event/adminer/client.py +124 -0
- hat/event/adminer/common.py +27 -0
- hat/event/adminer/server.py +111 -0
- hat/event/backends/__init__.py +0 -0
- hat/event/backends/dummy.py +49 -0
- hat/event/backends/lmdb/__init__.py +9 -0
- hat/event/backends/lmdb/backend.py +319 -0
- hat/event/backends/lmdb/common.py +277 -0
- hat/event/backends/lmdb/conditions.py +102 -0
- hat/event/backends/lmdb/convert/__init__.py +0 -0
- hat/event/backends/lmdb/convert/__main__.py +8 -0
- hat/event/backends/lmdb/convert/convert_v06_to_v07.py +213 -0
- hat/event/backends/lmdb/convert/convert_v07_to_v09.py +175 -0
- hat/event/backends/lmdb/convert/main.py +88 -0
- hat/event/backends/lmdb/convert/v06.py +216 -0
- hat/event/backends/lmdb/convert/v07.py +508 -0
- hat/event/backends/lmdb/convert/v09.py +50 -0
- hat/event/backends/lmdb/convert/version.py +63 -0
- hat/event/backends/lmdb/environment.py +100 -0
- hat/event/backends/lmdb/latestdb.py +116 -0
- hat/event/backends/lmdb/manager/__init__.py +0 -0
- hat/event/backends/lmdb/manager/__main__.py +8 -0
- hat/event/backends/lmdb/manager/common.py +45 -0
- hat/event/backends/lmdb/manager/copy.py +92 -0
- hat/event/backends/lmdb/manager/main.py +34 -0
- hat/event/backends/lmdb/manager/query.py +215 -0
- hat/event/backends/lmdb/refdb.py +234 -0
- hat/event/backends/lmdb/systemdb.py +102 -0
- hat/event/backends/lmdb/timeseriesdb.py +486 -0
- hat/event/backends/memory.py +178 -0
- hat/event/common/__init__.py +144 -0
- hat/event/common/backend.py +91 -0
- hat/event/common/collection/__init__.py +8 -0
- hat/event/common/collection/common.py +28 -0
- hat/event/common/collection/list.py +19 -0
- hat/event/common/collection/tree.py +62 -0
- hat/event/common/common.py +176 -0
- hat/event/common/encoder.py +305 -0
- hat/event/common/json_schema_repo.json +1 -0
- hat/event/common/matches.py +44 -0
- hat/event/common/module.py +142 -0
- hat/event/common/sbs_repo.json +1 -0
- hat/event/common/subscription/__init__.py +22 -0
- hat/event/common/subscription/_csubscription.abi3.pyd +0 -0
- hat/event/common/subscription/common.py +145 -0
- hat/event/common/subscription/csubscription.py +47 -0
- hat/event/common/subscription/pysubscription.py +97 -0
- hat/event/component.py +284 -0
- hat/event/eventer/__init__.py +28 -0
- hat/event/eventer/client.py +260 -0
- hat/event/eventer/common.py +27 -0
- hat/event/eventer/server.py +286 -0
- hat/event/manager/__init__.py +0 -0
- hat/event/manager/__main__.py +8 -0
- hat/event/manager/common.py +48 -0
- hat/event/manager/main.py +387 -0
- hat/event/server/__init__.py +0 -0
- hat/event/server/__main__.py +8 -0
- hat/event/server/adminer_server.py +43 -0
- hat/event/server/engine.py +216 -0
- hat/event/server/engine_runner.py +127 -0
- hat/event/server/eventer_client.py +205 -0
- hat/event/server/eventer_client_runner.py +152 -0
- hat/event/server/eventer_server.py +119 -0
- hat/event/server/main.py +84 -0
- hat/event/server/main_runner.py +212 -0
- hat_event-0.9.27.dist-info/LICENSE +202 -0
- hat_event-0.9.27.dist-info/METADATA +108 -0
- hat_event-0.9.27.dist-info/RECORD +73 -0
- hat_event-0.9.27.dist-info/WHEEL +7 -0
- hat_event-0.9.27.dist-info/entry_points.txt +5 -0
hat/event/backends/lmdb/convert/v07.py

```diff
@@ -0,0 +1,508 @@
+from pathlib import Path
+import enum
+import itertools
+import platform
+import struct
+import typing
+
+import lmdb
+
+from hat import json
+from hat import sbs
+
+
+OrderBy = enum.Enum('OrderBy', [
+    'TIMESTAMP',
+    'SOURCE_TIMESTAMP'])
+
+
+EventType: typing.TypeAlias = typing.Tuple[str, ...]
+
+
+class EventId(typing.NamedTuple):
+    server: int
+    session: int
+    instance: int
+
+
+EventPayloadType = enum.Enum('EventPayloadType', [
+    'BINARY',
+    'JSON',
+    'SBS'])
+
+
+class SbsData(typing.NamedTuple):
+    module: str | None
+    type: str
+    data: bytes
+
+
+class EventPayload(typing.NamedTuple):
+    type: EventPayloadType
+    data: bytes | json.Data | SbsData
+
+
+class Timestamp(typing.NamedTuple):
+    s: int
+    us: int
+
+    def add(self, s: float) -> 'Timestamp':
+        us = self.us + round((s - int(s)) * 1e6)
+        s = self.s + int(s)
+        return Timestamp(s=s + us // int(1e6),
+                         us=us % int(1e6))
```
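The microsecond arithmetic in `Timestamp.add` is easy to misread, so here is a standalone sanity check (illustrative values, assuming the class above is in scope):

```python
# Fractional seconds are captured with round(); floor division and
# modulo then renormalize us into the [0, 1_000_000) range.
t = Timestamp(s=10, us=999_999)
assert t.add(0.5) == Timestamp(s=11, us=499_999)   # carry into seconds
assert t.add(-0.5) == Timestamp(s=10, us=499_999)  # negative offsets normalize too
```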
```diff
+
+
+class Event(typing.NamedTuple):
+    event_id: EventId
+    event_type: EventType
+    timestamp: Timestamp
+    source_timestamp: Timestamp | None
+    payload: EventPayload | None
+
+
+class DbType(enum.Enum):
+    SYSTEM = 0
+    LATEST_DATA = 1
+    LATEST_TYPE = 2
+    ORDERED_DATA = 3
+    ORDERED_PARTITION = 4
+    ORDERED_COUNT = 5
+    REF = 6
+
+
+ServerId: typing.TypeAlias = int
+EventTypeRef: typing.TypeAlias = int
+PartitionId: typing.TypeAlias = int
+
+
+class LatestEventRef(typing.NamedTuple):
+    key: 'LatestDataDbKey'
+
+
+class OrderedEventRef(typing.NamedTuple):
+    key: 'OrderedDataDbKey'
+
+
+EventRef: typing.TypeAlias = LatestEventRef | OrderedEventRef
+
+SystemDbKey = ServerId
+SystemDbValue = tuple[EventId, Timestamp]
+
+LatestDataDbKey = EventTypeRef
+LatestDataDbValue = Event
+
+LatestTypeDbKey = EventTypeRef
+LatestTypeDbValue = EventType
+
+OrderedDataDbKey = tuple[PartitionId, Timestamp, EventId]
+OrderedDataDbValue = Event
+
+OrderedPartitionDbKey = PartitionId
+OrderedPartitionDbValue = json.Data
+
+OrderedCountDbKey = PartitionId
+OrderedCountDbValue = int
+
+RefDbKey = EventId
+RefDbValue = set[EventRef]
+
+
+def encode_system_db_key(key: SystemDbKey) -> bytes:
+    return _encode_uint(key)
+
+
+def decode_system_db_key(key_bytes: bytes) -> SystemDbKey:
+    key, _ = _decode_uint(key_bytes)
+    return key
+
+
+def encode_system_db_value(value: SystemDbValue) -> bytes:
+    event_id, timestamp = value
+    return _encode_event_id(event_id) + _encode_timestamp(timestamp)
+
+
+def decode_system_db_value(value_bytes: bytes) -> SystemDbValue:
+    event_id, rest = _decode_event_id(value_bytes)
+    timestamp, _ = _decode_timestamp(rest)
+    return event_id, timestamp
+
+
+def encode_latest_data_db_key(key: LatestDataDbKey) -> bytes:
+    return _encode_uint(key)
+
+
+def decode_latest_data_db_key(key_bytes: bytes) -> LatestDataDbKey:
+    key, _ = _decode_uint(key_bytes)
+    return key
+
+
+def encode_latest_data_db_value(value: LatestDataDbValue) -> bytes:
+    return _encode_event(value)
+
+
+def decode_latest_data_db_value(value_bytes: bytes) -> LatestDataDbValue:
+    return _decode_event(value_bytes)
+
+
+def encode_latest_type_db_key(key: LatestTypeDbKey) -> bytes:
+    return _encode_uint(key)
+
+
+def decode_latest_type_db_key(key_bytes: bytes) -> LatestTypeDbKey:
+    key, _ = _decode_uint(key_bytes)
+    return key
+
+
+def encode_latest_type_db_value(value: LatestTypeDbValue) -> bytes:
+    return _encode_json(list(value))
+
+
+def decode_latest_type_db_value(value_bytes: bytes) -> LatestTypeDbValue:
+    return tuple(_decode_json(value_bytes))
+
+
+def encode_ordered_data_db_key(key: OrderedDataDbKey) -> bytes:
+    partition_id, timestamp, event_id = key
+    return (_encode_uint(partition_id) +
+            _encode_timestamp(timestamp) +
+            _encode_event_id(event_id))
+
+
+def decode_ordered_data_db_key(key_bytes: bytes) -> OrderedDataDbKey:
+    partition_id, rest = _decode_uint(key_bytes)
+    timestamp, rest = _decode_timestamp(rest)
+    event_id, _ = _decode_event_id(rest)
+    return partition_id, timestamp, event_id
+
+
+def encode_ordered_data_db_value(value: OrderedDataDbValue) -> bytes:
+    return _encode_event(value)
+
+
+def decode_ordered_data_db_value(value_bytes: bytes) -> OrderedDataDbValue:
+    return _decode_event(value_bytes)
+
+
+def encode_ordered_partition_db_key(key: OrderedPartitionDbKey) -> bytes:
+    return _encode_uint(key)
+
+
+def decode_ordered_partition_db_key(key_bytes: bytes) -> OrderedPartitionDbKey:
+    key, _ = _decode_uint(key_bytes)
+    return key
+
+
+def encode_ordered_partition_db_value(value: OrderedPartitionDbValue) -> bytes:
+    return _encode_json(value)
+
+
+def decode_ordered_partition_db_value(value_bytes: bytes
+                                      ) -> OrderedPartitionDbValue:
+    return _decode_json(value_bytes)
+
+
+def encode_ordered_count_db_key(key: OrderedCountDbKey) -> bytes:
+    return _encode_uint(key)
+
+
+def decode_ordered_count_db_key(key_bytes: bytes) -> OrderedCountDbKey:
+    key, _ = _decode_uint(key_bytes)
+    return key
+
+
+def encode_ordered_count_db_value(value: OrderedCountDbValue) -> bytes:
+    return _encode_uint(value)
+
+
+def decode_ordered_count_db_value(value_bytes: bytes) -> OrderedCountDbValue:
+    value, _ = _decode_uint(value_bytes)
+    return value
+
+
+def encode_ref_db_key(key: RefDbKey) -> bytes:
+    return _encode_event_id(key)
+
+
+def decode_ref_db_key(key_bytes: bytes) -> RefDbKey:
+    event_id, _ = _decode_event_id(key_bytes)
+    return event_id
+
+
+def encode_ref_db_value(value: RefDbValue) -> bytes:
+    return bytes(itertools.chain.from_iterable(
+        _encode_event_ref(ref) for ref in value))
+
+
+def decode_ref_db_value(value_bytes: bytes) -> RefDbValue:
+    refs = set()
+    while value_bytes:
+        ref, value_bytes = _decode_event_ref(value_bytes)
+        refs.add(ref)
+    return refs
```
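A REF value is just a concatenation of tagged, fixed-width references, so a round trip through this codec should be lossless. A sketch with illustrative values, assuming the definitions above are in scope:

```python
refs = {LatestEventRef(key=42),
        OrderedEventRef(key=(1, Timestamp(s=0, us=0), EventId(1, 2, 3)))}

# Each ref encodes as a 1-byte DbType tag followed by its fixed-width
# key (8 bytes for LATEST_DATA, 44 bytes for ORDERED_DATA).
encoded = encode_ref_db_value(refs)
assert decode_ref_db_value(encoded) == refs
```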
```diff
+
+
+def open_db(env: lmdb.Environment,
+            db_type: DbType
+            ) -> lmdb._Database:
+    return env.open_db(db_type.name.encode('utf-8'))
+
+
+def create_env(path: Path) -> lmdb.Environment:
+    max_dbs = len(DbType)
+    max_db_size = (512 * 1024 * 1024 * 1024
+                   if platform.architecture()[0] == '64bit'
+                   else 1024 * 1024 * 1024)
+    return lmdb.Environment(str(path),
+                            map_size=max_db_size,
+                            subdir=False,
+                            max_dbs=max_dbs)
```
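Putting these pieces together, reading one of the v0.7 databases might look like the following sketch (the path is illustrative):

```python
env = create_env(Path('event.db'))
try:
    ref_db = open_db(env, DbType.REF)
    with env.begin(db=ref_db, buffers=True) as txn:
        for key_bytes, value_bytes in txn.cursor():
            # buffers=True yields memoryviews; copy before decoding
            event_id = decode_ref_db_key(bytes(key_bytes))
            refs = decode_ref_db_value(bytes(value_bytes))
finally:
    env.close()
```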
```diff
+
+
+_sbs_repo = sbs.Repository(r"""
+module HatEventer
+
+MsgSubscribe = Array(EventType)
+
+MsgNotify = Array(Event)
+
+MsgRegisterReq = Array(RegisterEvent)
+
+MsgRegisterRes = Array(Choice {
+    event: Event
+    failure: None
+})
+
+MsgQueryReq = QueryData
+
+MsgQueryRes = Array(Event)
+
+Timestamp = Record {
+    s: Integer
+    us: Integer
+}
+
+EventId = Record {
+    server: Integer
+    session: Integer
+    instance: Integer
+}
+
+Order = Choice {
+    descending: None
+    ascending: None
+}
+
+OrderBy = Choice {
+    timestamp: None
+    sourceTimestamp: None
+}
+
+EventType = Array(String)
+
+EventPayload = Choice {
+    binary: Bytes
+    json: String
+    sbs: Record {
+        module: Optional(String)
+        type: String
+        data: Bytes
+    }
+}
+
+Event = Record {
+    id: EventId
+    type: EventType
+    timestamp: Timestamp
+    sourceTimestamp: Optional(Timestamp)
+    payload: Optional(EventPayload)
+}
+
+RegisterEvent = Record {
+    type: EventType
+    sourceTimestamp: Optional(Timestamp)
+    payload: Optional(EventPayload)
+}
+
+QueryData = Record {
+    serverId: Optional(Integer)
+    ids: Optional(Array(EventId))
+    types: Optional(Array(EventType))
+    tFrom: Optional(Timestamp)
+    tTo: Optional(Timestamp)
+    sourceTFrom: Optional(Timestamp)
+    sourceTTo: Optional(Timestamp)
+    payload: Optional(EventPayload)
+    order: Order
+    orderBy: OrderBy
+    uniqueType: Boolean
+    maxResults: Optional(Integer)
+}
+""")
+
+
+def _event_to_sbs(event):
+    return {
+        'id': _event_id_to_sbs(event.event_id),
+        'type': list(event.event_type),
+        'timestamp': _timestamp_to_sbs(event.timestamp),
+        'sourceTimestamp': _optional_to_sbs(event.source_timestamp,
+                                            _timestamp_to_sbs),
+        'payload': _optional_to_sbs(event.payload, _event_payload_to_sbs)}
+
+
+def _event_from_sbs(data):
+    return Event(
+        event_id=_event_id_from_sbs(data['id']),
+        event_type=tuple(data['type']),
+        timestamp=_timestamp_from_sbs(data['timestamp']),
+        source_timestamp=_optional_from_sbs(data['sourceTimestamp'],
+                                            _timestamp_from_sbs),
+        payload=_optional_from_sbs(data['payload'], _event_payload_from_sbs))
+
+
+def _event_id_to_sbs(event_id):
+    return {'server': event_id.server,
+            'session': event_id.session,
+            'instance': event_id.instance}
+
+
+def _event_id_from_sbs(data):
+    return EventId(server=data['server'],
+                   session=data['session'],
+                   instance=data['instance'])
+
+
+def _timestamp_to_sbs(t):
+    return {'s': t.s, 'us': t.us}
+
+
+def _timestamp_from_sbs(data):
+    return Timestamp(s=data['s'], us=data['us'])
+
+
+def _event_payload_to_sbs(payload):
+    if payload.type == EventPayloadType.BINARY:
+        return 'binary', payload.data
+
+    if payload.type == EventPayloadType.JSON:
+        return 'json', json.encode(payload.data)
+
+    if payload.type == EventPayloadType.SBS:
+        return 'sbs', _sbs_data_to_sbs(payload.data)
+
+    raise ValueError('unsupported payload type')
+
+
+def _event_payload_from_sbs(data):
+    data_type, data_data = data
+
+    if data_type == 'binary':
+        return EventPayload(type=EventPayloadType.BINARY,
+                            data=data_data)
+
+    if data_type == 'json':
+        return EventPayload(type=EventPayloadType.JSON,
+                            data=json.decode(data_data))
+
+    if data_type == 'sbs':
+        return EventPayload(type=EventPayloadType.SBS,
+                            data=_sbs_data_from_sbs(data_data))
+
+    raise ValueError('unsupported payload type')
```
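Note that JSON payloads cross the SBS boundary as encoded strings rather than structured data, but the round trip is still lossless. A sketch using the module-private helpers above (illustrative data):

```python
payload = EventPayload(type=EventPayloadType.JSON, data={'value': 1})

tagged = _event_payload_to_sbs(payload)  # ('json', <JSON-encoded string>)
assert _event_payload_from_sbs(tagged) == payload
```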
```diff
+
+
+def _sbs_data_to_sbs(data):
+    return {'module': _optional_to_sbs(data.module),
+            'type': data.type,
+            'data': data.data}
+
+
+def _sbs_data_from_sbs(data):
+    return SbsData(module=_optional_from_sbs(data['module']),
+                   type=data['type'],
+                   data=data['data'])
+
+
+def _optional_to_sbs(value, fn=lambda i: i):
+    return ('value', fn(value)) if value is not None else ('none', None)
+
+
+def _optional_from_sbs(data, fn=lambda i: i):
+    return fn(data[1]) if data[0] == 'value' else None
+
+
+def _encode_uint(value):
+    return struct.pack(">Q", value)
+
+
+def _decode_uint(value_bytes):
+    return struct.unpack(">Q", value_bytes[:8])[0], value_bytes[8:]
+
+
+def _encode_event_id(event_id):
+    return struct.pack(">QQQ", event_id.server, event_id.session,
+                       event_id.instance)
+
+
+def _decode_event_id(event_id_bytes):
+    server_id, session_id, instance_id = struct.unpack(">QQQ",
+                                                       event_id_bytes[:24])
+    event_id = EventId(server=server_id,
+                       session=session_id,
+                       instance=instance_id)
+    return event_id, event_id_bytes[24:]
+
+
+def _encode_timestamp(timestamp):
+    return struct.pack(">QI", timestamp.s + (1 << 63), timestamp.us)
+
+
+def _decode_timestamp(timestamp_bytes):
+    s, us = struct.unpack(">QI", timestamp_bytes[:12])
+    return Timestamp(s - (1 << 63), us), timestamp_bytes[12:]
```
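The `1 << 63` bias in `_encode_timestamp` shifts signed seconds into unsigned 64-bit range, so the big-endian encodings compare in the same order as the timestamps themselves, which is exactly what LMDB's lexicographic key ordering requires. A quick standalone check:

```python
a = _encode_timestamp(Timestamp(s=-1, us=0))  # packs seconds as 0x7fff...
b = _encode_timestamp(Timestamp(s=0, us=0))   # packs seconds as 0x8000...
c = _encode_timestamp(Timestamp(s=0, us=1))

assert a < b < c  # byte order matches timestamp order
```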
```diff
+
+
+def _encode_event(event):
+    event_sbs = _event_to_sbs(event)
+    return _sbs_repo.encode('HatEventer.Event', event_sbs)
+
+
+def _decode_event(event_bytes):
+    event_sbs = _sbs_repo.decode('HatEventer.Event', event_bytes)
+    return _event_from_sbs(event_sbs)
+
+
+def _encode_json(data):
+    return json.encode(data).encode('utf-8')
+
+
+def _decode_json(data_bytes):
+    return json.decode(str(data_bytes, encoding='utf-8'))
+
+
+def _encode_event_ref(ref):
+    if isinstance(ref, LatestEventRef):
+        yield DbType.LATEST_DATA.value
+        yield from encode_latest_data_db_key(ref.key)
+
+    elif isinstance(ref, OrderedEventRef):
+        yield DbType.ORDERED_DATA.value
+        yield from encode_ordered_data_db_key(ref.key)
+
+    else:
+        raise ValueError('unsupported event reference')
+
+
+def _decode_event_ref(ref_bytes):
+    db_type, rest = DbType(ref_bytes[0]), ref_bytes[1:]
+
+    if db_type == DbType.LATEST_DATA:
+        ref = LatestEventRef(decode_latest_data_db_key(rest[:8]))
+        return ref, rest[8:]
+
+    if db_type == DbType.ORDERED_DATA:
+        ref = OrderedEventRef(decode_ordered_data_db_key(rest[:44]))
+        return ref, rest[44:]
+
+    raise ValueError('unsupported database type')
```
hat/event/backends/lmdb/convert/v09.py

```diff
@@ -0,0 +1,50 @@
+import typing
+
+import lmdb
+
+from hat.event.backends.lmdb import common
+
+
+version = '0.9'
+
+Timestamp = common.Timestamp
+OrderBy = common.OrderBy
+EventId = common.EventId
+EventType = common.EventType
+EventPayloadBinary = common.EventPayloadBinary
+EventPayloadJson = common.EventPayloadJson
+Event = common.Event
+
+DbKey = common.DbKey
+DbValue = common.DbValue
+DbType = common.DbType
+SettingsId = common.SettingsId
+LatestEventRef = common.LatestEventRef
+TimeseriesEventRef = common.TimeseriesEventRef
+db_defs = common.db_defs
+create_env = common.ext_create_env
+open_db = common.ext_open_db
+
+
+def read(dbs: dict[DbType, lmdb._Database],
+         txn: lmdb.Transaction,
+         db_type: DbType
+         ) -> typing.Iterable[tuple[DbKey, DbValue]]:
+    db_def = db_defs[db_type]
+    with txn.cursor(dbs[db_type]) as cursor:
+        for encoded_key, encoded_value in cursor:
+            key = db_def.decode_key(encoded_key)
+            value = db_def.decode_value(encoded_value)
+            yield key, value
+
+
+def write(dbs: dict[DbType, lmdb._Database],
+          txn: lmdb.Transaction,
+          db_type: DbType,
+          key: DbKey,
+          value: DbValue):
+    db_def = db_defs[db_type]
+    encoded_key = db_def.encode_key(key)
+    encoded_value = db_def.encode_value(value)
+
+    txn.put(encoded_key, encoded_value, db=dbs[db_type])
```
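`ext_create_env` and `ext_open_db` (re-exported here as `create_env` and `open_db`) come from the `common` module, which this diff does not show; their keyword signatures can be inferred from `environment.py` below. With that assumption, dumping a v0.9 database might look like this sketch (path and size are illustrative):

```python
from pathlib import Path

env = create_env(path=Path('event.db'), max_size=1024 * 1024 * 1024)
try:
    dbs = {db_type: open_db(env=env, db_type=db_type, create=False)
           for db_type in DbType}
    with env.begin(buffers=True) as txn:
        for db_type in DbType:
            for key, value in read(dbs, txn, db_type):
                print(db_type.name, key, value)
finally:
    env.close()
```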
hat/event/backends/lmdb/convert/version.py

```diff
@@ -0,0 +1,63 @@
+from pathlib import Path
+import contextlib
+import enum
+import platform
+import struct
+
+import lmdb
+
+from hat import json
+
+
+_max_size = (1024 * 1024 * 1024 * 1024
+             if platform.architecture()[0] == '64bit'
+             else 2 * 1024 * 1024 * 1024)
+
+_max_dbs = 9
+
+
+class Version(enum.Enum):
+    v06 = '0.6'
+    v07 = '0.7'
+    v09 = '0.9'
+
+
+def get_version(path: Path) -> Version:
+    with lmdb.Environment(str(path),
+                          map_size=_max_size,
+                          subdir=False,
+                          max_dbs=_max_dbs,
+                          readonly=True) as env:
+        with contextlib.suppress(Exception):
+            db = env.open_db(b'SYSTEM_SETTINGS', create=False)
+
+            with env.begin(buffers=True) as txn:
+                key = _encode_uint(0)
+                value = txn.get(key, db=db)
+
+            return Version(_decode_json(value))
+
+        with contextlib.suppress(Exception):
+            for db_name in [b'SYSTEM', b'LATEST_DATA', b'LATEST_TYPE',
+                            b'ORDERED_DATA', b'ORDERED_PARTITION',
+                            b'ORDERED_COUNT', b'REF']:
+                env.open_db(db_name, create=False)
+
+            return Version.v07
+
+        with contextlib.suppress(Exception):
+            for db_name in [b'system', b'latest', b'ordered_data',
+                            b'ordered_partition', b'ordered_count']:
+                env.open_db(db_name, create=False)
+
+            return Version.v06
+
+    raise Exception('unsupported version')
+
+
+def _encode_uint(value):
+    return struct.pack(">Q", value)
+
+
+def _decode_json(data_bytes):
+    return json.decode(str(data_bytes, encoding='utf-8'))
```
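`get_version` probes the named databases to detect the on-disk format, which lets the converter pick a migration path; per the file list above, `convert_v06_to_v07.py` and `convert_v07_to_v09.py` hold the actual steps. A hypothetical dispatch (sketch only; the real wiring lives in `convert/main.py`, not shown in this diff):

```python
from pathlib import Path

path = Path('event.db')  # illustrative
version = get_version(path)

if version == Version.v06:
    pass  # run convert_v06_to_v07, then convert_v07_to_v09
elif version == Version.v07:
    pass  # run convert_v07_to_v09
else:
    pass  # already v0.9, nothing to do
```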
hat/event/backends/lmdb/environment.py

```diff
@@ -0,0 +1,100 @@
+from pathlib import Path
+import asyncio
+import typing
+
+import lmdb
+
+from hat import aio
+
+from hat.event.backends.lmdb import common
+
+
+async def create(db_path: Path,
+                 max_size: int = common.default_max_size
+                 ) -> 'Environment':
+    env = Environment()
+    env._loop = asyncio.get_running_loop()
+    env._async_group = aio.Group()
+    env._executor = aio.Executor(1, log_exceptions=False)
+    env._env = None
+    env._dbs = {}
+
+    env.async_group.spawn(aio.call_on_cancel, env._on_close)
+
+    try:
+        await env._executor.spawn(env._ext_init, db_path, max_size)
+
+    except BaseException:
+        await aio.uncancellable(env.async_close())
+        raise
+
+    return env
+
+
+class Environment(aio.Resource):
+
+    @property
+    def async_group(self) -> aio.Group:
+        return self._async_group
+
+    async def execute(self,
+                      ext_fn: typing.Callable,
+                      *args: typing.Any) -> typing.Any:
+        if not self.is_open:
+            raise Exception('environment closed')
+
+        return await self._executor.spawn(ext_fn, *args)
+
+    def ext_begin(self, write: bool = False) -> lmdb.Transaction:
+        return self._env.begin(write=write,
+                               buffers=True)
+
+    def ext_cursor(self,
+                   txn: lmdb.Transaction,
+                   db_type: common.DbType
+                   ) -> lmdb.Cursor:
+        return txn.cursor(self._dbs[db_type])
+
+    def ext_stat(self,
+                 txn: lmdb.Transaction,
+                 db_type: common.DbType
+                 ) -> dict[str, int]:
+        return txn.stat(self._dbs[db_type])
+
+    def ext_read(self,
+                 txn: lmdb.Transaction,
+                 db_type: common.DbType
+                 ) -> typing.Iterable[tuple[common.DbKey, common.DbValue]]:
+        db_def = common.db_defs[db_type]
+        with txn.cursor(self._dbs[db_type]) as cursor:
+            for encoded_key, encoded_value in cursor:
+                key = db_def.decode_key(encoded_key)
+                value = db_def.decode_value(encoded_value)
+                yield key, value
+
+    def ext_write(self,
+                  txn: lmdb.Transaction,
+                  db_type: common.DbType,
+                  data: typing.Iterable[tuple[common.DbKey, common.DbValue]]):
+        db_def = common.db_defs[db_type]
+        with txn.cursor(self._dbs[db_type]) as cursor:
+            for key, value in data:
+                encoded_key = db_def.encode_key(key)
+                encoded_value = db_def.encode_value(value)
+                cursor.put(encoded_key, encoded_value)
+
+    async def _on_close(self):
+        if self._env:
+            await self._executor.spawn(self._env.close)
+
+        await self._executor.async_close()
+
+    def _ext_init(self, db_path, max_size):
+        create = not db_path.exists()
+
+        self._env = common.ext_create_env(path=db_path,
+                                          max_size=max_size)
+        self._dbs = {db_type: common.ext_open_db(env=self._env,
+                                                 db_type=db_type,
+                                                 create=create)
+                     for db_type in common.DbType}
```
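The split between the async `execute` coroutine and the blocking `ext_*` methods means callers write plain functions against LMDB and schedule them on the environment's single worker thread. A usage sketch (path is illustrative; assumes the package's `common` module):

```python
from pathlib import Path
import asyncio

from hat.event.backends.lmdb import common, environment


def _ext_count(env, db_type):
    # Blocking LMDB work: runs on the Environment's executor thread.
    with env.ext_begin() as txn:
        return env.ext_stat(txn, db_type)['entries']


async def main():
    env = await environment.create(Path('event.db'))
    try:
        for db_type in common.DbType:
            count = await env.execute(_ext_count, env, db_type)
            print(db_type.name, count)
    finally:
        await env.async_close()


asyncio.run(main())
```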