hat_event-0.9.27-cp310.cp311.cp312.cp313-abi3-win_amd64.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- hat/event/__init__.py +1 -0
- hat/event/adminer/__init__.py +18 -0
- hat/event/adminer/client.py +124 -0
- hat/event/adminer/common.py +27 -0
- hat/event/adminer/server.py +111 -0
- hat/event/backends/__init__.py +0 -0
- hat/event/backends/dummy.py +49 -0
- hat/event/backends/lmdb/__init__.py +9 -0
- hat/event/backends/lmdb/backend.py +319 -0
- hat/event/backends/lmdb/common.py +277 -0
- hat/event/backends/lmdb/conditions.py +102 -0
- hat/event/backends/lmdb/convert/__init__.py +0 -0
- hat/event/backends/lmdb/convert/__main__.py +8 -0
- hat/event/backends/lmdb/convert/convert_v06_to_v07.py +213 -0
- hat/event/backends/lmdb/convert/convert_v07_to_v09.py +175 -0
- hat/event/backends/lmdb/convert/main.py +88 -0
- hat/event/backends/lmdb/convert/v06.py +216 -0
- hat/event/backends/lmdb/convert/v07.py +508 -0
- hat/event/backends/lmdb/convert/v09.py +50 -0
- hat/event/backends/lmdb/convert/version.py +63 -0
- hat/event/backends/lmdb/environment.py +100 -0
- hat/event/backends/lmdb/latestdb.py +116 -0
- hat/event/backends/lmdb/manager/__init__.py +0 -0
- hat/event/backends/lmdb/manager/__main__.py +8 -0
- hat/event/backends/lmdb/manager/common.py +45 -0
- hat/event/backends/lmdb/manager/copy.py +92 -0
- hat/event/backends/lmdb/manager/main.py +34 -0
- hat/event/backends/lmdb/manager/query.py +215 -0
- hat/event/backends/lmdb/refdb.py +234 -0
- hat/event/backends/lmdb/systemdb.py +102 -0
- hat/event/backends/lmdb/timeseriesdb.py +486 -0
- hat/event/backends/memory.py +178 -0
- hat/event/common/__init__.py +144 -0
- hat/event/common/backend.py +91 -0
- hat/event/common/collection/__init__.py +8 -0
- hat/event/common/collection/common.py +28 -0
- hat/event/common/collection/list.py +19 -0
- hat/event/common/collection/tree.py +62 -0
- hat/event/common/common.py +176 -0
- hat/event/common/encoder.py +305 -0
- hat/event/common/json_schema_repo.json +1 -0
- hat/event/common/matches.py +44 -0
- hat/event/common/module.py +142 -0
- hat/event/common/sbs_repo.json +1 -0
- hat/event/common/subscription/__init__.py +22 -0
- hat/event/common/subscription/_csubscription.abi3.pyd +0 -0
- hat/event/common/subscription/common.py +145 -0
- hat/event/common/subscription/csubscription.py +47 -0
- hat/event/common/subscription/pysubscription.py +97 -0
- hat/event/component.py +284 -0
- hat/event/eventer/__init__.py +28 -0
- hat/event/eventer/client.py +260 -0
- hat/event/eventer/common.py +27 -0
- hat/event/eventer/server.py +286 -0
- hat/event/manager/__init__.py +0 -0
- hat/event/manager/__main__.py +8 -0
- hat/event/manager/common.py +48 -0
- hat/event/manager/main.py +387 -0
- hat/event/server/__init__.py +0 -0
- hat/event/server/__main__.py +8 -0
- hat/event/server/adminer_server.py +43 -0
- hat/event/server/engine.py +216 -0
- hat/event/server/engine_runner.py +127 -0
- hat/event/server/eventer_client.py +205 -0
- hat/event/server/eventer_client_runner.py +152 -0
- hat/event/server/eventer_server.py +119 -0
- hat/event/server/main.py +84 -0
- hat/event/server/main_runner.py +212 -0
- hat_event-0.9.27.dist-info/LICENSE +202 -0
- hat_event-0.9.27.dist-info/METADATA +108 -0
- hat_event-0.9.27.dist-info/RECORD +73 -0
- hat_event-0.9.27.dist-info/WHEEL +7 -0
- hat_event-0.9.27.dist-info/entry_points.txt +5 -0
hat/event/backends/lmdb/timeseriesdb.py
@@ -0,0 +1,486 @@
from collections.abc import Iterable
import collections
import itertools
import typing

import lmdb

from hat.event.backends.lmdb import common
from hat.event.backends.lmdb import environment
from hat.event.backends.lmdb.conditions import Conditions


Changes: typing.TypeAlias = dict[common.PartitionId,
                                 collections.deque[tuple[common.Timestamp,
                                                         common.Event]]]


class Limit(typing.NamedTuple):
    min_entries: int | None = None
    max_entries: int | None = None
    duration: float | None = None
    size: int | None = None


class Partition(typing.NamedTuple):
    order_by: common.OrderBy
    subscription: common.Subscription
    limit: Limit | None


def ext_create(env: environment.Environment,
               txn: lmdb.Transaction,
               conditions: Conditions,
               partitions: Iterable[Partition],
               max_results: int = 4096
               ) -> 'TimeseriesDb':
    db = TimeseriesDb()
    db._env = env
    db._conditions = conditions
    db._max_results = max_results
    db._changes = collections.defaultdict(collections.deque)

    # depending on dict order
    db._partitions = dict(_ext_init_partitions(env, txn, partitions))

    return db


class TimeseriesDb:

    def add(self,
            event: common.Event
            ) -> Iterable[common.EventRef]:
        for partition_id, partition in self._partitions.items():
            if not partition.subscription.matches(event.type):
                continue

            if partition.order_by == common.OrderBy.TIMESTAMP:
                timestamp = event.timestamp

            elif partition.order_by == common.OrderBy.SOURCE_TIMESTAMP:
                if event.source_timestamp is None:
                    continue

                timestamp = event.source_timestamp

            else:
                raise ValueError('unsupported order by')

            self._changes[partition_id].append((timestamp, event))

            yield common.TimeseriesEventRef(
                (partition_id, timestamp, event.id))

    async def query(self,
                    params: common.QueryTimeseriesParams
                    ) -> common.QueryResult:
        subscription = (common.create_subscription(params.event_types)
                        if params.event_types is not None else None)

        max_results = (params.max_results
                       if params.max_results is not None and
                       params.max_results < self._max_results
                       else self._max_results)

        for partition_id, partition in self._partitions.items():
            if partition.order_by != params.order_by:
                continue

            if (subscription and
                    subscription.isdisjoint(partition.subscription)):
                continue

            return await self._query_partition(partition_id, params,
                                               subscription, max_results)

        return common.QueryResult(events=[],
                                  more_follows=False)

    def create_changes(self) -> Changes:
        changes, self._changes = (self._changes,
                                  collections.defaultdict(collections.deque))
        return changes

    def ext_write(self,
                  txn: lmdb.Transaction,
                  changes: Changes):
        data = (((partition_id, timestamp, event.id), event)
                for partition_id, partition_changes in changes.items()
                for timestamp, event in partition_changes)
        self._env.ext_write(txn, common.DbType.TIMESERIES_DATA, data)

        counts = ((partition_id, len(partition_changes))
                  for partition_id, partition_changes in changes.items())
        _ext_inc_partition_count(self._env, txn, counts)

    def ext_cleanup(self,
                    txn: lmdb.Transaction,
                    now: common.Timestamp,
                    max_results: int | None = None,
                    ) -> collections.deque[tuple[common.EventId,
                                                 common.EventRef]]:
        result = collections.deque()

        for partition_id, partition in self._partitions.items():
            if not partition.limit:
                continue

            partition_max_results = (max_results - len(result)
                                     if max_results is not None else None)
            if partition_max_results is not None and partition_max_results < 1:
                break

            result.extend(_ext_cleanup_partition(self._env, txn, now,
                                                 partition_id, partition.limit,
                                                 partition_max_results))

        return result

    async def _query_partition(self, partition_id, params, subscription,
                               max_results):
        events = collections.deque()
        changes = self._changes

        filter = _Filter(subscription=subscription,
                         t_from=params.t_from,
                         t_to=params.t_to,
                         source_t_from=params.source_t_from,
                         source_t_to=params.source_t_to,
                         max_results=max_results + 1,
                         last_event_id=params.last_event_id)

        if params.order == common.Order.DESCENDING:
            events.extend(_query_partition_changes(
                changes[partition_id], params, filter))

            if not filter.done:
                events.extend(await self._env.execute(
                    _ext_query_partition_events, self._env, self._conditions,
                    partition_id, params, filter))

        elif params.order == common.Order.ASCENDING:
            events.extend(await self._env.execute(
                _ext_query_partition_events, self._env, self._conditions,
                partition_id, params, filter))

            if not filter.done:
                events.extend(_query_partition_changes(
                    changes[partition_id], params, filter))

        else:
            raise ValueError('unsupported order')

        more_follows = len(events) > max_results
        while len(events) > max_results:
            events.pop()

        return common.QueryResult(events=events,
                                  more_follows=more_follows)


def _ext_init_partitions(env, txn, partitions):
    db_data = dict(env.ext_read(txn, common.DbType.TIMESERIES_PARTITION))
    next_partition_ids = itertools.count(max(db_data.keys(), default=0) + 1)

    for partition in partitions:
        event_types = sorted(partition.subscription.get_query_types())
        partition_data = {'order': partition.order_by.value,
                          'subscriptions': [list(i) for i in event_types]}

        for partition_id, i in db_data.items():
            if i == partition_data:
                break

        else:
            partition_id = next(next_partition_ids)
            db_data[partition_id] = partition_data
            env.ext_write(txn, common.DbType.TIMESERIES_PARTITION,
                          [(partition_id, partition_data)])

        yield partition_id, partition


def _ext_query_partition_events(env, conditions, partition_id, params,
                                filter):
    if params.order_by == common.OrderBy.TIMESTAMP:
        events = _ext_query_partition_events_range(
            env, partition_id, params.t_from, params.t_to, params.order)

    elif params.order_by == common.OrderBy.SOURCE_TIMESTAMP:
        events = _ext_query_partition_events_range(
            env, partition_id, params.source_t_from, params.source_t_to,
            params.order)

    else:
        raise ValueError('unsupported order by')

    events = (event for event in events if conditions.matches(event))
    events = filter.process(events)
    return collections.deque(events)


def _ext_query_partition_events_range(env, partition_id, t_from, t_to, order):
    db_def = common.db_defs[common.DbType.TIMESERIES_DATA]

    if not t_from:
        t_from = common.min_timestamp

    from_key = partition_id, t_from, common.EventId(0, 0, 0)
    encoded_from_key = db_def.encode_key(from_key)

    if not t_to:
        t_to = common.max_timestamp

    to_key = (partition_id,
              t_to,
              common.EventId((1 << 64) - 1, (1 << 64) - 1, (1 << 64) - 1))
    encoded_to_key = db_def.encode_key(to_key)

    with env.ext_begin() as txn:
        with env.ext_cursor(txn, common.DbType.TIMESERIES_DATA) as cursor:
            if order == common.Order.DESCENDING:
                encoded_start_key, encoded_stop_key = (encoded_to_key,
                                                       encoded_from_key)

                if cursor.set_range(encoded_start_key):
                    more = cursor.prev()
                else:
                    more = cursor.last()

                while more and encoded_stop_key <= bytes(cursor.key()):
                    yield db_def.decode_value(cursor.value())
                    more = cursor.prev()

            elif order == common.Order.ASCENDING:
                encoded_start_key, encoded_stop_key = (encoded_from_key,
                                                       encoded_to_key)

                more = cursor.set_range(encoded_start_key)

                while more and bytes(cursor.key()) <= encoded_stop_key:
                    yield db_def.decode_value(cursor.value())
                    more = cursor.next()

            else:
                raise ValueError('unsupported order')


def _ext_get_partition_count(env, txn, partition_id):
    db_def = common.db_defs[common.DbType.TIMESERIES_COUNT]

    with env.ext_cursor(txn, common.DbType.TIMESERIES_COUNT) as cursor:
        encoded_key = db_def.encode_key(partition_id)
        encoded_value = cursor.get(encoded_key)

    return db_def.decode_value(encoded_value) if encoded_value else 0


def _ext_set_partition_count(env, txn, partition_id, count):
    env.ext_write(txn, common.DbType.TIMESERIES_COUNT, [(partition_id, count)])


def _ext_inc_partition_count(env, txn, partition_counts):
    db_def = common.db_defs[common.DbType.TIMESERIES_COUNT]

    with env.ext_cursor(txn, common.DbType.TIMESERIES_COUNT) as cursor:
        for partition_id, count in partition_counts:
            encoded_key = db_def.encode_key(partition_id)
            encoded_value = cursor.get(encoded_key)

            value = db_def.decode_value(encoded_value) if encoded_value else 0
            inc_value = value + count

            encoded_value = db_def.encode_value(inc_value)
            cursor.put(encoded_key, encoded_value)


def _ext_cleanup_partition(env, txn, now, partition_id, limit, max_results):
    db_def = common.db_defs[common.DbType.TIMESERIES_DATA]

    timestamp = common.min_timestamp
    start_key = (partition_id,
                 timestamp,
                 common.EventId(0, 0, 0))
    stop_key = ((partition_id + 1),
                timestamp,
                common.EventId(0, 0, 0))

    encoded_start_key = db_def.encode_key(start_key)
    encoded_stop_key = db_def.encode_key(stop_key)

    min_entries = limit.min_entries or 0
    max_entries = None
    encoded_duration_key = None

    if limit.size is not None:
        stat = env.ext_stat(txn, common.DbType.TIMESERIES_DATA)

        if stat['entries']:
            total_size = stat['psize'] * (stat['branch_pages'] +
                                          stat['leaf_pages'] +
                                          stat['overflow_pages'])
            entry_size = total_size / stat['entries']
            max_entries = int(limit.size / entry_size)

    if limit.max_entries is not None:
        max_entries = (limit.max_entries if max_entries is None
                       else min(max_entries, limit.max_entries))

    if limit.duration is not None:
        duration_key = (partition_id,
                        now.add(-limit.duration),
                        common.EventId(0, 0, 0))
        encoded_duration_key = db_def.encode_key(duration_key)

    result_count = 0
    entries_count = _ext_get_partition_count(env, txn, partition_id)

    with env.ext_cursor(txn, common.DbType.TIMESERIES_DATA) as cursor:
        more = cursor.set_range(encoded_start_key)
        while more:
            if max_results is not None and result_count >= max_results:
                break

            if entries_count - result_count <= min_entries:
                break

            encoded_key = bytes(cursor.key())
            if encoded_key >= encoded_stop_key:
                break

            if ((max_entries is None or
                 entries_count - result_count <= max_entries) and
                    (encoded_duration_key is None or
                     encoded_key >= encoded_duration_key)):
                break

            key = db_def.decode_key(encoded_key)
            event_id = key[2]

            more = cursor.delete()
            result_count += 1

            yield event_id, common.TimeseriesEventRef(key)

    if result_count > 0:
        _ext_set_partition_count(env, txn, partition_id,
                                 entries_count - result_count)


def _query_partition_changes(changes, params, filter):
    if params.order == common.Order.DESCENDING:
        events = (event for _, event in reversed(changes))

        if (params.order_by == common.OrderBy.TIMESTAMP and
                params.t_to is not None):
            events = itertools.dropwhile(
                lambda i: params.t_to < i.timestamp,
                events)

        elif (params.order_by == common.OrderBy.SOURCE_TIMESTAMP and
                params.source_t_to is not None):
            events = itertools.dropwhile(
                lambda i: params.source_t_to < i.source_timestamp,
                events)

        if (params.order_by == common.OrderBy.TIMESTAMP and
                params.t_from is not None):
            events = itertools.takewhile(
                lambda i: params.t_from <= i.timestamp,
                events)

        elif (params.order_by == common.OrderBy.SOURCE_TIMESTAMP and
                params.source_t_from is not None):
            events = itertools.takewhile(
                lambda i: params.source_t_from <= i.source_timestamp,
                events)

    elif params.order == common.Order.ASCENDING:
        events = (event for _, event in changes)

        if (params.order_by == common.OrderBy.TIMESTAMP and
                params.t_from is not None):
            events = itertools.dropwhile(
                lambda i: i.timestamp < params.t_from,
                events)

        elif (params.order_by == common.OrderBy.SOURCE_TIMESTAMP and
                params.source_t_from is not None):
            events = itertools.dropwhile(
                lambda i: i.source_timestamp < params.source_t_from,
                events)

        if (params.order_by == common.OrderBy.TIMESTAMP and
                params.t_to is not None):
            events = itertools.takewhile(
                lambda i: i.timestamp <= params.t_to,
                events)

        elif (params.order_by == common.OrderBy.SOURCE_TIMESTAMP and
                params.source_t_to is not None):
            events = itertools.takewhile(
                lambda i: i.source_timestamp <= params.source_t_to,
                events)

    else:
        raise ValueError('unsupported order')

    return filter.process(events)


class _Filter:

    def __init__(self,
                 subscription: common.Subscription,
                 t_from: common.Timestamp | None,
                 t_to: common.Timestamp | None,
                 source_t_from: common.Timestamp | None,
                 source_t_to: common.Timestamp | None,
                 max_results: int,
                 last_event_id: common.EventId | None):
        self._subscription = subscription
        self._t_from = t_from
        self._t_to = t_to
        self._source_t_from = source_t_from
        self._source_t_to = source_t_to
        self._max_results = max_results
        self._last_event_id = last_event_id

    @property
    def done(self):
        return self._max_results < 1

    def process(self, events: Iterable[common.Event]):
        for event in events:
            if self._max_results < 1:
                return

            if self._last_event_id:
                if event.id == self._last_event_id:
                    self._last_event_id = None

                continue

            if self._t_from is not None and event.timestamp < self._t_from:
                continue

            if self._t_to is not None and self._t_to < event.timestamp:
                continue

            if self._source_t_from is not None and (
                    event.source_timestamp is None or
                    event.source_timestamp < self._source_t_from):
                continue

            if self._source_t_to is not None and (
                    event.source_timestamp is None or
                    self._source_t_to < event.source_timestamp):
                continue

            if (self._subscription and
                    not self._subscription.matches(event.type)):
                continue

            self._max_results -= 1
            yield event
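The partition and limit machinery above is easier to follow with a concrete configuration. The sketch below is illustrative only and not part of the package: `env`, `txn` and `conditions` are assumed to come from hat.event.backends.lmdb.environment and hat.event.backends.lmdb.conditions, and the example event type is made up.

# Hypothetical usage sketch (not package content): declaring a partition
# with retention limits and driving the buffered-write cycle.
from hat.event.backends.lmdb import common, timeseriesdb

partition = timeseriesdb.Partition(
    order_by=common.OrderBy.TIMESTAMP,
    subscription=common.create_subscription([('example', '*')]),
    limit=timeseriesdb.Limit(min_entries=100,
                             max_entries=10_000,
                             duration=24 * 3600))

# Inside an LMDB write transaction (env/txn/conditions assumed):
# db = timeseriesdb.ext_create(env, txn, conditions, [partition])
# refs = list(db.add(event))       # buffer the event, yield TimeseriesEventRef
# changes = db.create_changes()    # swap out the buffered changes
# db.ext_write(txn, changes)       # persist events and partition counts
# db.ext_cleanup(txn, now)         # enforce Limit (entries/size/duration)

Note the constraint visible in query(): a partition only serves queries whose order_by matches its own, so timelines ordered by TIMESTAMP and by SOURCE_TIMESTAMP must live in separate partitions.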
hat/event/backends/memory.py
@@ -0,0 +1,178 @@
"""Simple memory backend

All registered events are stored in single unsorted continuous event list.

"""

import collections

from hat import aio

from hat.event import common


class MemoryBackend(common.Backend):

    def __init__(self, conf, registered_events_cb, flushed_events_cb):
        self._registered_events_cb = registered_events_cb
        self._flushed_events_cb = flushed_events_cb
        self._async_group = aio.Group()
        self._events = collections.deque()

    @property
    def async_group(self):
        return self._async_group

    async def get_last_event_id(self, server_id):
        event_ids = (e.id for e in self._events
                     if e.id.server == server_id)
        default = common.EventId(server=server_id, session=0, instance=0)
        return max(event_ids, default=default)

    async def register(self, events):
        self._events.extend(events)

        if self._registered_events_cb:
            await aio.call(self._registered_events_cb, events)

        if self._flushed_events_cb:
            await aio.call(self._flushed_events_cb, events)

        return events

    async def query(self, params):
        if isinstance(params, common.QueryLatestParams):
            return self._query_latest(params)

        if isinstance(params, common.QueryTimeseriesParams):
            return self._query_timeseries(params)

        if isinstance(params, common.QueryServerParams):
            return self._query_server(params)

        raise ValueError('unsupported params type')

    async def flush(self):
        pass

    def _query_latest(self, params):
        events = self._events

        if params.event_types is not None:
            events = _filter_event_types(events, params.event_types)

        result = {}
        for event in events:
            previous = result.get(event.type)
            if previous is None or previous < event:
                result[event.type] = event

        return common.QueryResult(events=list(result.values()),
                                  more_follows=False)

    def _query_timeseries(self, params):
        events = self._events

        if params.event_types is not None:
            events = _filter_event_types(events, params.event_types)

        if params.t_from is not None:
            events = _filter_t_from(events, params.t_from)

        if params.t_to is not None:
            events = _filter_t_to(events, params.t_to)

        if params.source_t_from is not None:
            events = _filter_source_t_from(events, params.source_t_from)

        if params.source_t_to is not None:
            events = _filter_source_t_to(events, params.source_t_to)

        if params.order_by == common.OrderBy.TIMESTAMP:
            sort_key = lambda event: (event.timestamp, event)  # NOQA
        elif params.order_by == common.OrderBy.SOURCE_TIMESTAMP:
            sort_key = lambda event: (event.source_timestamp, event)  # NOQA
        else:
            raise ValueError('invalid order by')

        if params.order == common.Order.ASCENDING:
            sort_reverse = False
        elif params.order == common.Order.DESCENDING:
            sort_reverse = True
        else:
            raise ValueError('invalid order')

        events = sorted(events, key=sort_key, reverse=sort_reverse)

        if params.last_event_id and events:
            for i, event in enumerate(events):
                if event.id == params.last_event_id:
                    break
            events = events[i+1:]

        if params.max_results is not None and len(events) > params.max_results:
            more_follows = True
            events = events[:params.max_results]
        else:
            more_follows = False

        return common.QueryResult(events=events,
                                  more_follows=more_follows)

    def _query_server(self, params):
        events = sorted(_filter_server_id(self._events, params.server_id))

        if params.last_event_id and events:
            for i, event in enumerate(events):
                if event.id > params.last_event_id:
                    break
            events = events[i:]

        if params.max_results is not None and len(events) > params.max_results:
            more_follows = True
            events = events[:params.max_results]
        else:
            more_follows = False

        return common.QueryResult(events=events,
                                  more_follows=more_follows)


info = common.BackendInfo(MemoryBackend)


def _filter_event_types(events, event_types):
    subscription = common.create_subscription(event_types)
    for event in events:
        if subscription.matches(event.type):
            yield event


def _filter_t_from(events, t_from):
    for event in events:
        if event.timestamp >= t_from:
            yield event


def _filter_t_to(events, t_to):
    for event in events:
        if event.timestamp <= t_to:
            yield event


def _filter_source_t_from(events, source_t_from):
    for event in events:
        if event.source_timestamp >= source_t_from:
            yield event


def _filter_source_t_to(events, source_t_to):
    for event in events:
        if event.source_timestamp <= source_t_to:
            yield event


def _filter_server_id(events, server_id):
    for event in events:
        if event.id.server == server_id:
            yield event
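As a rough illustration of the Backend interface this module implements, here is a hedged sketch, not part of the package, that registers one event and queries the latest state back. The Event and EventId constructors mirror the fields accessed above; common.now() and the QueryLatestParams(event_types=...) signature are assumptions based on how hat.event.common is used elsewhere in this diff.

# Hypothetical usage sketch (not package content).
import asyncio

from hat.event import common
from hat.event.backends import memory


async def main():
    backend = memory.MemoryBackend(conf=None,
                                   registered_events_cb=None,
                                   flushed_events_cb=None)

    event = common.Event(
        id=common.EventId(server=1, session=1, instance=1),
        type=('example', 'a'),
        timestamp=common.now(),  # assumed helper returning common.Timestamp
        source_timestamp=None,
        payload=None)

    await backend.register([event])

    result = await backend.query(
        common.QueryLatestParams(event_types=[('example', '*')]))
    print(result.events, result.more_follows)


asyncio.run(main())

As the module docstring says, all events sit in one unsorted deque, so _query_timeseries re-sorts on every call: the design trades query cost for registration simplicity, which suits tests and small setups rather than large stores.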