eventsourcing 9.5.0b3__py3-none-any.whl
This diff shows the content of this publicly released package version as it appears in its public registry, and is provided for informational purposes only.
- eventsourcing/__init__.py +0 -0
- eventsourcing/application.py +998 -0
- eventsourcing/cipher.py +107 -0
- eventsourcing/compressor.py +15 -0
- eventsourcing/cryptography.py +91 -0
- eventsourcing/dcb/__init__.py +0 -0
- eventsourcing/dcb/api.py +144 -0
- eventsourcing/dcb/application.py +159 -0
- eventsourcing/dcb/domain.py +369 -0
- eventsourcing/dcb/msgpack.py +38 -0
- eventsourcing/dcb/persistence.py +193 -0
- eventsourcing/dcb/popo.py +178 -0
- eventsourcing/dcb/postgres_tt.py +704 -0
- eventsourcing/dcb/tests.py +608 -0
- eventsourcing/dispatch.py +80 -0
- eventsourcing/domain.py +1964 -0
- eventsourcing/interface.py +164 -0
- eventsourcing/persistence.py +1429 -0
- eventsourcing/popo.py +267 -0
- eventsourcing/postgres.py +1441 -0
- eventsourcing/projection.py +502 -0
- eventsourcing/py.typed +0 -0
- eventsourcing/sqlite.py +816 -0
- eventsourcing/system.py +1203 -0
- eventsourcing/tests/__init__.py +3 -0
- eventsourcing/tests/application.py +483 -0
- eventsourcing/tests/domain.py +105 -0
- eventsourcing/tests/persistence.py +1744 -0
- eventsourcing/tests/postgres_utils.py +131 -0
- eventsourcing/utils.py +257 -0
- eventsourcing-9.5.0b3.dist-info/METADATA +253 -0
- eventsourcing-9.5.0b3.dist-info/RECORD +35 -0
- eventsourcing-9.5.0b3.dist-info/WHEEL +4 -0
- eventsourcing-9.5.0b3.dist-info/licenses/AUTHORS +10 -0
- eventsourcing-9.5.0b3.dist-info/licenses/LICENSE +29 -0
eventsourcing/dcb/postgres_tt.py

@@ -0,0 +1,704 @@

from __future__ import annotations

import threading
from typing import TYPE_CHECKING, Any, NamedTuple

from psycopg.generators import notifies
from psycopg.sql import SQL, Composed, Identifier

from eventsourcing.dcb.api import (
    DCBAppendCondition,
    DCBEvent,
    DCBQuery,
    DCBQueryItem,
    DCBReadResponse,
    DCBRecorder,
    DCBSequencedEvent,
)
from eventsourcing.dcb.persistence import (
    DCBInfrastructureFactory,
    DCBListenNotifySubscription,
)
from eventsourcing.dcb.popo import SimpleDCBReadResponse
from eventsourcing.persistence import IntegrityError, InternalError, ProgrammingError
from eventsourcing.postgres import (
    NO_TRACEBACK,
    BasePostgresFactory,
    PostgresDatastore,
    PostgresRecorder,
    PostgresTrackingRecorder,
)

if TYPE_CHECKING:
    from collections.abc import Sequence

    from psycopg import Connection, Cursor
    from psycopg.abc import Params
    from psycopg.rows import DictRow

DB_TYPE_NAME_DCB_EVENT_TT = "dcb_event_tt"

DB_TYPE_DCB_EVENT = SQL("""
    CREATE TYPE {schema}.{event_type} AS (
        type text,
        data bytea,
        tags text[]
    )
""")

DB_TYPE_NAME_DCB_QUERY_ITEM_TT = "dcb_query_item_tt"

DB_TYPE_DCB_QUERY_ITEM = SQL("""
    CREATE TYPE {schema}.{query_item_type} AS (
        types text[],
        tags text[]
    )
""")

DB_TABLE_DCB_EVENTS = SQL("""
    CREATE TABLE IF NOT EXISTS {schema}.{events_table} (
        id bigserial,
        type text NOT NULL,
        data bytea,
        tags text[] NOT NULL
    ) WITH (
        autovacuum_enabled = true,
        autovacuum_vacuum_threshold = 100000000,  -- Effectively disables VACUUM
        autovacuum_vacuum_scale_factor = 0.5,     -- Same here, high scale factor
        autovacuum_analyze_threshold = 1000,      -- Triggers ANALYZE more often
        autovacuum_analyze_scale_factor = 0.01    -- Triggers after 1% new rows
    )
""")

DB_INDEX_UNIQUE_ID_COVER_TYPE = SQL("""
    CREATE UNIQUE INDEX IF NOT EXISTS {id_cover_type_index} ON
    {schema}.{events_table} (id) INCLUDE (type)
""")

DB_TABLE_DCB_TAGS = SQL("""
    CREATE TABLE IF NOT EXISTS {schema}.{tags_table} (
        tag text,
        main_id bigint REFERENCES {events_table} (id)
    ) WITH (
        autovacuum_enabled = true,
        autovacuum_vacuum_threshold = 100000000,  -- Effectively disables VACUUM
        autovacuum_vacuum_scale_factor = 0.5,     -- Same here, high scale factor
        autovacuum_analyze_threshold = 1000,      -- Triggers ANALYZE more often
        autovacuum_analyze_scale_factor = 0.01    -- Triggers after 1% new rows
    )
""")

DB_INDEX_TAG_MAIN_ID = SQL("""
    CREATE INDEX IF NOT EXISTS {tag_main_id_index} ON
    {schema}.{tags_table} (tag, main_id)
""")

SQL_SELECT_ALL = SQL("""
    SELECT * FROM {schema}.{events_table}
    WHERE id > COALESCE(%(after)s, 0)
    ORDER BY id ASC
    LIMIT COALESCE(%(limit)s, 9223372036854775807)
""")

SQL_SELECT_EVENTS_BY_TYPE = SQL("""
    SELECT * FROM {schema}.{events_table}
    WHERE type = %(event_type)s
    AND id > COALESCE(%(after)s, 0)
    ORDER BY id ASC
    LIMIT COALESCE(%(limit)s, 9223372036854775807)
""")

SQL_SELECT_MAX_ID = SQL("""
    SELECT MAX(id) FROM {schema}.{events_table}
""")

SQL_SELECT_BY_TAGS = SQL("""
    WITH query_items AS (
        SELECT * FROM unnest(
            %(query_items)s::{schema}.{query_item_type}[]
        ) WITH ORDINALITY
    ),
    initial_matches AS (
        SELECT
            t.main_id,
            qi.ordinality,
            t.tag,
            qi.tags AS required_tags,
            qi.types AS allowed_types
        FROM query_items qi
        JOIN {schema}.{tags_table} t
            ON t.tag = ANY(qi.tags)
        WHERE t.main_id > COALESCE(%(after)s, 0)
    ),
    matched_groups AS (
        SELECT
            main_id,
            ordinality,
            COUNT(DISTINCT tag) AS matched_tag_count,
            array_length(required_tags, 1) AS required_tag_count,
            allowed_types
        FROM initial_matches
        GROUP BY main_id, ordinality, required_tag_count, allowed_types
    ),
    qualified_ids AS (
        SELECT main_id, allowed_types
        FROM matched_groups
        WHERE matched_tag_count = required_tag_count
    ),
    filtered_ids AS (
        SELECT m.id
        FROM {schema}.{events_table} m
        JOIN qualified_ids q ON q.main_id = m.id
        WHERE
            m.id > COALESCE(%(after)s, 0)
            AND (
                array_length(q.allowed_types, 1) IS NULL
                OR array_length(q.allowed_types, 1) = 0
                OR m.type = ANY(q.allowed_types)
            )
        ORDER BY m.id ASC
        LIMIT COALESCE(%(limit)s, 9223372036854775807)
    )
    SELECT *
    FROM {schema}.{events_table} m
    WHERE m.id IN (SELECT id FROM filtered_ids)
    ORDER BY m.id ASC;
""")

SQL_UNCONDITIONAL_APPEND = SQL("""
    SELECT * FROM {schema}.{unconditional_append}(%(events)s)
""")
DB_FUNCTION_NAME_DCB_UNCONDITIONAL_APPEND_TT = "dcb_unconditional_append_tt"
DB_FUNCTION_UNCONDITIONAL_APPEND = SQL("""
    CREATE OR REPLACE FUNCTION {schema}.{unconditional_append}(
        new_events {schema}.{event_type}[]
    ) RETURNS SETOF bigint
    LANGUAGE plpgsql AS $$
    BEGIN
        RETURN QUERY
        WITH new_data AS (
            SELECT * FROM unnest(new_events)
        ),
        inserted AS (
            INSERT INTO {schema}.{events_table} (type, data, tags)
            SELECT type, data, tags
            FROM new_data
            RETURNING id, tags
        ),
        expanded_tags AS (
            SELECT ins.id AS main_id, tag
            FROM inserted ins,
                unnest(ins.tags) AS tag
        ),
        tag_insert AS (
            INSERT INTO {schema}.{tags_table} (tag, main_id)
            SELECT tag, main_id
            FROM expanded_tags
        )
        SELECT MAX(id) FROM inserted;
        NOTIFY {channel};

    END
    $$;
""")

SQL_CONDITIONAL_APPEND = SQL("""
    SELECT * FROM {schema}.{conditional_append}(%(query_items)s, %(after)s, %(events)s)
""")
DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT = "dcb_conditional_append_tt"
DB_FUNCTION_CONDITIONAL_APPEND = SQL("""
    CREATE OR REPLACE FUNCTION {schema}.{conditional_append}(
        query_items {schema}.{query_item_type}[],
        after_id bigint,
        new_events {schema}.{event_type}[]
    ) RETURNS SETOF bigint
    LANGUAGE plpgsql AS $$
    DECLARE
        conflict_exists boolean;
    BEGIN
        -- Step 0: Lock table in exclusive mode (reads can still read)
        SET LOCAL lock_timeout = '{lock_timeout}s';
        LOCK TABLE {schema}.{events_table} IN EXCLUSIVE MODE;

        -- Step 1: Check for conflicts
        WITH query_items_cte AS (
            SELECT * FROM unnest(query_items) WITH ORDINALITY
        ),
        initial_matches AS (
            SELECT
                t.main_id,
                qi.ordinality,
                t.tag,
                qi.tags AS required_tags,
                qi.types AS allowed_types
            FROM query_items_cte qi
            JOIN {schema}.{tags_table} t
                ON t.tag = ANY(qi.tags)
            WHERE t.main_id > COALESCE(after_id, 0)
        ),
        matched_groups AS (
            SELECT
                main_id,
                ordinality,
                COUNT(DISTINCT tag) AS matched_tag_count,
                array_length(required_tags, 1) AS required_tag_count,
                allowed_types
            FROM initial_matches
            GROUP BY main_id, ordinality, required_tag_count, allowed_types
        ),
        qualified_ids AS (
            SELECT main_id, allowed_types
            FROM matched_groups
            WHERE matched_tag_count = required_tag_count
        ),
        filtered_ids AS (
            SELECT m.id
            FROM {schema}.{events_table} m
            JOIN qualified_ids q ON q.main_id = m.id
            WHERE
                m.id > COALESCE(after_id, 0)
                AND (
                    array_length(q.allowed_types, 1) IS NULL
                    OR array_length(q.allowed_types, 1) = 0
                    OR m.type = ANY(q.allowed_types)
                )
            LIMIT 1
        )
        SELECT EXISTS (SELECT 1 FROM filtered_ids)
        INTO conflict_exists;

        -- Step 2: Insert if no conflicts
        IF NOT conflict_exists THEN
            RETURN QUERY
            WITH new_data AS (
                SELECT * FROM unnest(new_events)
            ),
            inserted AS (
                INSERT INTO {schema}.{events_table} (type, data, tags)
                SELECT type, data, tags
                FROM new_data
                RETURNING id, tags
            ),
            expanded_tags AS (
                SELECT ins.id AS main_id, tag
                FROM inserted ins,
                    unnest(ins.tags) AS tag
            ),
            tag_insert AS (
                INSERT INTO {schema}.{tags_table} (tag, main_id)
                SELECT tag, main_id
                FROM expanded_tags
            )
            SELECT MAX(id) FROM inserted;
            NOTIFY {channel};

        END IF;

        -- If conflict exists, return empty result
        RETURN;
    END
    $$;
""")

SQL_SET_LOCAL_LOCK_TIMEOUT = SQL("SET LOCAL lock_timeout = '{lock_timeout}s'")
SQL_LOCK_TABLE = SQL("LOCK TABLE {schema}.{events_table} IN EXCLUSIVE MODE")

SQL_EXPLAIN = SQL("EXPLAIN")
SQL_EXPLAIN_ANALYZE = SQL("EXPLAIN (ANALYZE, BUFFERS, VERBOSE)")


class PostgresDCBRecorderTT(DCBRecorder, PostgresRecorder):
    def __init__(
        self,
        datastore: PostgresDatastore,
        *,
        events_table_name: str = "dcb_events",
    ):
        super().__init__(datastore)
        # Define identifiers.
        self.events_table_name = events_table_name + "_tt_main"
        self.channel_name = self.events_table_name.replace(".", "_")
        self.tags_table_name = events_table_name + "_tt_tag"
        self.index_name_id_cover_type = self.events_table_name + "_idx_id_type"
        self.index_name_tag_main_id = self.tags_table_name + "_idx_tag_main_id"

        # Check identifier lengths.
        self.check_identifier_length(self.events_table_name)
        self.check_identifier_length(self.tags_table_name)
        self.check_identifier_length(self.index_name_id_cover_type)
        self.check_identifier_length(self.index_name_tag_main_id)
        self.check_identifier_length(DB_TYPE_NAME_DCB_EVENT_TT)
        self.check_identifier_length(DB_TYPE_NAME_DCB_QUERY_ITEM_TT)

        # Register composite database types.
        self.datastore.db_type_names.add(DB_TYPE_NAME_DCB_EVENT_TT)
        self.datastore.db_type_names.add(DB_TYPE_NAME_DCB_QUERY_ITEM_TT)
        self.datastore.register_type_adapters()

        # Define SQL template keyword arguments.
        self.sql_kwargs = {
            "schema": Identifier(self.datastore.schema),
            "events_table": Identifier(self.events_table_name),
            "channel": Identifier(self.channel_name),
            "tags_table": Identifier(self.tags_table_name),
            "event_type": Identifier(DB_TYPE_NAME_DCB_EVENT_TT),
            "query_item_type": Identifier(DB_TYPE_NAME_DCB_QUERY_ITEM_TT),
            "id_cover_type_index": Identifier(self.index_name_id_cover_type),
            "tag_main_id_index": Identifier(self.index_name_tag_main_id),
            "unconditional_append": Identifier(
                DB_FUNCTION_NAME_DCB_UNCONDITIONAL_APPEND_TT
            ),
            "conditional_append": Identifier(
                DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT
            ),
            "lock_timeout": self.datastore.lock_timeout,
        }

        # Format and extend SQL create statements.
        self.sql_create_statements.extend(
            [
                self.format(DB_TYPE_DCB_EVENT),
                self.format(DB_TYPE_DCB_QUERY_ITEM),
                self.format(DB_TABLE_DCB_EVENTS),
                self.format(DB_INDEX_UNIQUE_ID_COVER_TYPE),
                self.format(DB_TABLE_DCB_TAGS),
                self.format(DB_INDEX_TAG_MAIN_ID),
                self.format(DB_FUNCTION_UNCONDITIONAL_APPEND),
                self.format(DB_FUNCTION_CONDITIONAL_APPEND),
            ]
        )

        # Format other SQL statements.
        self.sql_select_by_tags = self.format(SQL_SELECT_BY_TAGS)
        self.sql_select_all = self.format(SQL_SELECT_ALL)
        self.sql_select_by_type = self.format(SQL_SELECT_EVENTS_BY_TYPE)
        self.sql_select_max_id = self.format(SQL_SELECT_MAX_ID)
        self.sql_unconditional_append = self.format(SQL_UNCONDITIONAL_APPEND)
        self.sql_conditional_append = self.format(SQL_CONDITIONAL_APPEND)
        self.sql_set_local_lock_timeout = self.format(SQL_SET_LOCAL_LOCK_TIMEOUT)
        self.sql_lock_table = self.format(SQL_LOCK_TABLE)

    def format(self, sql: SQL) -> Composed:
        return sql.format(**self.sql_kwargs)

    def read(
        self,
        query: DCBQuery | None = None,
        *,
        after: int | None = None,
        limit: int | None = None,
    ) -> DCBReadResponse:
        with self.datastore.cursor() as curs:
            events, head = self._read(
                curs=curs,
                query=query,
                after=after,
                limit=limit,
                return_head=True,
            )
            # TODO: Actually return an iterator from _read()!
            return SimpleDCBReadResponse(events=iter(events), head=head)
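
    # A hypothetical usage sketch (the constructor keywords for DCBQuery and
    # DCBQueryItem are assumed from the attribute access in this module, not
    # confirmed by the diff):
    #
    #   query = DCBQuery(
    #       items=[DCBQueryItem(types=["CourseRenamed"], tags=["course:c1"])]
    #   )
    #   response = recorder.read(query, after=None, limit=None)
    #   for sequenced in response.events:
    #       print(sequenced.position, sequenced.event.type, sequenced.event.tags)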

    def _read(
        self,
        curs: Cursor[DictRow],
        query: DCBQuery | None = None,
        *,
        after: int | None = None,
        limit: int | None = None,
        return_head: bool = True,
    ) -> tuple[Sequence[DCBSequencedEvent], int | None]:
        if return_head and limit is None:
            self.execute(curs, self.sql_select_max_id, explain=False)
            row = curs.fetchone()
            head = None if row is None else row["max"]
        else:
            head = None

        if not query or not query.items:
            # Select all.
            self.execute(
                curs,
                self.sql_select_all,
                {
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        elif self.all_query_items_have_tags(query):
            # Select with tags.
            psycopg_dcb_query_items = self.construct_psycopg_query_items(query.items)

            self.execute(
                curs,
                self.sql_select_by_tags,
                {
                    "query_items": psycopg_dcb_query_items,
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        elif self.has_one_query_item_one_type(query):
            # Select for one type.
            self.execute(
                curs,
                self.sql_select_by_type,
                {
                    "event_type": query.items[0].types[0],
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        else:
            msg = f"Unsupported query: {query}"
            raise ProgrammingError(msg)

        events = [
            DCBSequencedEvent(
                event=DCBEvent(
                    type=row["type"],
                    data=row["data"],
                    tags=row["tags"],
                ),
                position=row["id"],
            )
            for row in rows
        ]

        # Maybe update head.
        if return_head and events:
            head = max(head or 0, *[e.position for e in events])

        return events, head

    def subscribe(
        self,
        query: DCBQuery | None = None,
        *,
        after: int | None = None,
    ) -> PostgresDCBSubscription:
        return PostgresDCBSubscription(
            recorder=self,
            query=query,
            after=after,
        )

    def append(
        self, events: Sequence[DCBEvent], condition: DCBAppendCondition | None = None
    ) -> int:
        assert len(events) > 0
        psycopg_dcb_events = self.construct_psycopg_dcb_events(events)

        # Do single-statement "unconditional append".
        if condition is None:
            with self.datastore.cursor() as curs:
                return self._unconditional_append(curs, psycopg_dcb_events)

        if self.all_query_items_have_tags(condition.fail_if_events_match):
            # Do single-statement "conditional append".
            psycopg_dcb_query_items = self.construct_psycopg_query_items(
                condition.fail_if_events_match.items
            )
            with self.datastore.cursor() as curs:
                self.execute(
                    curs,
                    self.sql_conditional_append,
                    {
                        "query_items": psycopg_dcb_query_items,
                        "after": condition.after,
                        "events": psycopg_dcb_events,
                    },
                    explain=False,
                )
                row = curs.fetchone()
                if row is None:
                    raise IntegrityError

                return row[DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT]

        # Do separate "read" and "append" operations in a transaction.
        with self.datastore.transaction(commit=True) as curs:

            # Lock the table in exclusive mode (readers can still read) to ensure
            # nothing else will execute an append condition statement until after
            # we have finished inserting new events, whilst expecting that others
            # are playing by the same rules. By the way, this is how optimistic
            # locking works.
            if self.datastore.lock_timeout:
                curs.execute(self.sql_set_local_lock_timeout)
            curs.execute(self.sql_lock_table)

            # Check the append condition.
            failed, _ = self._read(
                curs=curs,
                query=condition.fail_if_events_match,
                after=condition.after,
                limit=1,
                return_head=False,
            )
            if failed:
                raise IntegrityError(failed)

            # If okay, then do an "unconditional append".
            return self._unconditional_append(curs, psycopg_dcb_events)
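
    # To summarize the three paths above: no condition means a plain batched
    # insert; a condition whose query items all carry tags is checked and
    # appended atomically inside the plpgsql function; any other condition
    # falls back to an explicit table lock, a re-read of the condition query,
    # and an unconditional append, all in one transaction.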

    def _unconditional_append(
        self, curs: Cursor[DictRow], psycopg_dcb_events: list[PsycopgDCBEvent]
    ) -> int:
        self.execute(
            curs,
            self.sql_unconditional_append,
            {
                "events": psycopg_dcb_events,
            },
            explain=False,
        )
        row = curs.fetchone()
        if row is None:  # pragma: no cover
            msg = "Shouldn't get here"
            raise InternalError(msg)

        return row[DB_FUNCTION_NAME_DCB_UNCONDITIONAL_APPEND_TT]

    def construct_psycopg_dcb_events(
        self, dcb_events: Sequence[DCBEvent]
    ) -> list[PsycopgDCBEvent]:
        return [
            self.datastore.psycopg_python_types[DB_TYPE_NAME_DCB_EVENT_TT](
                type=e.type,
                data=e.data,
                tags=e.tags,
            )
            for e in dcb_events
        ]

    def construct_psycopg_query_items(
        self, query_items: Sequence[DCBQueryItem]
    ) -> list[PsycopgDCBQueryItem]:
        return [
            self.datastore.psycopg_python_types[DB_TYPE_NAME_DCB_QUERY_ITEM_TT](
                types=q.types,
                tags=q.tags,
            )
            for q in query_items
        ]

    def has_one_query_item_one_type(self, query: DCBQuery) -> bool:
        return (
            len(query.items) == 1
            and len(query.items[0].types) == 1
            and len(query.items[0].tags) == 0
        )

    def all_query_items_have_tags(self, query: DCBQuery) -> bool:
        return all(len(q.tags) > 0 for q in query.items) and len(query.items) > 0

    def execute(
        self,
        cursor: Cursor[DictRow],
        statement: Composed,
        params: Params | None = None,
        *,
        explain: bool = False,
        prepare: bool = True,
    ) -> None:
        if explain:  # pragma: no cover
            print()  # noqa: T201
            print("Statement:", statement.as_string().strip())  # noqa: T201
            print("Params:", params)  # noqa: T201
            print()  # noqa: T201
            # with self.datastore.transaction(commit=False) as explain_cursor:
            #     explain_cursor.execute(SQL_EXPLAIN + statement, params)
            #     rows = explain_cursor.fetchall()
            #     print("\n".join([r["QUERY PLAN"] for r in rows]))  # noqa: T201
            #     print()  # noqa: T201
            cursor.execute(SQL_EXPLAIN + statement, params)
            rows = cursor.fetchall()
            print("\n".join([r["QUERY PLAN"] for r in rows]))  # noqa: T201
            print()  # noqa: T201
        cursor.execute(statement, params, prepare=prepare)
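
    # Passing explain=True is a debugging aid: it prints the statement, its
    # parameters, and the EXPLAIN plan before executing the statement itself,
    # at the cost of an extra round trip, which is why every call site in
    # this module passes explain=False.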


class PsycopgDCBEvent(NamedTuple):
    type: str
    data: bytes
    tags: list[str]


class PsycopgDCBQueryItem(NamedTuple):
    types: list[str]
    tags: list[str]


class PostgresDCBSubscription(DCBListenNotifySubscription[PostgresDCBRecorderTT]):
    def __init__(
        self,
        recorder: PostgresDCBRecorderTT,
        query: DCBQuery | None = None,
        after: int | None = None,
    ) -> None:
        super().__init__(recorder=recorder, query=query, after=after)
        self._has_listen_connection = threading.Event()
        self._listen_connection: Connection[dict[str, Any]] | None = None
        self._listen_thread = threading.Thread(target=self._listen, daemon=True)
        self._listen_thread.start()

    def __exit__(self, *args: object, **kwargs: Any) -> None:
        super().__exit__(*args, **kwargs)
        self._listen_thread.join()

    def _listen(self) -> None:
        recorder = self._recorder
        assert isinstance(recorder, PostgresDCBRecorderTT)
        try:
            with recorder.datastore.get_connection() as conn:
                self._listen_connection = conn
                self._has_listen_connection.set()
                conn.execute(
                    SQL("LISTEN {0}").format(Identifier(recorder.channel_name))
                )
                while not self._has_been_stopped and not self._thread_error:
                    # This block simplifies psycopg's conn.notifies(), because
                    # we aren't interested in the actual notify messages, and
                    # also we want to stop consuming notify messages when the
                    # subscription has an error or is otherwise stopped.
                    with conn.lock:
                        try:
                            if conn.wait(notifies(conn.pgconn), interval=1):
                                self._has_been_notified.set()
                        except NO_TRACEBACK as ex:  # pragma: no cover
                            raise ex.with_traceback(None) from None

        except BaseException as e:  # pragma: no cover
            if self._thread_error is None:
                self._thread_error = e
            self.stop()


class PostgresTTDCBFactory(
    BasePostgresFactory[PostgresTrackingRecorder],
    DCBInfrastructureFactory[PostgresTrackingRecorder],
):
    def dcb_recorder(self) -> DCBRecorder:
        prefix = self.env.name.lower() or "dcb"

        dcb_table_name = prefix + "_events"
        recorder = PostgresDCBRecorderTT(
            datastore=self.datastore,
            events_table_name=dcb_table_name,
        )
        if self.env_create_table():
            recorder.create_table()
        return recorder