eventsourcing 9.4.6__py3-none-any.whl → 9.5.0a0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release: this version of eventsourcing might be problematic.
- eventsourcing/application.py +15 -2
- eventsourcing/dcb/__init__.py +0 -0
- eventsourcing/dcb/api.py +65 -0
- eventsourcing/dcb/application.py +116 -0
- eventsourcing/dcb/domain.py +381 -0
- eventsourcing/dcb/persistence.py +146 -0
- eventsourcing/dcb/popo.py +95 -0
- eventsourcing/dcb/postgres_tt.py +643 -0
- eventsourcing/domain.py +89 -29
- eventsourcing/persistence.py +20 -25
- eventsourcing/popo.py +2 -2
- eventsourcing/postgres.py +355 -132
- eventsourcing/sqlite.py +25 -3
- eventsourcing/tests/application.py +5 -1
- eventsourcing/tests/persistence.py +53 -80
- eventsourcing/tests/postgres_utils.py +59 -1
- eventsourcing/utils.py +7 -3
- {eventsourcing-9.4.6.dist-info → eventsourcing-9.5.0a0.dist-info}/METADATA +2 -2
- eventsourcing-9.5.0a0.dist-info/RECORD +33 -0
- eventsourcing-9.4.6.dist-info/RECORD +0 -26
- {eventsourcing-9.4.6.dist-info → eventsourcing-9.5.0a0.dist-info}/AUTHORS +0 -0
- {eventsourcing-9.4.6.dist-info → eventsourcing-9.5.0a0.dist-info}/LICENSE +0 -0
- {eventsourcing-9.4.6.dist-info → eventsourcing-9.5.0a0.dist-info}/WHEEL +0 -0

eventsourcing/dcb/postgres_tt.py (new file)

@@ -0,0 +1,643 @@
from __future__ import annotations

from typing import TYPE_CHECKING, NamedTuple

from psycopg.sql import SQL, Composed, Identifier

from eventsourcing.dcb.api import (
    DCBAppendCondition,
    DCBEvent,
    DCBQuery,
    DCBQueryItem,
    DCBRecorder,
    DCBSequencedEvent,
)
from eventsourcing.dcb.persistence import DCBInfrastructureFactory
from eventsourcing.persistence import IntegrityError, ProgrammingError
from eventsourcing.postgres import (
    PostgresDatastore,
    PostgresFactory,
    PostgresRecorder,
    PostgresTrackingRecorder,
)

if TYPE_CHECKING:
    from collections.abc import Sequence

    from psycopg import Cursor
    from psycopg.abc import Params
    from psycopg.rows import DictRow

DB_TYPE_NAME_DCB_EVENT_TT = "dcb_event_tt"

DB_TYPE_DCB_EVENT = SQL(
    """
    CREATE TYPE {schema}.{event_type} AS (
        type text,
        data bytea,
        tags text[]
    )
    """
)

DB_TYPE_NAME_DCB_QUERY_ITEM_TT = "dcb_query_item_tt"

DB_TYPE_DCB_QUERY_ITEM = SQL(
    """
    CREATE TYPE {schema}.{query_item_type} AS (
        types text[],
        tags text[]
    )
    """
)
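
These two composite types carry events and query items into SQL as typed arrays; on the Python side they correspond to the PsycopgDCBEvent and PsycopgDCBQueryItem named tuples defined at the bottom of this file, obtained through datastore.psycopg_python_types after register_type_adapters() runs. As a minimal standalone sketch of how psycopg 3 binds such a composite type (the library's actual registration happens inside PostgresDatastore, which is not part of this diff):

    # Sketch: standalone psycopg 3 composite registration, assuming a
    # database where the "dcb_event_tt" type has already been created.
    import psycopg
    from psycopg.types.composite import CompositeInfo, register_composite

    with psycopg.connect("dbname=test") as conn:
        info = CompositeInfo.fetch(conn, "dcb_event_tt")
        register_composite(info, conn)
        # info.python_type is a generated row factory, comparable to the
        # entries this recorder looks up in datastore.psycopg_python_types.
        event = info.python_type("SomethingHappened", b"{}", ["tag1"])
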

DB_TABLE_DCB_EVENTS = SQL(
    """
    CREATE TABLE IF NOT EXISTS {schema}.{events_table} (
        id bigserial,
        type text NOT NULL,
        data bytea,
        tags text[] NOT NULL
    ) WITH (
        autovacuum_enabled = true,
        autovacuum_vacuum_threshold = 100000000,  -- Effectively disables VACUUM
        autovacuum_vacuum_scale_factor = 0.5,     -- Same here, high scale factor
        autovacuum_analyze_threshold = 1000,      -- Triggers ANALYZE more often
        autovacuum_analyze_scale_factor = 0.01    -- Triggers after 1% new rows
    )
    """
)

DB_INDEX_UNIQUE_ID_COVER_TYPE = SQL(
    """
    CREATE UNIQUE INDEX IF NOT EXISTS {id_cover_type_index} ON
    {schema}.{events_table} (id) INCLUDE (type)
    """
)

DB_TABLE_DCB_TAGS = SQL(
    """
    CREATE TABLE IF NOT EXISTS {schema}.{tags_table} (
        tag text,
        type text,
        main_id bigint REFERENCES {events_table} (id)
    ) WITH (
        autovacuum_enabled = true,
        autovacuum_vacuum_threshold = 100000000,  -- Effectively disables VACUUM
        autovacuum_vacuum_scale_factor = 0.5,     -- Same here, high scale factor
        autovacuum_analyze_threshold = 1000,      -- Triggers ANALYZE more often
        autovacuum_analyze_scale_factor = 0.01    -- Triggers after 1% new rows
    )
    """
)

DB_INDEX_TAG_MAIN_ID = SQL(
    """
    CREATE INDEX IF NOT EXISTS {tag_main_id_index} ON
    {schema}.{tags_table} (tag, main_id)
    """
)
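
The storage design splits each event across two tables: one row in the main events table, plus one row per tag in the tags table, so the (tag, main_id) index can resolve tag queries without scanning event rows. A sketch of the fan-out, with hypothetical values:

    # Sketch: one appended event becomes one main row and N tag rows.
    event = {"type": "StudentEnrolled", "data": b"{}", "tags": ["course:c1", "student:s1"]}
    main_row = {"id": 1, **event}  # one row in dcb_events_tt_main
    tag_rows = [                   # one row per tag in dcb_events_tt_tag
        {"tag": t, "type": event["type"], "main_id": 1} for t in event["tags"]
    ]
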

SQL_SELECT_ALL = SQL(
    """
    SELECT * FROM {schema}.{events_table}
    WHERE id > COALESCE(%(after)s, 0)
    ORDER BY id ASC
    LIMIT COALESCE(%(limit)s, 9223372036854775807)
    """
)

SQL_SELECT_EVENTS_BY_TYPE = SQL(
    """
    SELECT * FROM {schema}.{events_table}
    WHERE type = %(event_type)s
    AND id > COALESCE(%(after)s, 0)
    ORDER BY id ASC
    LIMIT COALESCE(%(limit)s, 9223372036854775807)
    """
)

SQL_SELECT_MAX_ID = SQL(
    """
    SELECT MAX(id) FROM {schema}.{events_table}
    """
)

SQL_SELECT_BY_TAGS = SQL(
    """
    WITH query_items AS (
        SELECT * FROM unnest(
            %(query_items)s::{schema}.{query_item_type}[]
        ) WITH ORDINALITY
    ),
    initial_matches AS (
        SELECT
            t.main_id,
            qi.ordinality,
            t.type,
            t.tag,
            qi.tags AS required_tags,
            qi.types AS allowed_types
        FROM query_items qi
        JOIN {schema}.{tags_table} t
            ON t.tag = ANY(qi.tags)
        WHERE t.main_id > COALESCE(%(after)s, 0)
    ),
    matched_groups AS (
        SELECT
            main_id,
            ordinality,
            COUNT(DISTINCT tag) AS matched_tag_count,
            array_length(required_tags, 1) AS required_tag_count,
            allowed_types
        FROM initial_matches
        GROUP BY main_id, ordinality, required_tag_count, allowed_types
    ),
    qualified_ids AS (
        SELECT main_id, allowed_types
        FROM matched_groups
        WHERE matched_tag_count = required_tag_count
    ),
    filtered_ids AS (
        SELECT m.id
        FROM {schema}.{events_table} m
        JOIN qualified_ids q ON q.main_id = m.id
        WHERE
            m.id > COALESCE(%(after)s, 0)
            AND (
                array_length(q.allowed_types, 1) IS NULL
                OR array_length(q.allowed_types, 1) = 0
                OR m.type = ANY(q.allowed_types)
            )
        ORDER BY m.id ASC
        LIMIT COALESCE(%(limit)s, 9223372036854775807)
    )
    SELECT *
    FROM {schema}.{events_table} m
    WHERE m.id IN (SELECT id FROM filtered_ids)
    ORDER BY m.id ASC;
    """
)
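
The CTE chain above implements the matching rule for tag queries: an event matches a query item when it carries every tag of that item (matched_tag_count equals required_tag_count) and, if the item lists any types, its own type is among them; an event qualifies when any item matches. An equivalent in-memory predicate, as a sketch:

    # Sketch: the matching rule the CTEs implement, in plain Python.
    def event_matches(event_type: str, event_tags: set[str], query_items) -> bool:
        return any(
            set(item.tags) <= event_tags                      # all required tags
            and (not item.types or event_type in item.types)  # allowed type, if any
            for item in query_items
        )
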

SQL_UNCONDITIONAL_APPEND = SQL(
    """
    WITH input AS (
        SELECT * FROM unnest(%(events)s::{event_type}[])
    ),
    inserted AS (
        INSERT INTO {schema}.{events_table} (type, data, tags)
        SELECT i.type, i.data, i.tags
        FROM input i
        RETURNING id, type, tags
    ),
    expanded_tags AS (
        SELECT
            ins.id AS main_id,
            ins.type,
            tag
        FROM inserted ins,
            unnest(ins.tags) AS tag
    ),
    tag_insert AS (
        INSERT INTO {schema}.{tags_table} (tag, type, main_id)
        SELECT tag, type, main_id
        FROM expanded_tags
    )
    SELECT id FROM inserted
    """
)

SQL_CONDITIONAL_APPEND = SQL(
    """
    SELECT * FROM {schema}.{conditional_append}(%(query_items)s, %(after)s, %(events)s)
    """
)
DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT = "dcb_conditional_append_tt"
DB_FUNCTION_CONDITIONAL_APPEND = SQL(
    """
    CREATE OR REPLACE FUNCTION {schema}.{conditional_append}(
        query_items {schema}.{query_item_type}[],
        after_id bigint,
        new_events {schema}.{event_type}[]
    ) RETURNS SETOF bigint
    LANGUAGE plpgsql AS $$
    DECLARE
        conflict_exists boolean;
    BEGIN
        -- Step 0: Lock table in exclusive mode (reads can still read)
        SET LOCAL lock_timeout = '{lock_timeout}s';
        LOCK TABLE {schema}.{events_table} IN EXCLUSIVE MODE;

        -- Step 1: Check for conflicts
        WITH query_items_cte AS (
            SELECT * FROM unnest(query_items) WITH ORDINALITY
        ),
        initial_matches AS (
            SELECT
                t.main_id,
                qi.ordinality,
                t.type,
                t.tag,
                qi.tags AS required_tags,
                qi.types AS allowed_types
            FROM query_items_cte qi
            JOIN {schema}.{tags_table} t
                ON t.tag = ANY(qi.tags)
            WHERE t.main_id > COALESCE(after_id, 0)
        ),
        matched_groups AS (
            SELECT
                main_id,
                ordinality,
                COUNT(DISTINCT tag) AS matched_tag_count,
                array_length(required_tags, 1) AS required_tag_count,
                allowed_types
            FROM initial_matches
            GROUP BY main_id, ordinality, required_tag_count, allowed_types
        ),
        qualified_ids AS (
            SELECT main_id, allowed_types
            FROM matched_groups
            WHERE matched_tag_count = required_tag_count
        ),
        filtered_ids AS (
            SELECT m.id
            FROM {schema}.{events_table} m
            JOIN qualified_ids q ON q.main_id = m.id
            WHERE
                m.id > COALESCE(after_id, 0)
                AND (
                    array_length(q.allowed_types, 1) IS NULL
                    OR array_length(q.allowed_types, 1) = 0
                    OR m.type = ANY(q.allowed_types)
                )
            LIMIT 1
        )
        SELECT EXISTS (SELECT 1 FROM filtered_ids)
        INTO conflict_exists;

        -- Step 2: Insert if no conflicts
        IF NOT conflict_exists THEN
            RETURN QUERY
            WITH new_data AS (
                SELECT * FROM unnest(new_events)
            ),
            inserted AS (
                INSERT INTO {schema}.{events_table} (type, data, tags)
                SELECT type, data, tags
                FROM new_data
                RETURNING id, type, tags
            ),
            expanded_tags AS (
                SELECT ins.id AS main_id, ins.type, tag
                FROM inserted ins,
                    unnest(ins.tags) AS tag
            ),
            tag_insert AS (
                INSERT INTO {schema}.{tags_table} (tag, type, main_id)
                SELECT tag, type, main_id
                FROM expanded_tags
            )
            SELECT id FROM inserted;
        END IF;

        -- If conflict exists, return empty result
        RETURN;
    END
    $$;
    """
)
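
This function serializes appends server-side in a single round trip: it takes an EXCLUSIVE lock on the events table (which blocks concurrent writers but not readers), re-runs the failure query above after_id, and inserts only when nothing matches; returning no rows signals a conflict. The control flow, sketched in Python with illustrative helper names:

    # Sketch: control flow of dcb_conditional_append_tt (helper names are
    # illustrative, not part of the library).
    def conditional_append(query_items, after_id, new_events):
        lock_events_table_exclusive()                     # Step 0: writers wait
        if any_event_matches(query_items, after_id):      # Step 1: conflict check
            return []                                     # empty result = conflict
        return insert_events_and_expand_tags(new_events)  # Step 2: new event ids
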

SQL_SET_LOCAL_LOCK_TIMEOUT = SQL("SET LOCAL lock_timeout = '{lock_timeout}s'")
SQL_LOCK_TABLE = SQL("LOCK TABLE {schema}.{events_table} IN EXCLUSIVE MODE")

SQL_EXPLAIN = SQL("EXPLAIN")
SQL_EXPLAIN_ANALYZE = SQL("EXPLAIN (ANALYZE, BUFFERS, VERBOSE)")


class PostgresDCBRecorderTT(DCBRecorder, PostgresRecorder):
    def __init__(
        self,
        datastore: PostgresDatastore,
        *,
        events_table_name: str = "dcb_events",
    ):
        super().__init__(datastore)
        # Define identifiers.
        self.events_table_name = events_table_name + "_tt_main"
        self.tags_table_name = events_table_name + "_tt_tag"
        self.index_name_id_cover_type = self.events_table_name + "_idx_id_type"
        self.index_name_tag_main_id = self.tags_table_name + "_idx_tag_main_id"

        # Check identifier lengths.
        self.check_identifier_length(self.events_table_name)
        self.check_identifier_length(self.tags_table_name)
        self.check_identifier_length(self.index_name_id_cover_type)
        self.check_identifier_length(self.index_name_tag_main_id)
        self.check_identifier_length(DB_TYPE_NAME_DCB_EVENT_TT)
        self.check_identifier_length(DB_TYPE_NAME_DCB_QUERY_ITEM_TT)

        # Register composite database types.
        self.datastore.db_type_names.add(DB_TYPE_NAME_DCB_EVENT_TT)
        self.datastore.db_type_names.add(DB_TYPE_NAME_DCB_QUERY_ITEM_TT)
        self.datastore.register_type_adapters()

        # Define SQL template keyword arguments.
        self.sql_kwargs = {
            "schema": Identifier(self.datastore.schema),
            "events_table": Identifier(self.events_table_name),
            "tags_table": Identifier(self.tags_table_name),
            "event_type": Identifier(DB_TYPE_NAME_DCB_EVENT_TT),
            "query_item_type": Identifier(DB_TYPE_NAME_DCB_QUERY_ITEM_TT),
            "id_cover_type_index": Identifier(self.index_name_id_cover_type),
            "tag_main_id_index": Identifier(self.index_name_tag_main_id),
            "conditional_append": Identifier(
                DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT
            ),
            "lock_timeout": self.datastore.lock_timeout,
        }

        # Format and extend SQL create statements.
        self.sql_create_statements.extend(
            [
                self.format(DB_TYPE_DCB_EVENT),
                self.format(DB_TYPE_DCB_QUERY_ITEM),
                self.format(DB_TABLE_DCB_EVENTS),
                self.format(DB_INDEX_UNIQUE_ID_COVER_TYPE),
                self.format(DB_TABLE_DCB_TAGS),
                self.format(DB_INDEX_TAG_MAIN_ID),
                self.format(DB_FUNCTION_CONDITIONAL_APPEND),
            ]
        )

        # Format other SQL statements.
        self.sql_select_by_tags = self.format(SQL_SELECT_BY_TAGS)
        self.sql_select_all = self.format(SQL_SELECT_ALL)
        self.sql_select_by_type = self.format(SQL_SELECT_EVENTS_BY_TYPE)
        self.sql_select_max_id = self.format(SQL_SELECT_MAX_ID)
        self.sql_unconditional_append = self.format(SQL_UNCONDITIONAL_APPEND)
        self.sql_conditional_append = self.format(SQL_CONDITIONAL_APPEND)
        self.sql_set_local_lock_timeout = self.format(SQL_SET_LOCAL_LOCK_TIMEOUT)
        self.sql_lock_table = self.format(SQL_LOCK_TABLE)

    def format(self, sql: SQL) -> Composed:
        return sql.format(**self.sql_kwargs)

    def read(
        self,
        query: DCBQuery | None = None,
        *,
        after: int | None = None,
        limit: int | None = None,
    ) -> tuple[Sequence[DCBSequencedEvent], int | None]:
        with self.datastore.cursor() as curs:
            return self._read(
                curs=curs,
                query=query,
                after=after,
                limit=limit,
                return_head=True,
            )
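
The read() method returns the selected events together with a "head" position, the greatest id recorded at the time of reading, which callers can later pass as the "after" value of an append condition. A usage sketch, assuming the DCBQuery and DCBQueryItem constructors accept the attributes this module reads from them (items, types, tags):

    # Sketch: read all events carrying a tag (constructor kwargs assumed).
    query = DCBQuery(items=[DCBQueryItem(types=[], tags=["course:c1"])])
    events, head = recorder.read(query)
    for e in events:
        print(e.position, e.event.type, e.event.tags)
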

    def _read(
        self,
        curs: Cursor[DictRow],
        query: DCBQuery | None = None,
        *,
        after: int | None = None,
        limit: int | None = None,
        return_head: bool = True,
    ) -> tuple[Sequence[DCBSequencedEvent], int | None]:
        if return_head and limit is None:
            self.execute(curs, self.sql_select_max_id, explain=False)
            row = curs.fetchone()
            head = None if row is None else row["max"]
        else:
            head = None

        if not query or not query.items:
            # Select all.
            self.execute(
                curs,
                self.sql_select_all,
                {
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        elif self.all_query_items_have_tags(query):
            # Select with tags.
            psycopg_dcb_query_items = self.construct_psycopg_query_items(query.items)

            self.execute(
                curs,
                self.sql_select_by_tags,
                {
                    "query_items": psycopg_dcb_query_items,
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        elif self.has_one_query_item_one_type(query):
            # Select for one type.
            self.execute(
                curs,
                self.sql_select_by_type,
                {
                    "event_type": query.items[0].types[0],
                    "after": after,
                    "limit": limit,
                },
                explain=False,
            )
            rows = curs.fetchall()

        else:
            msg = f"Unsupported query: {query}"
            raise ProgrammingError(msg)

        events = [
            DCBSequencedEvent(
                event=DCBEvent(
                    type=row["type"],
                    data=row["data"],
                    tags=row["tags"],
                ),
                position=row["id"],
            )
            for row in rows
        ]

        # Maybe update head.
        if return_head and events:
            head = max(head or 0, *[e.position for e in events])

        return events, head

    def append(
        self, events: Sequence[DCBEvent], condition: DCBAppendCondition | None = None
    ) -> int:
        assert len(events) > 0
        psycopg_dcb_events = self.construct_psycopg_dcb_events(events)

        # Do single-statement "unconditional append".
        if condition is None:
            with self.datastore.cursor() as curs:
                return self._unconditional_append(curs, psycopg_dcb_events)

        if self.all_query_items_have_tags(condition.fail_if_events_match):
            # Do single-statement "conditional append".
            psycopg_dcb_query_items = self.construct_psycopg_query_items(
                condition.fail_if_events_match.items
            )
            with self.datastore.cursor() as curs:
                self.execute(
                    curs,
                    self.sql_conditional_append,
                    {
                        "query_items": psycopg_dcb_query_items,
                        "after": condition.after,
                        "events": psycopg_dcb_events,
                    },
                    explain=False,
                )
                row = curs.fetchone()
                if row is None:
                    raise IntegrityError

                return row[DB_FUNCTION_NAME_DCB_CONDITIONAL_APPEND_TT]

        # Do separate "read" and "append" operations in a transaction.
        with self.datastore.transaction(commit=True) as curs:

            # Lock the table in exclusive mode (readers can still read) to ensure
            # nothing else will execute an append condition statement until after
            # we have finished inserting new events, whilst expecting that others
            # are playing by the same game. In other words, this is pessimistic
            # locking.
            if self.datastore.lock_timeout:
                curs.execute(self.sql_set_local_lock_timeout)
            curs.execute(self.sql_lock_table)

            # Check the append condition.
            failed, head = self._read(
                curs=curs,
                query=condition.fail_if_events_match,
                after=condition.after,
                limit=1,
                return_head=False,
            )
            if failed:
                raise IntegrityError(failed)

            # If okay, then do an "unconditional append".
            return self._unconditional_append(curs, psycopg_dcb_events)
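
From the caller's side, append() supports optimistic concurrency control: read a consistency boundary, decide on new events, then append with a condition that fails if any matching event was recorded after the head position that was read. A sketch, with constructor keyword arguments assumed from how this module reads the objects:

    # Sketch: conditional append guarding a dynamic consistency boundary.
    query = DCBQuery(items=[DCBQueryItem(types=[], tags=["course:c1"])])
    events, head = recorder.read(query)
    new_event = DCBEvent(type="StudentEnrolled", data=b"{}", tags=["course:c1"])
    try:
        position = recorder.append(
            [new_event],
            DCBAppendCondition(fail_if_events_match=query, after=head),
        )
    except IntegrityError:
        ...  # a matching event was appended concurrently; re-read and retry
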

    def _unconditional_append(
        self, curs: Cursor[DictRow], psycopg_dcb_events: list[PsycopgDCBEvent]
    ) -> int:
        self.execute(
            curs,
            self.sql_unconditional_append,
            {
                "events": psycopg_dcb_events,
            },
            explain=False,
        )
        rows = curs.fetchall()
        assert len(rows) > 0
        return max(row["id"] for row in rows)

    def construct_psycopg_dcb_events(
        self, dcb_events: Sequence[DCBEvent]
    ) -> list[PsycopgDCBEvent]:
        return [
            self.datastore.psycopg_python_types[DB_TYPE_NAME_DCB_EVENT_TT](
                type=e.type,
                data=e.data,
                tags=e.tags,
            )
            for e in dcb_events
        ]

    def construct_psycopg_query_items(
        self, query_items: Sequence[DCBQueryItem]
    ) -> list[PsycopgDCBQueryItem]:
        return [
            self.datastore.psycopg_python_types[DB_TYPE_NAME_DCB_QUERY_ITEM_TT](
                types=q.types,
                tags=q.tags,
            )
            for q in query_items
        ]

    def has_one_query_item_one_type(self, query: DCBQuery) -> bool:
        return (
            len(query.items) == 1
            and len(query.items[0].types) == 1
            and len(query.items[0].tags) == 0
        )

    def all_query_items_have_tags(self, query: DCBQuery) -> bool:
        return all(len(q.tags) > 0 for q in query.items) and len(query.items) > 0

    def execute(
        self,
        cursor: Cursor[DictRow],
        statement: Composed,
        params: Params | None = None,
        *,
        explain: bool = False,
        prepare: bool = True,
    ) -> None:
        if explain:  # pragma: no cover
            print()  # noqa: T201
            print("Statement:", statement.as_string().strip())  # noqa: T201
            print("Params:", params)  # noqa: T201
            print()  # noqa: T201
            cursor.execute(SQL_EXPLAIN + statement, params)
            rows = cursor.fetchall()
            print("\n".join([r["QUERY PLAN"] for r in rows]))  # noqa: T201
            print()  # noqa: T201
        cursor.execute(statement, params, prepare=prepare)


class PsycopgDCBEvent(NamedTuple):
    type: str
    data: bytes
    tags: list[str]


class PsycopgDCBQueryItem(NamedTuple):
    types: list[str]
    tags: list[str]


class PostgresTTDCBFactory(
    PostgresFactory,
    DCBInfrastructureFactory[PostgresTrackingRecorder],
):
    def dcb_event_store(self) -> DCBRecorder:
        prefix = self.env.name.lower() or "dcb"

        dcb_table_name = prefix + "_events"
        recorder = PostgresDCBRecorderTT(
            datastore=self.datastore,
            events_table_name=dcb_table_name,
        )
        if self.env_create_table():
            recorder.create_table()
        return recorder