eventsourcing 9.4.0a7__py3-none-any.whl → 9.4.0b1__py3-none-any.whl
- eventsourcing/application.py +22 -30
- eventsourcing/cipher.py +3 -1
- eventsourcing/dispatch.py +52 -11
- eventsourcing/domain.py +373 -360
- eventsourcing/interface.py +1 -1
- eventsourcing/persistence.py +26 -28
- eventsourcing/popo.py +5 -1
- eventsourcing/postgres.py +174 -127
- eventsourcing/projection.py +82 -26
- eventsourcing/sqlite.py +5 -1
- eventsourcing/system.py +14 -9
- eventsourcing/tests/application.py +57 -49
- eventsourcing/tests/domain.py +8 -6
- eventsourcing/tests/persistence.py +170 -143
- eventsourcing/tests/postgres_utils.py +12 -9
- eventsourcing/utils.py +27 -17
- {eventsourcing-9.4.0a7.dist-info → eventsourcing-9.4.0b1.dist-info}/METADATA +2 -2
- eventsourcing-9.4.0b1.dist-info/RECORD +26 -0
- eventsourcing-9.4.0a7.dist-info/RECORD +0 -26
- {eventsourcing-9.4.0a7.dist-info → eventsourcing-9.4.0b1.dist-info}/AUTHORS +0 -0
- {eventsourcing-9.4.0a7.dist-info → eventsourcing-9.4.0b1.dist-info}/LICENSE +0 -0
- {eventsourcing-9.4.0a7.dist-info → eventsourcing-9.4.0b1.dist-info}/WHEEL +0 -0
eventsourcing/postgres.py
CHANGED
```diff
@@ -1,10 +1,11 @@
 from __future__ import annotations
 
+import contextlib
 import logging
 from asyncio import CancelledError
 from contextlib import contextmanager
 from threading import Thread
-from typing import TYPE_CHECKING, Any, Callable
+from typing import TYPE_CHECKING, Any, Callable, cast
 
 import psycopg
 import psycopg.errors
```
```diff
@@ -12,6 +13,8 @@ import psycopg_pool
 from psycopg import Connection, Cursor, Error
 from psycopg.generators import notifies
 from psycopg.rows import DictRow, dict_row
+from psycopg.sql import SQL, Composed, Identifier
+from typing_extensions import TypeVar
 
 from eventsourcing.persistence import (
     AggregateRecorder,
```
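The two new imports drive most of this diff: query strings previously built with f-strings are replaced by psycopg's composition objects. A minimal sketch of that API, with illustrative names (assuming psycopg >= 3.2, where `as_string()` can be called without a connection):

```python
# SQL holds a template, Identifier safely double-quotes schema and table
# names, and format() returns a Composed object that cursors accept
# anywhere a query string is accepted. The %s placeholders survive
# composition and are still bound to values by the driver at execute().
from psycopg.sql import SQL, Identifier

statement = SQL("SELECT * FROM {0}.{1} WHERE originator_id = %s").format(
    Identifier("public"),         # rendered as "public"
    Identifier("stored_events"),  # rendered as "stored_events"
)
print(statement.as_string())
# SELECT * FROM "public"."stored_events" WHERE originator_id = %s
```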
```diff
@@ -40,6 +43,7 @@ if TYPE_CHECKING:
     from collections.abc import Iterator, Sequence
     from uuid import UUID
 
+    from psycopg.abc import Query
     from typing_extensions import Self
 
 logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
```
```diff
@@ -117,12 +121,11 @@ class PostgresDatastore:
             check=check,
         )
         self.lock_timeout = lock_timeout
-        self.schema = schema.strip()
+        self.schema = schema.strip() or "public"
 
     def after_connect_func(self) -> Callable[[Connection[Any]], None]:
-        statement = (
-            "SET idle_in_transaction_session_timeout = "
-            f"'{self.idle_in_transaction_session_timeout}s'"
+        statement = SQL("SET idle_in_transaction_session_timeout = '{0}s'").format(
+            self.idle_in_transaction_session_timeout
         )
 
         def after_connect(conn: Connection[DictRow]) -> None:
```
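Note that `.format()` receives a plain value here, not an `Identifier`: psycopg wraps non-composable arguments as `Literal`, so the timeout is escaped as a SQL value. A small sketch of the difference:

```python
# Plain values passed to format() are wrapped as Literal and escaped as
# SQL literals; identifiers need Identifier(). as_string() without a
# connection assumes psycopg >= 3.2.
from psycopg.sql import SQL

statement = SQL("SET idle_in_transaction_session_timeout = '{0}s'").format(5)
print(statement.as_string())
# SET idle_in_transaction_session_timeout = '5s'
```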
```diff
@@ -167,7 +170,6 @@ class PostgresDatastore:
 
     @contextmanager
     def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
-        conn: Connection[DictRow]
         with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
             yield conn.cursor()
 
```
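The deleted line was only a local type annotation; behaviour is unchanged. For context, a usage sketch of this context manager (connection details are illustrative, and the constructor arguments are abbreviated):

```python
# transaction() yields a cursor inside a transaction that is rolled back
# on exit unless commit=True is passed, which suits test fixtures.
from eventsourcing.postgres import PostgresDatastore

datastore = PostgresDatastore(
    dbname="eventsourcing",
    host="127.0.0.1",
    port="5432",
    user="eventsourcing",
    password="eventsourcing",
)
with datastore.transaction(commit=False) as curs:
    curs.execute("SELECT 1 AS one")
    print(curs.fetchone())  # {'one': 1} under the dict_row row factory
```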
```diff
@@ -180,6 +182,9 @@
     def __exit__(self, *args: object, **kwargs: Any) -> None:
         self.close()
 
+    def __del__(self) -> None:
+        self.close()
+
 
 class PostgresRecorder:
     """Base class for recorders that use PostgreSQL."""
```
```diff
@@ -191,18 +196,12 @@ class PostgresRecorder:
         self.datastore = datastore
         self.create_table_statements = self.construct_create_table_statements()
 
-    def construct_create_table_statements(self) -> list[str]:
+    def construct_create_table_statements(self) -> list[Composed]:
         return []
 
-    def check_table_name_length(self, table_name: str, schema_name: str) -> None:
-        schema_prefix = schema_name + "."
-        if table_name.startswith(schema_prefix):
-            unqualified_table_name = table_name[len(schema_prefix) :]
-        else:
-            unqualified_table_name = table_name
-        if len(unqualified_table_name) > 63:
-            msg = f"Table name too long: {unqualified_table_name}"
+    def check_table_name_length(self, table_name: str) -> None:
+        if len(table_name) > 63:
+            msg = f"Table name too long: {table_name}"
             raise ProgrammingError(msg)
 
     def create_table(self) -> None:
```
```diff
@@ -219,42 +218,49 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
         events_table_name: str = "stored_events",
     ):
         super().__init__(datastore)
-        self.check_table_name_length(events_table_name, datastore.schema)
+        self.check_table_name_length(events_table_name)
         self.events_table_name = events_table_name
         # Index names can't be qualified names, but
         # are created in the same schema as the table.
-        if "." in self.events_table_name:
-            unqualified_table_name = self.events_table_name.split(".")[-1]
-        else:
-            unqualified_table_name = self.events_table_name
         self.notification_id_index_name = (
-            f"{unqualified_table_name}_notification_id_idx"
+            f"{self.events_table_name}_notification_id_idx"
         )
         self.create_table_statements.append(
-            "CREATE TABLE IF NOT EXISTS "
-            f"{self.events_table_name} ("
-            "originator_id uuid NOT NULL, "
-            "originator_version bigint NOT NULL, "
-            "topic text, "
-            "state bytea, "
-            "PRIMARY KEY "
-            "(originator_id, originator_version)) "
-            "WITH (autovacuum_enabled=false)"
+            SQL(
+                "CREATE TABLE IF NOT EXISTS {0}.{1} ("
+                "originator_id uuid NOT NULL, "
+                "originator_version bigint NOT NULL, "
+                "topic text, "
+                "state bytea, "
+                "PRIMARY KEY "
+                "(originator_id, originator_version)) "
+                "WITH (autovacuum_enabled=false)"
+            ).format(
+                Identifier(self.datastore.schema),
+                Identifier(self.events_table_name),
+            )
         )
 
-        self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
+        self.insert_events_statement = SQL(
+            "INSERT INTO {0}.{1} VALUES (%s, %s, %s, %s)"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.events_table_name),
         )
-        self.select_events_statement = (
-            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
+
+        self.select_events_statement = SQL(
+            "SELECT * FROM {0}.{1} WHERE originator_id = %s"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.events_table_name),
         )
-        self.lock_table_statements: list[str] = []
+
+        self.lock_table_statements: list[Query] = []
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_events(
         self, stored_events: list[StoredEvent], **kwargs: Any
     ) -> Sequence[int] | None:
-        conn: Connection[DictRow]
         exc: Exception | None = None
         notification_ids: Sequence[int] | None = None
         with self.datastore.get_connection() as conn:
```
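The net effect of this hunk is that the schema is no longer baked into `events_table_name`; it comes from the datastore, which now defaults to `"public"`. A hypothetical wiring of the pieces (connection details are illustrative):

```python
# Hypothetical wiring: the schema is a constructor concern of the
# datastore, and the table name is passed unqualified.
from eventsourcing.postgres import PostgresAggregateRecorder, PostgresDatastore

datastore = PostgresDatastore(
    dbname="eventsourcing",
    host="127.0.0.1",
    port="5432",
    user="eventsourcing",
    password="eventsourcing",
    schema="myschema",  # default is now "public" rather than ""
)
recorder = PostgresAggregateRecorder(datastore, events_table_name="stored_events")
recorder.create_table()  # executes the composed CREATE TABLE statement
```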
```diff
@@ -283,7 +289,7 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
 
     def _insert_events(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         stored_events: list[StoredEvent],
         **_: Any,
     ) -> None:
```
```diff
@@ -291,18 +297,18 @@
 
     def _insert_stored_events(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         stored_events: list[StoredEvent],
         **_: Any,
     ) -> None:
         # Only do something if there is something to do.
         if len(stored_events) > 0:
-            self._lock_table(c)
+            self._lock_table(curs)
 
-            self._notify_channel(c)
+            self._notify_channel(curs)
 
             # Insert events.
-            c.executemany(
+            curs.executemany(
                 query=self.insert_events_statement,
                 params_seq=[
                     (
```
```diff
@@ -313,18 +319,18 @@
                     )
                     for stored_event in stored_events
                 ],
-                returning="RETURNING" in self.insert_events_statement,
+                returning="RETURNING" in self.insert_events_statement.as_string(),
             )
 
-    def _lock_table(self, c: Cursor[DictRow]) -> None:
+    def _lock_table(self, curs: Cursor[DictRow]) -> None:
         pass
 
-    def _notify_channel(self, c: Cursor[DictRow]) -> None:
+    def _notify_channel(self, curs: Cursor[DictRow]) -> None:
         pass
 
     def _fetch_ids_after_insert_events(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         stored_events: list[StoredEvent],
         **kwargs: Any,
     ) -> Sequence[int] | None:
```
```diff
@@ -344,18 +350,18 @@
         params: list[Any] = [originator_id]
         if gt is not None:
             params.append(gt)
-            statement += " AND originator_version > %s"
+            statement += SQL(" AND originator_version > %s")
         if lte is not None:
             params.append(lte)
-            statement += " AND originator_version <= %s"
-        statement += " ORDER BY originator_version"
+            statement += SQL(" AND originator_version <= %s")
+        statement += SQL(" ORDER BY originator_version")
         if desc is False:
-            statement += " ASC"
+            statement += SQL(" ASC")
         else:
-            statement += " DESC"
+            statement += SQL(" DESC")
         if limit is not None:
             params.append(limit)
-            statement += " LIMIT %s"
+            statement += SQL(" LIMIT %s")
 
         with self.datastore.get_connection() as conn, conn.cursor() as curs:
             curs.execute(statement, params, prepare=True)
```
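This works because psycopg `Composable` objects support `+`, so the incremental `+=` building style survives the move away from plain strings. A sketch with illustrative names:

```python
# SQL fragments concatenate into a Composed, mirroring the plain string
# concatenation they replace. as_string() without a connection assumes
# psycopg >= 3.2.
from psycopg.sql import SQL, Identifier

statement = SQL("SELECT * FROM {0}.{1} WHERE originator_id = %s").format(
    Identifier("public"), Identifier("stored_events")
)
statement += SQL(" ORDER BY originator_version DESC")
statement += SQL(" LIMIT %s")
print(statement.as_string())
```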
```diff
@@ -378,9 +384,8 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         events_table_name: str = "stored_events",
     ):
         super().__init__(datastore, events_table_name=events_table_name)
-        self.create_table_statements[-1] = (
-            "CREATE TABLE IF NOT EXISTS "
-            f"{self.events_table_name} ("
+        self.create_table_statements[-1] = SQL(
+            "CREATE TABLE IF NOT EXISTS {0}.{1} ("
             "originator_id uuid NOT NULL, "
             "originator_version bigint NOT NULL, "
             "topic text, "
```
```diff
@@ -389,20 +394,40 @@
             "PRIMARY KEY "
             "(originator_id, originator_version)) "
             "WITH (autovacuum_enabled=false)"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.events_table_name),
         )
+
         self.create_table_statements.append(
-            "CREATE UNIQUE INDEX IF NOT EXISTS "
-            f"{self.notification_id_index_name} "
-            f"ON {self.events_table_name} (notification_id ASC);"
+            SQL(
+                "CREATE UNIQUE INDEX IF NOT EXISTS {0} "
+                "ON {1}.{2} (notification_id ASC);"
+            ).format(
+                Identifier(self.notification_id_index_name),
+                Identifier(self.datastore.schema),
+                Identifier(self.events_table_name),
+            )
         )
+
         self.channel_name = self.events_table_name.replace(".", "_")
-        self.insert_events_statement += " RETURNING notification_id"
-        self.max_notification_id_statement = (
-            f"SELECT MAX(notification_id) FROM {self.events_table_name}"
+        self.insert_events_statement = self.insert_events_statement + SQL(
+            " RETURNING notification_id"
+        )
+
+        self.max_notification_id_statement = SQL(
+            "SELECT MAX(notification_id) FROM {0}.{1}"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.events_table_name),
         )
+
         self.lock_table_statements = [
-            f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
-            f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
+            SQL("SET LOCAL lock_timeout = '{0}s'").format(self.datastore.lock_timeout),
+            SQL("LOCK TABLE {0}.{1} IN EXCLUSIVE MODE").format(
+                Identifier(self.datastore.schema),
+                Identifier(self.events_table_name),
+            ),
         ]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
```
```diff
@@ -421,37 +446,44 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         """
 
         params: list[int | str | Sequence[str]] = []
-        statement = f"SELECT * FROM {self.events_table_name}"
+        statement = SQL("SELECT * FROM {0}.{1}").format(
+            Identifier(self.datastore.schema),
+            Identifier(self.events_table_name),
+        )
         has_where = False
         if start is not None:
-            statement += " WHERE"
+            statement += SQL(" WHERE")
             has_where = True
             params.append(start)
             if inclusive_of_start:
-                statement += " notification_id>=%s"
+                statement += SQL(" notification_id>=%s")
             else:
-                statement += " notification_id>%s"
+                statement += SQL(" notification_id>%s")
 
         if stop is not None:
             if not has_where:
                 has_where = True
-                statement += " WHERE"
+                statement += SQL(" WHERE")
             else:
-                statement += " AND"
+                statement += SQL(" AND")
 
             params.append(stop)
-            statement += " notification_id <= %s"
+            statement += SQL(" notification_id <= %s")
 
         if topics:
+            # Check sequence and ensure list of strings.
+            assert isinstance(topics, (tuple, list)), topics
+            topics = list(topics) if isinstance(topics, tuple) else topics
+            assert all(isinstance(t, str) for t in topics), topics
             if not has_where:
-                statement += " WHERE"
+                statement += SQL(" WHERE")
             else:
-                statement += " AND"
+                statement += SQL(" AND")
             params.append(topics)
-            statement += " topic = ANY(%s)"
+            statement += SQL(" topic = ANY(%s)")
 
         params.append(limit)
-        statement += " ORDER BY notification_id LIMIT %s"
+        statement += SQL(" ORDER BY notification_id LIMIT %s")
 
         connection = self.datastore.get_connection()
         with connection as conn, conn.cursor() as curs:
```
```diff
@@ -472,14 +504,13 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         """
         Returns the maximum notification ID.
         """
-        conn: Connection[DictRow]
         with self.datastore.get_connection() as conn, conn.cursor() as curs:
             curs.execute(self.max_notification_id_statement)
             fetchone = curs.fetchone()
             assert fetchone is not None
             return fetchone["max"]
 
-    def _lock_table(self, c: Cursor[DictRow]) -> None:
+    def _lock_table(self, curs: Cursor[DictRow]) -> None:
         # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
         # stored events, so that readers don't pass over gaps that are filled in
         # later. We want each transaction that will be issued with notifications
```
```diff
@@ -501,23 +532,23 @@
         # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
         # -postgresql-serial-column-values-by-commit-order
         for lock_statement in self.lock_table_statements:
-            c.execute(lock_statement, prepare=True)
+            curs.execute(lock_statement, prepare=True)
 
-    def _notify_channel(self, c: Cursor[DictRow]) -> None:
-        c.execute(f"NOTIFY {self.channel_name}")
+    def _notify_channel(self, curs: Cursor[DictRow]) -> None:
+        curs.execute(SQL("NOTIFY {0}").format(Identifier(self.channel_name)))
 
     def _fetch_ids_after_insert_events(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         stored_events: list[StoredEvent],
         **kwargs: Any,
     ) -> Sequence[int] | None:
         notification_ids: list[int] = []
         len_events = len(stored_events)
         if len_events:
-            while c.nextset() and len(notification_ids) != len_events:
-                if c.statusmessage and c.statusmessage.startswith("INSERT"):
-                    row = c.fetchone()
+            while curs.nextset() and len(notification_ids) != len_events:
+                if curs.statusmessage and curs.statusmessage.startswith("INSERT"):
+                    row = curs.fetchone()
                     assert row is not None
                     notification_ids.append(row["notification_id"])
         if len(notification_ids) != len(stored_events):
```
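The `nextset()`/`statusmessage` loop relies on psycopg 3's `executemany(..., returning=True)`, which keeps one result set per executed statement. A self-contained sketch of the pattern (DSN and table are illustrative):

```python
import psycopg

# With returning=True, executemany() retains each statement's result
# set, and nextset() advances through them so RETURNING rows can be
# collected one by one.
with psycopg.connect("dbname=test") as conn, conn.cursor() as curs:
    curs.execute(
        "CREATE TABLE IF NOT EXISTS demo ("
        "id bigserial PRIMARY KEY, topic text)"
    )
    curs.executemany(
        "INSERT INTO demo (topic) VALUES (%s) RETURNING id",
        params_seq=[("a",), ("b",), ("c",)],
        returning=True,
    )
    ids = []
    while True:
        row = curs.fetchone()
        if row is not None:
            ids.append(row[0])
        if not curs.nextset():
            break
    print(ids)  # e.g. [1, 2, 3]
```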
```diff
@@ -551,7 +582,9 @@ class PostgresSubscription(ListenNotifySubscription[PostgresApplicationRecorder]
     def _listen(self) -> None:
         try:
             with self._recorder.datastore.get_connection() as conn:
-                conn.execute(f"LISTEN {self._recorder.channel_name}")
+                conn.execute(
+                    SQL("LISTEN {0}").format(Identifier(self._recorder.channel_name))
+                )
                 while not self._has_been_stopped and not self._thread_error:
                     # This block simplifies psycopg's conn.notifies(), because
                     # we aren't interested in the actual notify messages, and
```
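Composing `LISTEN` with an `Identifier` matches the `NOTIFY` change above. A rough sketch of the listening side (DSN and channel are illustrative; the `timeout` argument to `notifies()` needs psycopg >= 3.2):

```python
import psycopg
from psycopg.sql import SQL, Identifier

# autocommit is needed so LISTEN takes effect immediately; the channel
# name is composed as an identifier rather than interpolated as text.
with psycopg.connect("dbname=test", autocommit=True) as conn:
    conn.execute(SQL("LISTEN {0}").format(Identifier("stored_events")))
    for notification in conn.notifies(timeout=5.0):
        print(notification.channel, notification.payload)
        break
```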
```diff
@@ -579,42 +612,58 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
         **kwargs: Any,
     ):
         super().__init__(datastore, **kwargs)
-        self.check_table_name_length(tracking_table_name, datastore.schema)
+        self.check_table_name_length(tracking_table_name)
         self.tracking_table_name = tracking_table_name
         self.create_table_statements.append(
-            "CREATE TABLE IF NOT EXISTS "
-            f"{self.tracking_table_name} ("
-            "application_name text, "
-            "notification_id bigint, "
-            "PRIMARY KEY "
-            "(application_name, notification_id))"
+            SQL(
+                "CREATE TABLE IF NOT EXISTS {0}.{1} ("
+                "application_name text, "
+                "notification_id bigint, "
+                "PRIMARY KEY "
+                "(application_name, notification_id))"
+            ).format(
+                Identifier(self.datastore.schema),
+                Identifier(self.tracking_table_name),
+            )
         )
-        self.insert_tracking_statement = (
-            f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
+
+        self.insert_tracking_statement = SQL(
+            "INSERT INTO {0}.{1} VALUES (%s, %s)"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.tracking_table_name),
         )
-        self.max_tracking_id_statement = (
-            "SELECT MAX(notification_id) "
-            f"FROM {self.tracking_table_name} "
-            "WHERE application_name=%s"
+
+        self.max_tracking_id_statement = SQL(
+            "SELECT MAX(notification_id) FROM {0}.{1} WHERE application_name=%s"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.tracking_table_name),
         )
-        self.count_tracking_id_statement = (
-            f"SELECT COUNT(*) FROM {self.tracking_table_name} "
+
+        self.count_tracking_id_statement = SQL(
+            "SELECT COUNT(*) FROM {0}.{1} "
             "WHERE application_name=%s AND notification_id=%s"
+        ).format(
+            Identifier(self.datastore.schema),
+            Identifier(self.tracking_table_name),
         )
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_tracking(self, tracking: Tracking) -> None:
-        with self.datastore.transaction(commit=True) as curs:
+        with (
+            self.datastore.get_connection() as conn,
+            conn.transaction(),
+            conn.cursor() as curs,
+        ):
             self._insert_tracking(curs, tracking)
 
     def _insert_tracking(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         tracking: Tracking,
     ) -> None:
-        c.execute(
+        curs.execute(
             query=self.insert_tracking_statement,
             params=(
                 tracking.application_name,
```
```diff
@@ -636,8 +685,11 @@
             return fetchone["max"]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
-    def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
+    def has_tracking_id(
+        self, application_name: str, notification_id: int | None
+    ) -> bool:
+        if notification_id is None:
+            return True
         with self.datastore.get_connection() as conn, conn.cursor() as curs:
             curs.execute(
                 query=self.count_tracking_id_statement,
```
```diff
@@ -649,6 +701,13 @@
         return bool(fetchone["count"])
 
 
+TPostgresTrackingRecorder = TypeVar(
+    "TPostgresTrackingRecorder",
+    bound=PostgresTrackingRecorder,
+    default=PostgresTrackingRecorder,
+)
+
+
 class PostgresProcessRecorder(
     PostgresTrackingRecorder, PostgresApplicationRecorder, ProcessRecorder
 ):
```
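`TypeVar` is imported from `typing_extensions` because the `default=` parameter (PEP 696) isn't available in `typing` on all supported Python versions. A sketch of what the default buys in `tracking_recorder()` below, with illustrative names:

```python
# With a PEP 696 default, a function can return the specific class the
# caller passes, yet fall back to the bound default when called bare.
from __future__ import annotations

from typing_extensions import TypeVar

class Tracker: ...
class SpecialTracker(Tracker): ...

T = TypeVar("T", bound=Tracker, default=Tracker)

def make_tracker(cls: type[T] | None = None) -> T:
    # Runtime fallback mirrors the declared default.
    return (cls or Tracker)()  # type: ignore[return-value]

plain = make_tracker()                  # checkers infer Tracker
special = make_tracker(SpecialTracker)  # checkers infer SpecialTracker
```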
```diff
@@ -667,14 +726,14 @@ class PostgresProcessRecorder(
 
     def _insert_events(
         self,
-        c: Cursor[DictRow],
+        curs: Cursor[DictRow],
         stored_events: list[StoredEvent],
         **kwargs: Any,
     ) -> None:
         tracking: Tracking | None = kwargs.get("tracking", None)
         if tracking is not None:
-            self._insert_tracking(c, tracking=tracking)
-        super()._insert_events(c, stored_events, **kwargs)
+            self._insert_tracking(curs, tracking=tracking)
+        super()._insert_events(curs, stored_events, **kwargs)
 
 
 class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
```
```diff
@@ -876,8 +935,6 @@
     def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
         prefix = self.env.name.lower() or "stored"
         events_table_name = prefix + "_" + purpose
-        if self.datastore.schema:
-            events_table_name = f"{self.datastore.schema}.{events_table_name}"
         recorder = type(self).aggregate_recorder_class(
             datastore=self.datastore,
             events_table_name=events_table_name,
```
```diff
@@ -889,9 +946,6 @@
     def application_recorder(self) -> ApplicationRecorder:
         prefix = self.env.name.lower() or "stored"
         events_table_name = prefix + "_events"
-        if self.datastore.schema:
-            events_table_name = f"{self.datastore.schema}.{events_table_name}"
-
         application_recorder_topic = self.env.get(self.APPLICATION_RECORDER_TOPIC)
         if application_recorder_topic:
             application_recorder_class: type[PostgresApplicationRecorder] = (
```
```diff
@@ -910,18 +964,18 @@
         return recorder
 
     def tracking_recorder(
-        self, tracking_recorder_class: type[PostgresTrackingRecorder] | None = None
-    ) -> PostgresTrackingRecorder:
+        self, tracking_recorder_class: type[TPostgresTrackingRecorder] | None = None
+    ) -> TPostgresTrackingRecorder:
         prefix = self.env.name.lower() or "notification"
         tracking_table_name = prefix + "_tracking"
-        if self.datastore.schema:
-            tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
         if tracking_recorder_class is None:
             tracking_recorder_topic = self.env.get(self.TRACKING_RECORDER_TOPIC)
             if tracking_recorder_topic:
                 tracking_recorder_class = resolve_topic(tracking_recorder_topic)
             else:
-                tracking_recorder_class = type(self).tracking_recorder_class
+                tracking_recorder_class = cast(
+                    type[TPostgresTrackingRecorder], type(self).tracking_recorder_class
+                )
         assert tracking_recorder_class is not None
         assert issubclass(tracking_recorder_class, PostgresTrackingRecorder)
         recorder = tracking_recorder_class(
```
```diff
@@ -937,10 +991,6 @@
         events_table_name = prefix + "_events"
         prefix = self.env.name.lower() or "notification"
         tracking_table_name = prefix + "_tracking"
-        if self.datastore.schema:
-            events_table_name = f"{self.datastore.schema}.{events_table_name}"
-            tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-
         process_recorder_topic = self.env.get(self.PROCESS_RECORDER_TOPIC)
         if process_recorder_topic:
             process_recorder_class: type[PostgresTrackingRecorder] = resolve_topic(
```
```diff
@@ -960,11 +1010,8 @@
         return recorder
 
     def close(self) -> None:
-        if hasattr(self, "datastore"):
+        with contextlib.suppress(AttributeError):
             self.datastore.close()
 
-    def __del__(self) -> None:
-        self.close()
-
 
 Factory = PostgresFactory
```
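Finally, `close()` now tolerates a partially initialised factory via `contextlib.suppress`, and the `__del__` hook moves to `PostgresDatastore`. A minimal sketch of the idiom:

```python
import contextlib

# If __init__ raised before self.datastore was assigned, close() can
# still be called safely, e.g. during interpreter shutdown.
class Factory:
    def close(self) -> None:
        with contextlib.suppress(AttributeError):
            self.datastore.close()  # no-op if datastore was never set

Factory().close()  # does not raise
```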