eventsourcing 9.3.5__py3-none-any.whl → 9.4.0a2__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eventsourcing/__init__.py +0 -1
- eventsourcing/application.py +26 -10
- eventsourcing/cipher.py +4 -2
- eventsourcing/cryptography.py +96 -0
- eventsourcing/domain.py +29 -9
- eventsourcing/interface.py +23 -5
- eventsourcing/persistence.py +292 -71
- eventsourcing/popo.py +113 -32
- eventsourcing/postgres.py +265 -103
- eventsourcing/projection.py +200 -0
- eventsourcing/sqlite.py +143 -36
- eventsourcing/system.py +64 -42
- eventsourcing/tests/application.py +7 -12
- eventsourcing/tests/persistence.py +304 -75
- eventsourcing/utils.py +1 -1
- {eventsourcing-9.3.5.dist-info → eventsourcing-9.4.0a2.dist-info}/LICENSE +1 -1
- {eventsourcing-9.3.5.dist-info → eventsourcing-9.4.0a2.dist-info}/METADATA +9 -7
- eventsourcing-9.4.0a2.dist-info/RECORD +26 -0
- eventsourcing-9.3.5.dist-info/RECORD +0 -24
- {eventsourcing-9.3.5.dist-info → eventsourcing-9.4.0a2.dist-info}/AUTHORS +0 -0
- {eventsourcing-9.3.5.dist-info → eventsourcing-9.4.0a2.dist-info}/WHEEL +0 -0
eventsourcing/postgres.py
CHANGED

@@ -1,13 +1,16 @@
 from __future__ import annotations
 
 import logging
+from asyncio import CancelledError
 from contextlib import contextmanager
-from
+from threading import Thread
+from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Sequence, Type
 
 import psycopg
 import psycopg.errors
 import psycopg_pool
-from psycopg import Connection, Cursor
+from psycopg import Connection, Cursor, Error
+from psycopg.generators import notifies
 from psycopg.rows import DictRow, dict_row
 
 from eventsourcing.persistence import (
@@ -19,6 +22,7 @@ from eventsourcing.persistence import (
     IntegrityError,
     InterfaceError,
     InternalError,
+    ListenNotifySubscription,
     Notification,
     NotSupportedError,
     OperationalError,
@@ -27,10 +31,11 @@ from eventsourcing.persistence import (
     ProgrammingError,
     StoredEvent,
     Tracking,
+    TrackingRecorder,
 )
 from eventsourcing.utils import Environment, resolve_topic, retry, strtobool
 
-if TYPE_CHECKING:  # pragma: nocover
+if TYPE_CHECKING:
     from uuid import UUID
 
     from typing_extensions import Self
@@ -38,6 +43,14 @@ if TYPE_CHECKING:  # pragma: nocover
 logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
 logging.getLogger("psycopg").setLevel(logging.CRITICAL)
 
+# Copy of "private" psycopg.errors._NO_TRACEBACK (in case it changes)
+# From psycopg: "Don't show a complete traceback upon raising these exception.
+# Usually the traceback starts from internal functions (for instance in the
+# server communication callbacks) but, for the end user, it's more important
+# to get the high level information about where the exception was raised, for
+# instance in a certain `Cursor.execute()`."
+NO_TRACEBACK = (Error, KeyboardInterrupt, CancelledError)
+
 
 class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
     def __init__(
@@ -60,7 +73,7 @@ class PostgresDatastore:
         self,
         dbname: str,
         host: str,
-        port: str,
+        port: str | int,
         user: str,
         password: str,
         *,
@@ -95,7 +108,7 @@ class PostgresDatastore:
             min_size=pool_size,
             max_size=pool_size + max_overflow,
             open=False,
-            configure=self.
+            configure=self.after_connect_func(),
             timeout=connect_timeout,
             max_waiting=max_waiting,
             max_lifetime=conn_max_age,
@@ -104,13 +117,18 @@ class PostgresDatastore:
         self.lock_timeout = lock_timeout
         self.schema = schema.strip()
 
-    def
-
-        conn.cursor().execute(
+    def after_connect_func(self) -> Callable[[Connection[Any]], None]:
+        statement = (
             "SET idle_in_transaction_session_timeout = "
             f"'{self.idle_in_transaction_session_timeout}s'"
         )
 
+        def after_connect(conn: Connection[DictRow]) -> None:
+            conn.autocommit = True
+            conn.cursor().execute(statement)
+
+        return after_connect
+
     @contextmanager
     def get_connection(self) -> Iterator[Connection[DictRow]]:
         try:
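
The pool's configure hook is now built by after_connect_func(), which closes over the prepared SET statement and hands psycopg_pool a plain function to run on each new connection. A minimal standalone sketch of the same pattern, assuming a reachable PostgreSQL server (the connection string and timeout value are illustrative, not taken from this diff):

import psycopg
import psycopg_pool

def make_configure(timeout_s: int = 5):
    statement = f"SET idle_in_transaction_session_timeout = '{timeout_s}s'"

    def configure(conn: psycopg.Connection) -> None:
        # psycopg_pool runs this once per new connection, before first use.
        conn.autocommit = True
        conn.cursor().execute(statement)

    return configure

pool = psycopg_pool.ConnectionPool(
    "dbname=eventsourcing user=eventsourcing",
    min_size=1,
    max_size=2,
    open=False,
    configure=make_configure(5),
)
pool.open()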
@@ -154,9 +172,6 @@ class PostgresDatastore:
     def close(self) -> None:
         self.pool.close()
 
-    def __del__(self) -> None:
-        self.close()
-
     def __enter__(self) -> Self:
         return self
 
@@ -164,33 +179,18 @@ class PostgresDatastore:
         self.close()
 
 
-class
+class PostgresRecorder:
+    """Base class for recorders that use PostgreSQL."""
+
     def __init__(
         self,
         datastore: PostgresDatastore,
-        events_table_name: str,
     ):
-        self.check_table_name_length(events_table_name, datastore.schema)
         self.datastore = datastore
-        self.events_table_name = events_table_name
-        # Index names can't be qualified names, but
-        # are created in the same schema as the table.
-        if "." in self.events_table_name:
-            unqualified_table_name = self.events_table_name.split(".")[-1]
-        else:
-            unqualified_table_name = self.events_table_name
-        self.notification_id_index_name = (
-            f"{unqualified_table_name}_notification_id_idx "
-        )
-
         self.create_table_statements = self.construct_create_table_statements()
-
-
-
-        self.select_events_statement = (
-            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
-        )
-        self.lock_table_statements: List[str] = []
+
+    def construct_create_table_statements(self) -> List[str]:
+        return []
 
     @staticmethod
     def check_table_name_length(table_name: str, schema_name: str) -> None:
@@ -203,8 +203,32 @@ class PostgresAggregateRecorder(AggregateRecorder):
             msg = f"Table name too long: {unqualified_table_name}"
             raise ProgrammingError(msg)
 
-    def
-
+    def create_table(self) -> None:
+        with self.datastore.transaction(commit=True) as curs:
+            for statement in self.create_table_statements:
+                curs.execute(statement, prepare=False)
+
+
+class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
+    def __init__(
+        self,
+        datastore: PostgresDatastore,
+        *,
+        events_table_name: str = "stored_events",
+    ):
+        super().__init__(datastore)
+        self.check_table_name_length(events_table_name, datastore.schema)
+        self.events_table_name = events_table_name
+        # Index names can't be qualified names, but
+        # are created in the same schema as the table.
+        if "." in self.events_table_name:
+            unqualified_table_name = self.events_table_name.split(".")[-1]
+        else:
+            unqualified_table_name = self.events_table_name
+        self.notification_id_index_name = (
+            f"{unqualified_table_name}_notification_id_idx "
+        )
+        self.create_table_statements.append(
             "CREATE TABLE IF NOT EXISTS "
             f"{self.events_table_name} ("
             "originator_id uuid NOT NULL, "
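
With table creation moved up to the PostgresRecorder base class, each subclass appends its own DDL to create_table_statements and create_table() runs the whole list in one transaction. A hedged usage sketch, assuming a local PostgreSQL server (all connection values are examples, not part of this diff):

from eventsourcing.postgres import PostgresAggregateRecorder, PostgresDatastore

datastore = PostgresDatastore(
    dbname="eventsourcing",
    host="127.0.0.1",
    port=5432,  # port is now typed str | int
    user="eventsourcing",
    password="eventsourcing",
)
recorder = PostgresAggregateRecorder(datastore, events_table_name="stored_events")
recorder.create_table()  # executes the accumulated CREATE TABLE statements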
@@ -215,12 +239,14 @@ class PostgresAggregateRecorder(AggregateRecorder):
             "(originator_id, originator_version)) "
             "WITH (autovacuum_enabled=false)"
         )
-        return [statement]
 
-
-
-
-
+        self.insert_events_statement = (
+            f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
+        )
+        self.select_events_statement = (
+            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
+        )
+        self.lock_table_statements: List[str] = []
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_events(
@@ -257,7 +283,7 @@ class PostgresAggregateRecorder(AggregateRecorder):
         self,
         c: Cursor[DictRow],
         stored_events: List[StoredEvent],
-        **
+        **_: Any,
     ) -> None:
         pass
 
@@ -271,6 +297,8 @@ class PostgresAggregateRecorder(AggregateRecorder):
         if len(stored_events) > 0:
             self._lock_table(c)
 
+            self._notify_channel(c)
+
         # Insert events.
         c.executemany(
             query=self.insert_events_statement,
@@ -289,6 +317,9 @@ class PostgresAggregateRecorder(AggregateRecorder):
     def _lock_table(self, c: Cursor[DictRow]) -> None:
         pass
 
+    def _notify_channel(self, c: Cursor[DictRow]) -> None:
+        pass
+
     def _fetch_ids_after_insert_events(
         self,
         c: Cursor[DictRow],
@@ -341,9 +372,28 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
     def __init__(
         self,
         datastore: PostgresDatastore,
+        *,
         events_table_name: str = "stored_events",
     ):
-        super().__init__(datastore, events_table_name)
+        super().__init__(datastore, events_table_name=events_table_name)
+        self.create_table_statements[-1] = (
+            "CREATE TABLE IF NOT EXISTS "
+            f"{self.events_table_name} ("
+            "originator_id uuid NOT NULL, "
+            "originator_version bigint NOT NULL, "
+            "topic text, "
+            "state bytea, "
+            "notification_id bigserial, "
+            "PRIMARY KEY "
+            "(originator_id, originator_version)) "
+            "WITH (autovacuum_enabled=false)"
+        )
+        self.create_table_statements.append(
+            "CREATE UNIQUE INDEX IF NOT EXISTS "
+            f"{self.notification_id_index_name}"
+            f"ON {self.events_table_name} (notification_id ASC);"
+        )
+        self.channel_name = self.events_table_name.replace(".", "_")
         self.insert_events_statement += " RETURNING notification_id"
         self.max_notification_id_statement = (
             f"SELECT MAX(notification_id) FROM {self.events_table_name}"
@@ -353,50 +403,50 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
             f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
         ]
 
-    def construct_create_table_statements(self) -> List[str]:
-        return [
-            (
-                "CREATE TABLE IF NOT EXISTS "
-                f"{self.events_table_name} ("
-                "originator_id uuid NOT NULL, "
-                "originator_version bigint NOT NULL, "
-                "topic text, "
-                "state bytea, "
-                "notification_id bigserial, "
-                "PRIMARY KEY "
-                "(originator_id, originator_version)) "
-                "WITH (autovacuum_enabled=false)"
-            ),
-            (
-                "CREATE UNIQUE INDEX IF NOT EXISTS "
-                f"{self.notification_id_index_name}"
-                f"ON {self.events_table_name} (notification_id ASC);"
-            ),
-        ]
-
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_notifications(
         self,
-        start: int,
+        start: int | None,
         limit: int,
         stop: int | None = None,
         topics: Sequence[str] = (),
+        *,
+        inclusive_of_start: bool = True,
     ) -> List[Notification]:
         """
         Returns a list of event notifications
         from 'start', limited by 'limit'.
         """
 
-        params: List[int | str | Sequence[str]] = [
-        statement = f"SELECT * FROM {self.events_table_name}
+        params: List[int | str | Sequence[str]] = []
+        statement = f"SELECT * FROM {self.events_table_name}"
+        has_where = False
+        if start is not None:
+            statement += " WHERE"
+            has_where = True
+            params.append(start)
+            if inclusive_of_start:
+                statement += " notification_id>=%s"
+            else:
+                statement += " notification_id>%s"
 
         if stop is not None:
+            if not has_where:
+                has_where = True
+                statement += " WHERE"
+            else:
+                statement += " AND"
+
             params.append(stop)
-            statement += "
+            statement += " notification_id <= %s"
 
         if topics:
+            if not has_where:
+                statement += " WHERE"
+            else:
+                statement += " AND"
             params.append(topics)
-            statement += "
+            statement += " topic = ANY(%s)"
 
         params.append(limit)
         statement += " ORDER BY notification_id LIMIT %s"
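
The rewritten select_notifications builds its WHERE clause incrementally: start may now be None (no lower bound), and inclusive_of_start chooses between >= and >. A standalone re-implementation of the clause-building logic, shown here only to illustrate the SQL it produces (mirrors the diff; not imported from the package):

def build_statement(start, stop, topics, *, inclusive_of_start=True):
    statement = "SELECT * FROM stored_events"
    has_where = False
    if start is not None:
        statement += " WHERE"
        has_where = True
        statement += " notification_id>=%s" if inclusive_of_start else " notification_id>%s"
    if stop is not None:
        statement += " AND" if has_where else " WHERE"
        has_where = True
        statement += " notification_id <= %s"
    if topics:
        statement += " AND" if has_where else " WHERE"
        statement += " topic = ANY(%s)"
    return statement + " ORDER BY notification_id LIMIT %s"

assert build_statement(1, None, ()) == (
    "SELECT * FROM stored_events WHERE notification_id>=%s"
    " ORDER BY notification_id LIMIT %s"
)
assert build_statement(None, 9, ["topic1"]) == (
    "SELECT * FROM stored_events WHERE notification_id <= %s"
    " AND topic = ANY(%s) ORDER BY notification_id LIMIT %s"
)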
@@ -416,7 +466,7 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         ]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
-    def max_notification_id(self) -> int:
+    def max_notification_id(self) -> int | None:
         """
         Returns the maximum notification ID.
         """
@@ -425,7 +475,7 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
             curs.execute(self.max_notification_id_statement)
             fetchone = curs.fetchone()
             assert fetchone is not None
-            return fetchone["max"]
+            return fetchone["max"]
 
     def _lock_table(self, c: Cursor[DictRow]) -> None:
         # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
@@ -451,6 +501,9 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         for lock_statement in self.lock_table_statements:
             c.execute(lock_statement, prepare=True)
 
+    def _notify_channel(self, c: Cursor[DictRow]) -> None:
+        c.execute("NOTIFY " + self.channel_name)
+
     def _fetch_ids_after_insert_events(
         self,
         c: Cursor[DictRow],
@@ -460,31 +513,75 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         notification_ids: List[int] = []
         len_events = len(stored_events)
         if len_events:
-
-
-            and c.nextset()
-            and (c.statusmessage == "LOCK TABLE")
-            ):
-            while c.nextset() and len(notification_ids) != len_events:
+            while c.nextset() and len(notification_ids) != len_events:
+                if c.statusmessage and c.statusmessage.startswith("INSERT"):
                     row = c.fetchone()
                     assert row is not None
                     notification_ids.append(row["notification_id"])
         if len(notification_ids) != len(stored_events):
-            msg = "Couldn't get all notification IDs"
+            msg = "Couldn't get all notification IDs "
+            msg += f"(got {len(notification_ids)}, expected {len(stored_events)}"
             raise ProgrammingError(msg)
         return notification_ids
 
+    def subscribe(self, gt: int | None = None) -> PostgresSubscription:
+        return PostgresSubscription(self, gt)
+
+
+class PostgresSubscription(ListenNotifySubscription[PostgresApplicationRecorder]):
+    def __init__(
+        self, recorder: PostgresApplicationRecorder, gt: int | None = None
+    ) -> None:
+        assert isinstance(recorder, PostgresApplicationRecorder)
+        super().__init__(recorder=recorder, gt=gt)
+        self._listen_thread = Thread(target=self._listen)
+        self._listen_thread.start()
+
+    def __exit__(self, *args: object, **kwargs: Any) -> None:
+        super().__exit__(*args, **kwargs)
+        self._listen_thread.join()
 
-
+    def _listen(self) -> None:
+        try:
+            with self._recorder.datastore.get_connection() as conn:
+                conn.execute("LISTEN " + self._recorder.channel_name)
+                while not self._has_been_stopped and not self._thread_error:
+                    # This block simplifies psycopg's conn.notifies(), because
+                    # we aren't interested in the actual notify messages, and
+                    # also we want to stop consuming notify messages when the
+                    # subscription has an error or is otherwise stopped.
+                    with conn.lock:
+                        try:
+                            if conn.wait(notifies(conn.pgconn), interval=0.1):
+                                self._has_been_notified.set()
+                        except NO_TRACEBACK as ex:  # pragma: no cover
+                            raise ex.with_traceback(None) from None
+
+        except BaseException as e:
+            if self._thread_error is None:
+                self._thread_error = e
+            self.stop()
+
+
+class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
     def __init__(
         self,
         datastore: PostgresDatastore,
-
-        tracking_table_name: str,
+        *,
+        tracking_table_name: str = "notification_tracking",
+        **kwargs: Any,
     ):
+        super().__init__(datastore, **kwargs)
         self.check_table_name_length(tracking_table_name, datastore.schema)
         self.tracking_table_name = tracking_table_name
-
+        self.create_table_statements.append(
+            "CREATE TABLE IF NOT EXISTS "
+            f"{self.tracking_table_name} ("
+            "application_name text, "
+            "notification_id bigint, "
+            "PRIMARY KEY "
+            "(application_name, notification_id))"
+        )
         self.insert_tracking_statement = (
             f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
         )
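
Inserts now NOTIFY the recorder's channel in the same transaction, and subscribe() returns a PostgresSubscription whose background thread LISTENs and sets an event flag whenever a notification arrives. A hedged usage sketch, assuming ListenNotifySubscription (added to eventsourcing.persistence in this release) behaves as an iterable context manager over new notifications, which this file alone does not show:

# 'recorder' is a PostgresApplicationRecorder whose table has been created.
with recorder.subscribe(gt=None) as subscription:
    for notification in subscription:
        print(notification.id, notification.topic)
        break  # stop after the first notification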
@@ -499,20 +596,28 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
             "WHERE application_name=%s AND notification_id=%s"
         )
 
-
-
-
-
-
-
-
-
-
+    @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
+    def insert_tracking(self, tracking: Tracking) -> None:
+        c: Connection[DictRow]
+        with self.datastore.get_connection() as c, c.transaction(), c.cursor() as curs:
+            self._insert_tracking(curs, tracking)
+
+    def _insert_tracking(
+        self,
+        c: Cursor[DictRow],
+        tracking: Tracking,
+    ) -> None:
+        c.execute(
+            query=self.insert_tracking_statement,
+            params=(
+                tracking.application_name,
+                tracking.notification_id,
+            ),
+            prepare=True,
         )
-        return statements
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
-    def max_tracking_id(self, application_name: str) -> int:
+    def max_tracking_id(self, application_name: str) -> int | None:
         with self.datastore.get_connection() as conn, conn.cursor() as curs:
             curs.execute(
                 query=self.max_tracking_id_statement,
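
insert_tracking() gives PostgresTrackingRecorder a standalone way to record that a notification has been processed; the (application_name, notification_id) primary key makes duplicates fail, which is what exactly-once downstream processing relies on. A hedged sketch, assuming the datastore's connection handling maps the unique-violation to the library's IntegrityError:

from eventsourcing.persistence import IntegrityError, Tracking

tracking = Tracking(application_name="upstream_app", notification_id=42)
tracking_recorder.insert_tracking(tracking)
try:
    tracking_recorder.insert_tracking(tracking)  # same key again
except IntegrityError:
    pass  # duplicate is refused by the primary key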
@@ -521,7 +626,7 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
             )
             fetchone = curs.fetchone()
             assert fetchone is not None
-            return fetchone["max"]
+            return fetchone["max"]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
@@ -536,6 +641,23 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
         assert fetchone is not None
         return bool(fetchone["count"])
 
+
+class PostgresProcessRecorder(
+    PostgresTrackingRecorder, PostgresApplicationRecorder, ProcessRecorder
+):
+    def __init__(
+        self,
+        datastore: PostgresDatastore,
+        *,
+        events_table_name: str = "stored_events",
+        tracking_table_name: str = "notification_tracking",
+    ):
+        super().__init__(
+            datastore,
+            tracking_table_name=tracking_table_name,
+            events_table_name=events_table_name,
+        )
+
     def _insert_events(
         self,
         c: Cursor[DictRow],
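
PostgresProcessRecorder now combines the tracking and application recorders through cooperative multiple inheritance: each __init__ consumes its own keyword argument and forwards the rest up the MRO via **kwargs. A toy, self-contained sketch of the pattern (illustrative classes, not the real recorders):

class Base:
    def __init__(self, datastore, **kwargs):
        assert not kwargs, kwargs  # everything should be consumed by now
        self.datastore = datastore
        self.create_table_statements = []

class Tracks(Base):
    def __init__(self, datastore, *, tracking_table_name="tracking", **kwargs):
        super().__init__(datastore, **kwargs)
        self.create_table_statements.append(f"CREATE TABLE {tracking_table_name}")

class Stores(Base):
    def __init__(self, datastore, *, events_table_name="events", **kwargs):
        super().__init__(datastore, **kwargs)
        self.create_table_statements.append(f"CREATE TABLE {events_table_name}")

class Process(Tracks, Stores):
    pass

p = Process("datastore", tracking_table_name="t", events_table_name="e")
# MRO is Process -> Tracks -> Stores -> Base, so the events DDL lands first.
assert p.create_table_statements == ["CREATE TABLE e", "CREATE TABLE t"]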
@@ -544,18 +666,11 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
     ) -> None:
         tracking: Tracking | None = kwargs.get("tracking", None)
         if tracking is not None:
-
-                query=self.insert_tracking_statement,
-                params=(
-                    tracking.application_name,
-                    tracking.notification_id,
-                ),
-                prepare=True,
-            )
+            self._insert_tracking(c, tracking=tracking)
         super()._insert_events(c, stored_events, **kwargs)
 
 
-class
+class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
     POSTGRES_DBNAME = "POSTGRES_DBNAME"
     POSTGRES_HOST = "POSTGRES_HOST"
     POSTGRES_PORT = "POSTGRES_PORT"
@@ -577,6 +692,7 @@ class Factory(InfrastructureFactory):
 
     aggregate_recorder_class = PostgresAggregateRecorder
     application_recorder_class = PostgresApplicationRecorder
+    tracking_recorder_class = PostgresTrackingRecorder
     process_recorder_class = PostgresProcessRecorder
 
     def __init__(self, env: Environment):
@@ -768,7 +884,17 @@ class Factory(InfrastructureFactory):
         events_table_name = prefix + "_events"
         if self.datastore.schema:
             events_table_name = f"{self.datastore.schema}.{events_table_name}"
-
+
+        application_recorder_topic = self.env.get(self.APPLICATION_RECORDER_TOPIC)
+        if application_recorder_topic:
+            application_recorder_class: Type[PostgresApplicationRecorder] = (
+                resolve_topic(application_recorder_topic)
+            )
+            assert issubclass(application_recorder_class, PostgresApplicationRecorder)
+        else:
+            application_recorder_class = type(self).application_recorder_class
+
+        recorder = application_recorder_class(
             datastore=self.datastore,
             events_table_name=events_table_name,
         )
@@ -776,6 +902,29 @@ class Factory(InfrastructureFactory):
             recorder.create_table()
         return recorder
 
+    def tracking_recorder(
+        self, tracking_recorder_class: Type[PostgresTrackingRecorder] | None = None
+    ) -> PostgresTrackingRecorder:
+        prefix = self.env.name.lower() or "notification"
+        tracking_table_name = prefix + "_tracking"
+        if self.datastore.schema:
+            tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
+        if tracking_recorder_class is None:
+            tracking_recorder_topic = self.env.get(self.TRACKING_RECORDER_TOPIC)
+            if tracking_recorder_topic:
+                tracking_recorder_class = resolve_topic(tracking_recorder_topic)
+            else:
+                tracking_recorder_class = type(self).tracking_recorder_class
+        assert tracking_recorder_class is not None
+        assert issubclass(tracking_recorder_class, PostgresTrackingRecorder)
+        recorder = tracking_recorder_class(
+            datastore=self.datastore,
+            tracking_table_name=tracking_table_name,
+        )
+        if self.env_create_table():
+            recorder.create_table()
+        return recorder
+
     def process_recorder(self) -> ProcessRecorder:
         prefix = self.env.name.lower() or "stored"
         events_table_name = prefix + "_events"
@@ -784,7 +933,17 @@ class Factory(InfrastructureFactory):
         if self.datastore.schema:
             events_table_name = f"{self.datastore.schema}.{events_table_name}"
             tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-
+
+        process_recorder_topic = self.env.get(self.PROCESS_RECORDER_TOPIC)
+        if process_recorder_topic:
+            process_recorder_class: Type[PostgresTrackingRecorder] = resolve_topic(
+                process_recorder_topic
+            )
+            assert issubclass(process_recorder_class, PostgresProcessRecorder)
+        else:
+            process_recorder_class = type(self).process_recorder_class
+
+        recorder = process_recorder_class(
             datastore=self.datastore,
             events_table_name=events_table_name,
             tracking_table_name=tracking_table_name,
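
All three factory methods now honour *_RECORDER_TOPIC environment settings, resolving a dotted "topic" string to a class and asserting it subclasses the expected Postgres recorder. A hedged sketch of the resolution step; the module path "myapp.recorders" and class "MyTrackingRecorder" are hypothetical:

from eventsourcing.postgres import PostgresTrackingRecorder
from eventsourcing.utils import resolve_topic

cls = resolve_topic("myapp.recorders:MyTrackingRecorder")
assert issubclass(cls, PostgresTrackingRecorder)  # mirrors the factory's check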
@@ -799,3 +958,6 @@ class Factory(InfrastructureFactory):
 
     def __del__(self) -> None:
         self.close()
+
+
+Factory = PostgresFactory
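
The closing alias keeps the old import path working, so existing code that imports Factory from eventsourcing.postgres is unaffected by the rename:

from eventsourcing.postgres import Factory, PostgresFactory

assert Factory is PostgresFactory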