eventsourcing 9.2.22__py3-none-any.whl → 9.3.0a1__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of eventsourcing might be problematic.
- eventsourcing/__init__.py +1 -1
- eventsourcing/application.py +106 -135
- eventsourcing/cipher.py +15 -12
- eventsourcing/dispatch.py +31 -91
- eventsourcing/domain.py +138 -143
- eventsourcing/examples/__init__.py +0 -0
- eventsourcing/examples/aggregate1/__init__.py +0 -0
- eventsourcing/examples/aggregate1/application.py +27 -0
- eventsourcing/examples/aggregate1/domainmodel.py +16 -0
- eventsourcing/examples/aggregate1/test_application.py +37 -0
- eventsourcing/examples/aggregate2/__init__.py +0 -0
- eventsourcing/examples/aggregate2/application.py +27 -0
- eventsourcing/examples/aggregate2/domainmodel.py +22 -0
- eventsourcing/examples/aggregate2/test_application.py +37 -0
- eventsourcing/examples/aggregate3/__init__.py +0 -0
- eventsourcing/examples/aggregate3/application.py +27 -0
- eventsourcing/examples/aggregate3/domainmodel.py +38 -0
- eventsourcing/examples/aggregate3/test_application.py +37 -0
- eventsourcing/examples/aggregate4/__init__.py +0 -0
- eventsourcing/examples/aggregate4/application.py +27 -0
- eventsourcing/examples/aggregate4/domainmodel.py +128 -0
- eventsourcing/examples/aggregate4/test_application.py +38 -0
- eventsourcing/examples/aggregate5/__init__.py +0 -0
- eventsourcing/examples/aggregate5/application.py +27 -0
- eventsourcing/examples/aggregate5/domainmodel.py +131 -0
- eventsourcing/examples/aggregate5/test_application.py +38 -0
- eventsourcing/examples/aggregate6/__init__.py +0 -0
- eventsourcing/examples/aggregate6/application.py +30 -0
- eventsourcing/examples/aggregate6/domainmodel.py +123 -0
- eventsourcing/examples/aggregate6/test_application.py +38 -0
- eventsourcing/examples/aggregate6a/__init__.py +0 -0
- eventsourcing/examples/aggregate6a/application.py +40 -0
- eventsourcing/examples/aggregate6a/domainmodel.py +149 -0
- eventsourcing/examples/aggregate6a/test_application.py +45 -0
- eventsourcing/examples/aggregate7/__init__.py +0 -0
- eventsourcing/examples/aggregate7/application.py +48 -0
- eventsourcing/examples/aggregate7/domainmodel.py +144 -0
- eventsourcing/examples/aggregate7/persistence.py +57 -0
- eventsourcing/examples/aggregate7/test_application.py +38 -0
- eventsourcing/examples/aggregate7/test_compression_and_encryption.py +45 -0
- eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +67 -0
- eventsourcing/examples/aggregate7a/__init__.py +0 -0
- eventsourcing/examples/aggregate7a/application.py +56 -0
- eventsourcing/examples/aggregate7a/domainmodel.py +170 -0
- eventsourcing/examples/aggregate7a/test_application.py +46 -0
- eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +45 -0
- eventsourcing/examples/aggregate8/__init__.py +0 -0
- eventsourcing/examples/aggregate8/application.py +47 -0
- eventsourcing/examples/aggregate8/domainmodel.py +65 -0
- eventsourcing/examples/aggregate8/persistence.py +57 -0
- eventsourcing/examples/aggregate8/test_application.py +37 -0
- eventsourcing/examples/aggregate8/test_compression_and_encryption.py +44 -0
- eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +38 -0
- eventsourcing/examples/bankaccounts/__init__.py +0 -0
- eventsourcing/examples/bankaccounts/application.py +70 -0
- eventsourcing/examples/bankaccounts/domainmodel.py +56 -0
- eventsourcing/examples/bankaccounts/test.py +173 -0
- eventsourcing/examples/cargoshipping/__init__.py +0 -0
- eventsourcing/examples/cargoshipping/application.py +126 -0
- eventsourcing/examples/cargoshipping/domainmodel.py +330 -0
- eventsourcing/examples/cargoshipping/interface.py +143 -0
- eventsourcing/examples/cargoshipping/test.py +231 -0
- eventsourcing/examples/contentmanagement/__init__.py +0 -0
- eventsourcing/examples/contentmanagement/application.py +118 -0
- eventsourcing/examples/contentmanagement/domainmodel.py +69 -0
- eventsourcing/examples/contentmanagement/test.py +180 -0
- eventsourcing/examples/contentmanagement/utils.py +26 -0
- eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
- eventsourcing/examples/contentmanagementsystem/application.py +54 -0
- eventsourcing/examples/contentmanagementsystem/postgres.py +17 -0
- eventsourcing/examples/contentmanagementsystem/sqlite.py +17 -0
- eventsourcing/examples/contentmanagementsystem/system.py +14 -0
- eventsourcing/examples/contentmanagementsystem/test_system.py +174 -0
- eventsourcing/examples/searchablecontent/__init__.py +0 -0
- eventsourcing/examples/searchablecontent/application.py +45 -0
- eventsourcing/examples/searchablecontent/persistence.py +23 -0
- eventsourcing/examples/searchablecontent/postgres.py +118 -0
- eventsourcing/examples/searchablecontent/sqlite.py +136 -0
- eventsourcing/examples/searchablecontent/test_application.py +111 -0
- eventsourcing/examples/searchablecontent/test_recorder.py +69 -0
- eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
- eventsourcing/examples/searchabletimestamps/application.py +32 -0
- eventsourcing/examples/searchabletimestamps/persistence.py +20 -0
- eventsourcing/examples/searchabletimestamps/postgres.py +110 -0
- eventsourcing/examples/searchabletimestamps/sqlite.py +99 -0
- eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +91 -0
- eventsourcing/examples/test_invoice.py +176 -0
- eventsourcing/examples/test_parking_lot.py +206 -0
- eventsourcing/interface.py +2 -2
- eventsourcing/persistence.py +85 -81
- eventsourcing/popo.py +30 -31
- eventsourcing/postgres.py +361 -578
- eventsourcing/sqlite.py +91 -99
- eventsourcing/system.py +42 -57
- eventsourcing/tests/application.py +20 -32
- eventsourcing/tests/application_tests/__init__.py +0 -0
- eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +55 -0
- eventsourcing/tests/application_tests/test_application_with_popo.py +22 -0
- eventsourcing/tests/application_tests/test_application_with_postgres.py +75 -0
- eventsourcing/tests/application_tests/test_application_with_sqlite.py +72 -0
- eventsourcing/tests/application_tests/test_cache.py +134 -0
- eventsourcing/tests/application_tests/test_event_sourced_log.py +162 -0
- eventsourcing/tests/application_tests/test_notificationlog.py +232 -0
- eventsourcing/tests/application_tests/test_notificationlogreader.py +126 -0
- eventsourcing/tests/application_tests/test_processapplication.py +110 -0
- eventsourcing/tests/application_tests/test_processingpolicy.py +109 -0
- eventsourcing/tests/application_tests/test_repository.py +504 -0
- eventsourcing/tests/application_tests/test_snapshotting.py +68 -0
- eventsourcing/tests/application_tests/test_upcasting.py +459 -0
- eventsourcing/tests/docs_tests/__init__.py +0 -0
- eventsourcing/tests/docs_tests/test_docs.py +293 -0
- eventsourcing/tests/domain.py +1 -1
- eventsourcing/tests/domain_tests/__init__.py +0 -0
- eventsourcing/tests/domain_tests/test_aggregate.py +1159 -0
- eventsourcing/tests/domain_tests/test_aggregate_decorators.py +1604 -0
- eventsourcing/tests/domain_tests/test_domainevent.py +80 -0
- eventsourcing/tests/interface_tests/__init__.py +0 -0
- eventsourcing/tests/interface_tests/test_remotenotificationlog.py +258 -0
- eventsourcing/tests/persistence.py +49 -50
- eventsourcing/tests/persistence_tests/__init__.py +0 -0
- eventsourcing/tests/persistence_tests/test_aes.py +93 -0
- eventsourcing/tests/persistence_tests/test_connection_pool.py +722 -0
- eventsourcing/tests/persistence_tests/test_eventstore.py +72 -0
- eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +21 -0
- eventsourcing/tests/persistence_tests/test_mapper.py +113 -0
- eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +69 -0
- eventsourcing/tests/persistence_tests/test_popo.py +124 -0
- eventsourcing/tests/persistence_tests/test_postgres.py +1121 -0
- eventsourcing/tests/persistence_tests/test_sqlite.py +348 -0
- eventsourcing/tests/persistence_tests/test_transcoder.py +44 -0
- eventsourcing/tests/postgres_utils.py +7 -7
- eventsourcing/tests/system_tests/__init__.py +0 -0
- eventsourcing/tests/system_tests/test_runner.py +935 -0
- eventsourcing/tests/system_tests/test_system.py +287 -0
- eventsourcing/tests/utils_tests/__init__.py +0 -0
- eventsourcing/tests/utils_tests/test_utils.py +226 -0
- eventsourcing/utils.py +47 -50
- {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/METADATA +28 -80
- eventsourcing-9.3.0a1.dist-info/RECORD +144 -0
- {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/WHEEL +1 -2
- eventsourcing-9.2.22.dist-info/AUTHORS +0 -10
- eventsourcing-9.2.22.dist-info/RECORD +0 -25
- eventsourcing-9.2.22.dist-info/top_level.txt +0 -1
- {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/LICENSE +0 -0
eventsourcing/postgres.py
CHANGED
@@ -1,35 +1,18 @@
 from __future__ import annotations
 
+import logging
 from contextlib import contextmanager
-from
-from threading import Lock
-from types import TracebackType
-from typing import (
-    Any,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Type,
-    Union,
-)
-from uuid import NAMESPACE_URL, UUID, uuid5
+from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Sequence
 
-import
-import
-import
-from
-from
+import psycopg
+import psycopg.errors
+import psycopg_pool
+from psycopg import Connection, Cursor
+from psycopg.rows import DictRow, dict_row
 
 from eventsourcing.persistence import (
     AggregateRecorder,
     ApplicationRecorder,
-    Connection,
-    ConnectionPool,
-    Cursor,
     DatabaseError,
     DataError,
     InfrastructureFactory,
@@ -45,74 +28,32 @@ from eventsourcing.persistence import (
     StoredEvent,
     Tracking,
 )
-from eventsourcing.utils import Environment, retry, strtobool
-
-psycopg2.extras.register_uuid()
-
-
-class PostgresCursor(Cursor):
-    def __init__(self, pg_cursor: cursor):
-        self.pg_cursor = pg_cursor
-
-    def __enter__(self, *args: Any, **kwargs: Any) -> "PostgresCursor":
-        self.pg_cursor.__enter__(*args, **kwargs)
-        return self
-
-    def __exit__(self, *args: Any, **kwargs: Any) -> None:
-        return self.pg_cursor.__exit__(*args, **kwargs)
-
-    def mogrify(self, statement: str, params: Any = None) -> bytes:
-        return self.pg_cursor.mogrify(statement, vars=params)
-
-    def execute(self, statement: Union[str, bytes], params: Any = None) -> None:
-        self.pg_cursor.execute(query=statement, vars=params)
-
-    def fetchall(self) -> Any:
-        return self.pg_cursor.fetchall()
-
-    def fetchone(self) -> Any:
-        return self.pg_cursor.fetchone()
-
-    @property
-    def closed(self) -> bool:
-        return self.pg_cursor.closed
+from eventsourcing.utils import Environment, resolve_topic, retry, strtobool
 
+if TYPE_CHECKING:  # pragma: nocover
+    from uuid import UUID
 
-
-
-        super().__init__(max_age=max_age)
-        self._pg_conn = pg_conn
-        self.is_prepared: Set[str] = set()
+logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
+logging.getLogger("psycopg").setLevel(logging.CRITICAL)
 
-    @contextmanager
-    def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
-        # Context managed transaction.
-        with PostgresTransaction(self, commit) as curs:
-            # Context managed cursor.
-            with curs:
-                yield curs
-
-    def cursor(self) -> PostgresCursor:
-        return PostgresCursor(
-            self._pg_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
-        )
-
-    def rollback(self) -> None:
-        self._pg_conn.rollback()
 
-
-
-
-
-
-
+class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
+    def __init__(
+        self,
+        *args: Any,
+        get_password_func: Callable[[], str] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        self.get_password_func = get_password_func
+        super().__init__(*args, **kwargs)
 
-
-
-
+    def _connect(self, timeout: float | None = None) -> Connection[Any]:
+        if self.get_password_func:
+            self.kwargs["password"] = self.get_password_func()
+        return super()._connect(timeout=timeout)
 
 
-class
+class PostgresDatastore:
     def __init__(
         self,
         dbname: str,
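
The new ConnectionPool above wraps psycopg_pool.ConnectionPool and re-reads the
password through get_password_func before each new server connection, which suits
rotating credentials. A minimal sketch, where fetch_db_token() and the connection
details are hypothetical stand-ins for a real secrets source:

    from eventsourcing.postgres import ConnectionPool

    def fetch_db_token() -> str:
        # Hypothetical helper: return a short-lived password,
        # e.g. an IAM auth token from a secrets manager.
        return "s3cr3t-token"

    pool = ConnectionPool(
        get_password_func=fetch_db_token,  # called again for each new connection
        kwargs={
            "dbname": "eventsourcing",
            "host": "127.0.0.1",
            "port": "5432",
            "user": "eventsourcing",
        },
        min_size=2,
        max_size=4,
        open=False,
    )
    pool.open(wait=True, timeout=30.0)
    with pool.connection() as conn:
        conn.execute("SELECT 1")
    pool.close()
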
@@ -120,152 +61,94 @@ class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
         port: str,
         user: str,
         password: str,
+        *,
         connect_timeout: int = 5,
         idle_in_transaction_session_timeout: int = 0,
-        pool_size: int =
-        max_overflow: int =
+        pool_size: int = 2,
+        max_overflow: int = 2,
         pool_timeout: float = 5.0,
-
+        conn_max_age: float = 60 * 60.0,
         pre_ping: bool = False,
+        lock_timeout: int = 0,
+        schema: str = "",
+        pool_open_timeout: int | None = None,
+        get_password_func: Callable[[], str] | None = None,
     ):
-        self.dbname = dbname
-        self.host = host
-        self.port = port
-        self.user = user
-        self.password = password
-        self.connect_timeout = connect_timeout
         self.idle_in_transaction_session_timeout = idle_in_transaction_session_timeout
-
-
-
-
-
-
-
+        self.pre_ping = pre_ping
+        self.pool_open_timeout = pool_open_timeout
+
+        check = ConnectionPool.check_connection if pre_ping else None
+        kwargs: Dict[str, Any] = {"check": check}
+        self.pool = ConnectionPool(
+            get_password_func=get_password_func,
+            connection_class=Connection[DictRow],
+            kwargs={
+                "dbname": dbname,
+                "host": host,
+                "port": port,
+                "user": user,
+                "password": password,
+                "row_factory": dict_row,
+            },
+            min_size=pool_size,
+            max_size=pool_size + max_overflow,
+            open=False,
+            configure=self.after_connect,
+            timeout=connect_timeout,
+            max_waiting=round(pool_timeout),
+            max_lifetime=conn_max_age,
+            **kwargs,  # use the 'check' argument when no longer supporting Python 3.7
         )
+        self.lock_timeout = lock_timeout
+        self.schema = schema.strip()
 
-    def
-
-
-
-                dbname=self.dbname,
-                host=self.host,
-                port=self.port,
-                user=self.user,
-                password=self.password,
-                connect_timeout=self.connect_timeout,
-            )
-        except psycopg2.OperationalError as e:
-            raise OperationalError(e) from e
-        pg_conn.cursor().execute(
-            f"SET idle_in_transaction_session_timeout = "
+    def after_connect(self, conn: Connection[DictRow]) -> None:
+        conn.autocommit = True
+        conn.cursor().execute(
+            "SET idle_in_transaction_session_timeout = "
             f"'{self.idle_in_transaction_session_timeout}s'"
         )
-        return PostgresConnection(pg_conn, max_age=self.max_age)
-
-
-class PostgresTransaction:
-    def __init__(self, conn: PostgresConnection, commit: bool):
-        self.conn = conn
-        self.commit = commit
-        self.has_entered = False
 
-
-
-        return self.conn.cursor()
-
-    def __exit__(
-        self,
-        exc_type: Type[BaseException],
-        exc_val: BaseException,
-        exc_tb: TracebackType,
-    ) -> None:
+    @contextmanager
+    def get_connection(self) -> Iterator[Connection[DictRow]]:
         try:
-
-
-
-
-
-
-
-
-            self.conn.close()
+            wait = self.pool_open_timeout is not None
+            timeout = self.pool_open_timeout or 30.0
+            self.pool.open(wait, timeout)
+
+            with self.pool.connection() as conn:
+                yield conn
+        except psycopg.InterfaceError as e:
+            # conn.close()
             raise InterfaceError(str(e)) from e
-        except
-
-        except psycopg2.OperationalError as e:
-            self.conn.close()
+        except psycopg.OperationalError as e:
+            # conn.close()
             raise OperationalError(str(e)) from e
-        except
+        except psycopg.DataError as e:
+            raise DataError(str(e)) from e
+        except psycopg.IntegrityError as e:
             raise IntegrityError(str(e)) from e
-        except
+        except psycopg.InternalError as e:
             raise InternalError(str(e)) from e
-        except
+        except psycopg.ProgrammingError as e:
             raise ProgrammingError(str(e)) from e
-        except
+        except psycopg.NotSupportedError as e:
             raise NotSupportedError(str(e)) from e
-        except
+        except psycopg.DatabaseError as e:
             raise DatabaseError(str(e)) from e
-        except
+        except psycopg.Error as e:
+            # conn.close()
             raise PersistenceError(str(e)) from e
-
-
-
-    def __init__(
-        self,
-        dbname: str,
-        host: str,
-        port: str,
-        user: str,
-        password: str,
-        connect_timeout: int = 5,
-        idle_in_transaction_session_timeout: int = 0,
-        pool_size: int = 2,
-        max_overflow: int = 2,
-        pool_timeout: float = 5.0,
-        conn_max_age: Optional[float] = None,
-        pre_ping: bool = False,
-        lock_timeout: int = 0,
-        schema: str = "",
-    ):
-        self.pool = PostgresConnectionPool(
-            dbname=dbname,
-            host=host,
-            port=port,
-            user=user,
-            password=password,
-            connect_timeout=connect_timeout,
-            idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
-            pool_size=pool_size,
-            max_overflow=max_overflow,
-            pool_timeout=pool_timeout,
-            max_age=conn_max_age,
-            pre_ping=pre_ping,
-        )
-        self.lock_timeout = lock_timeout
-        self.schema = schema.strip()
+        except Exception:
+            # conn.close()
+            raise
 
     @contextmanager
-    def transaction(self, commit: bool) -> Iterator[
-
-
-
-
-    @contextmanager
-    def get_connection(self) -> Iterator[PostgresConnection]:
-        conn = self.pool.get_connection()
-        try:
-            yield conn
-        finally:
-            self.pool.put_connection(conn)
-
-    def report_on_prepared_statements(
-        self,
-    ) -> Tuple[List[List[Union[bool, str]]], List[str]]:
-        with self.get_connection() as conn:
-            with conn.cursor() as curs:
-                curs.execute("SELECT * from pg_prepared_statements")
-                return sorted(curs.fetchall()), sorted(conn.is_prepared)
+    def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
+        conn: Connection[DictRow]
+        with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
+            yield conn.cursor()
 
     def close(self) -> None:
         self.pool.close()
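
PostgresDatastore now owns a psycopg_pool-backed pool directly, and its
transaction() context manager yields a psycopg cursor, rolling work back unless
commit=True. A minimal sketch, assuming a local database and matching credentials:

    from eventsourcing.postgres import PostgresDatastore

    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="eventsourcing",
        password="eventsourcing",
        pre_ping=True,  # validate pooled connections before handing them out
    )

    with datastore.transaction(commit=True) as curs:
        curs.execute("SELECT 1 AS one")
        row = curs.fetchone()
        assert row is not None
        assert row["one"] == 1  # rows are dicts, via psycopg's dict_row factory

The later sketches below reuse this datastore object.
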
@@ -274,17 +157,12 @@ class PostgresDatastore:
         self.close()
 
 
-PG_IDENTIFIER_MAX_LEN = 63
-
-
 class PostgresAggregateRecorder(AggregateRecorder):
     def __init__(
         self,
         datastore: PostgresDatastore,
         events_table_name: str,
     ):
-        self.statement_name_aliases: Dict[str, str] = {}
-        self.statement_name_aliases_lock = Lock()
         self.check_table_name_length(events_table_name, datastore.schema)
         self.datastore = datastore
         self.events_table_name = events_table_name
@@ -300,15 +178,12 @@ class PostgresAggregateRecorder(AggregateRecorder):
 
         self.create_table_statements = self.construct_create_table_statements()
         self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES (
-        )
-        self.insert_events_statement_name = f"insert_{events_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
         )
         self.select_events_statement = (
-            f"SELECT * FROM {self.events_table_name} WHERE originator_id =
+            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
         )
-        self.
+        self.lock_table_statements: List[str] = []
 
     @staticmethod
     def check_table_name_length(table_name: str, schema_name: str) -> None:
@@ -318,47 +193,8 @@ class PostgresAggregateRecorder(AggregateRecorder):
         else:
             unqualified_table_name = table_name
         if len(unqualified_table_name) > 63:
-
-
-    def get_statement_alias(self, statement_name: str) -> str:
-        try:
-            alias = self.statement_name_aliases[statement_name]
-        except KeyError:
-            with self.statement_name_aliases_lock:
-                try:
-                    alias = self.statement_name_aliases[statement_name]
-                except KeyError:
-                    existing_aliases = self.statement_name_aliases.values()
-                    if (
-                        len(statement_name) <= PG_IDENTIFIER_MAX_LEN
-                        and statement_name not in existing_aliases
-                    ):
-                        alias = statement_name
-                        self.statement_name_aliases[statement_name] = alias
-                    else:
-                        uid = uuid5(
-                            NAMESPACE_URL, f"/statement_names/{statement_name}"
-                        ).hex
-                        alias = uid
-                        for i in range(len(uid)):  # pragma: no cover
-                            preserve_end = 21
-                            preserve_start = (
-                                PG_IDENTIFIER_MAX_LEN - preserve_end - i - 2
-                            )
-                            uuid5_tail = i
-                            candidate = (
-                                statement_name[:preserve_start]
-                                + "_"
-                                + (uid[-uuid5_tail:] if i else "")
-                                + "_"
-                                + statement_name[-preserve_end:]
-                            )
-                            assert len(alias) <= PG_IDENTIFIER_MAX_LEN
-                            if candidate not in existing_aliases:
-                                alias = candidate
-                                break
-                        self.statement_name_aliases[statement_name] = alias
-        return alias
+            msg = f"Table name too long: {unqualified_table_name}"
+            raise ProgrammingError(msg)
 
     def construct_create_table_statements(self) -> List[str]:
         statement = (
@@ -377,155 +213,121 @@ class PostgresAggregateRecorder(AggregateRecorder):
     def create_table(self) -> None:
         with self.datastore.transaction(commit=True) as curs:
             for statement in self.create_table_statements:
-                curs.execute(statement)
-            pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
+                curs.execute(statement, prepare=False)
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_events(
         self, stored_events: List[StoredEvent], **kwargs: Any
-    ) ->
+    ) -> Sequence[int] | None:
+        conn: Connection[DictRow]
+        exc: Exception | None = None
+        notification_ids: Sequence[int] | None = None
         with self.datastore.get_connection() as conn:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            except psycopg2.errors.lookup(DUPLICATE_PREPARED_STATEMENT):  # noqa
-                pass
-            conn.is_prepared.add(statement_name)
-        return statement_name_alias
+            with conn.pipeline() as pipeline, conn.transaction():
+                # Do other things first, so they can be pipelined too.
+                with conn.cursor() as curs:
+                    self._insert_events(curs, stored_events, **kwargs)
+                # Then use a different cursor for the executemany() call.
+                with conn.cursor() as curs:
+                    try:
+                        self._insert_stored_events(curs, stored_events, **kwargs)
+                        # Sync now, so any uniqueness constraint violation causes an
+                        # IntegrityError to be raised here, rather an InternalError
+                        # being raised sometime later e.g. when commit() is called.
+                        pipeline.sync()
+                        notification_ids = self._fetch_ids_after_insert_events(
+                            curs, stored_events, **kwargs
+                        )
+                    except Exception as e:
+                        # Avoid psycopg emitting a pipeline warning.
+                        exc = e
+            if exc:
+                # Reraise exception after pipeline context manager has exited.
+                raise exc
+        return notification_ids
 
     def _insert_events(
         self,
-        c:
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) ->
-
-        # insertion of notification IDs is monotonic for notification log
-        # readers. We want concurrent transactions to commit inserted
-        # notification_id values in order, and by locking the table for writes,
-        # it can be guaranteed. The EXCLUSIVE lock mode does not block
-        # the ACCESS SHARE lock which is acquired during SELECT statements,
-        # so the table can be read concurrently. However, INSERT normally
-        # just acquires ROW EXCLUSIVE locks, which risks interleaving of
-        # many inserts in one transaction with many insert in another
-        # transaction. Since one transaction will commit before another,
-        # the possibility arises for readers that are tailing a notification
-        # log to miss items inserted later but with lower notification IDs.
-        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
-        # https://www.postgresql.org/docs/9.1/sql-lock.html
-        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
-        # -postgresql-serial-column-values-by-commit-order
-
-        len_stored_events = len(stored_events)
+    ) -> None:
+        pass
 
+    def _insert_stored_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **_: Any,
+    ) -> None:
         # Only do something if there is something to do.
-        if
-
-
-
-
-
-
-
-
-
-
-
-                        stored_event.originator_id,
-                        stored_event.originator_version,
-                        stored_event.topic,
-                        stored_event.state,
-                    ),
+        if len(stored_events) > 0:
+            self._lock_table(c)
+
+            # Insert events.
+            c.executemany(
+                query=self.insert_events_statement,
+                params_seq=[
+                    (
+                        stored_event.originator_id,
+                        stored_event.originator_version,
+                        stored_event.topic,
+                        stored_event.state,
                     )
-                for stored_event in
-
-
-
-
-
-
-        commands = [
-            b"; ".join(page)
-            for page in chain([chain(lock_sqls, pages[0])], pages[1:])
-        ]
+                    for stored_event in stored_events
+                ],
+                returning="RETURNING" in self.insert_events_statement,
+            )
+
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        pass
 
-
-
-
+    def _fetch_ids_after_insert_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **kwargs: Any,
+    ) -> Sequence[int] | None:
         return None
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_events(
         self,
         originator_id: UUID,
-
-
+        *,
+        gt: int | None = None,
+        lte: int | None = None,
         desc: bool = False,
-        limit:
+        limit: int | None = None,
     ) -> List[StoredEvent]:
-
+        statement = self.select_events_statement
         params: List[Any] = [originator_id]
-        statement_name = f"select_{self.events_table_name}".replace(".", "_")
         if gt is not None:
             params.append(gt)
-
-            statement_name += "_gt"
+            statement += " AND originator_version > %s"
         if lte is not None:
             params.append(lte)
-
-
-        parts.append("ORDER BY originator_version")
+            statement += " AND originator_version <= %s"
+        statement += " ORDER BY originator_version"
         if desc is False:
-
+            statement += " ASC"
         else:
-
-            statement_name += "_desc"
+            statement += " DESC"
         if limit is not None:
             params.append(limit)
-
-
-
-
-
-
-
-
-
-
-                curs.execute(
-                    f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
-                    params,
+            statement += " LIMIT %s"
+
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                StoredEvent(
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall()
-
-                    StoredEvent(
-                        originator_id=row["originator_id"],
-                        originator_version=row["originator_version"],
-                        topic=row["topic"],
-                        state=bytes(row["state"]),
-                    )
-                )
-            pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return stored_events
+                for row in curs.fetchall()
+            ]
 
 
 class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder):
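
insert_events() now batches its work through a psycopg pipeline, with the table
lock, the executemany() insert, and the RETURNING fetch factored into overridable
steps. A sketch of exercising the recorder, reusing the datastore from the sketch
above (the table name and topic string are illustrative):

    from uuid import uuid4

    from eventsourcing.persistence import StoredEvent
    from eventsourcing.postgres import PostgresAggregateRecorder

    recorder = PostgresAggregateRecorder(datastore, events_table_name="aggregate_events")
    recorder.create_table()

    originator_id = uuid4()
    recorder.insert_events(
        [
            StoredEvent(
                originator_id=originator_id,
                originator_version=1,
                topic="example:Aggregate.Created",
                state=b"{}",
            )
        ]
    )
    assert len(recorder.select_events(originator_id, gt=0, limit=10)) == 1
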
@@ -535,45 +337,42 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         events_table_name: str = "stored_events",
     ):
         super().__init__(datastore, events_table_name)
-        self.insert_events_statement
-            f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4) "
-            f"RETURNING notification_id"
-        )
+        self.insert_events_statement += " RETURNING notification_id"
         self.max_notification_id_statement = (
             f"SELECT MAX(notification_id) FROM {self.events_table_name}"
         )
-        self.
-            f"max_notification_id_{events_table_name}".replace(".", "_")
-        )
-        self.lock_statements = [
+        self.lock_table_statements = [
             f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
             f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
         ]
 
     def construct_create_table_statements(self) -> List[str]:
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+        return [
+            (
+                "CREATE TABLE IF NOT EXISTS "
+                f"{self.events_table_name} ("
+                "originator_id uuid NOT NULL, "
+                "originator_version bigint NOT NULL, "
+                "topic text, "
+                "state bytea, "
+                "notification_id bigserial, "
+                "PRIMARY KEY "
+                "(originator_id, originator_version)) "
+                "WITH (autovacuum_enabled=false)"
+            ),
+            (
+                "CREATE UNIQUE INDEX IF NOT EXISTS "
+                f"{self.notification_id_index_name}"
+                f"ON {self.events_table_name} (notification_id ASC);"
+            ),
         ]
-        return statements
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_notifications(
         self,
         start: int,
         limit: int,
-        stop:
+        stop: int | None = None,
         topics: Sequence[str] = (),
     ) -> List[Notification]:
         """
@@ -581,86 +380,91 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         from 'start', limited by 'limit'.
         """
 
-        params: List[
-        statement =
-            "SELECT * " f"FROM {self.events_table_name} " "WHERE notification_id>=$1 "
-        )
-        statement_name = f"select_notifications_{self.events_table_name}".replace(
-            ".", "_"
-        )
+        params: List[int | str | Sequence[str]] = [start]
+        statement = f"SELECT * FROM {self.events_table_name} WHERE notification_id>=%s"
 
         if stop is not None:
             params.append(stop)
-            statement +=
-            statement_name += "_stop"
+            statement += " AND notification_id <= %s"
 
         if topics:
             params.append(topics)
-            statement +=
-            statement_name += "_topics"
+            statement += " AND topic = ANY(%s)"
 
         params.append(limit)
-        statement += "ORDER BY notification_id
-
-
-        with
-
-
-
-
-
-
-
-
-                    params,
+        statement += " ORDER BY notification_id LIMIT %s"
+
+        connection = self.datastore.get_connection()
+        with connection as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                Notification(
+                    id=row["notification_id"],
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall()
-
-                    Notification(
-                        id=row["notification_id"],
-                        originator_id=row["originator_id"],
-                        originator_version=row["originator_version"],
-                        topic=row["topic"],
-                        state=bytes(row["state"]),
-                    )
-                )
-            pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return notifications
+                for row in curs.fetchall()
+            ]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_notification_id(self) -> int:
         """
         Returns the maximum notification ID.
         """
-
-        with self.datastore.get_connection() as conn:
-
-
-
-
-
-
-
-
-
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(self.max_notification_id_statement)
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
+
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
+        # stored events, so that readers don't pass over gaps that are filled in
+        # later. We want each transaction that will be issued with notifications
+        # IDs by the notification ID sequence to receive all its notification IDs
+        # and then commit, before another transaction is issued with any notification
+        # IDs. In other words, we want the insert order to be the same as the commit
+        # order. We can accomplish this by locking the table for writes. The
+        # EXCLUSIVE lock mode does not block SELECT statements, which acquire an
+        # ACCESS SHARE lock, so the stored events table can be read concurrently
+        # with writes and other reads. However, INSERT statements normally just
+        # acquires ROW EXCLUSIVE locks, which risks the interleaving (within the
+        # recorded sequence of notification IDs) of stored events from one transaction
+        # with those of another transaction. And since one transaction will always
+        # commit before another, the possibility arises when using ROW EXCLUSIVE locks
+        # for readers that are tailing a notification log to miss items inserted later
+        # but issued with lower notification IDs.
+        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
+        # https://www.postgresql.org/docs/9.1/sql-lock.html
+        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
+        # -postgresql-serial-column-values-by-commit-order
+        for lock_statement in self.lock_table_statements:
+            c.execute(lock_statement, prepare=True)
 
-    def
+    def _fetch_ids_after_insert_events(
         self,
-        c:
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) ->
-
-
-
-
-
-
-
-
-
-
-
+    ) -> Sequence[int] | None:
+        notification_ids: List[int] = []
+        len_events = len(stored_events)
+        if len_events:
+            if (
+                (c.statusmessage == "SET")
+                and c.nextset()
+                and (c.statusmessage == "LOCK TABLE")
+            ):
+                while c.nextset() and len(notification_ids) != len_events:
+                    row = c.fetchone()
+                    assert row is not None
+                    notification_ids.append(row["notification_id"])
+            if len(notification_ids) != len(stored_events):
+                msg = "Couldn't get all notification IDs"
+                raise ProgrammingError(msg)
         return notification_ids
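
The EXCLUSIVE table lock taken in _lock_table() keeps commit order aligned with
notification ID order, so a reader paging through the log cannot skip over gaps
that are filled in later. A sketch of tailing the notification log, again reusing
the datastore above (the default events table name is "stored_events"):

    from eventsourcing.postgres import PostgresApplicationRecorder

    app_recorder = PostgresApplicationRecorder(datastore)
    app_recorder.create_table()

    last_seen = 0
    for notification in app_recorder.select_notifications(start=last_seen + 1, limit=10):
        print(notification.id, notification.topic)
        last_seen = notification.id

    print("max notification id:", app_recorder.max_notification_id())
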
@@ -675,26 +479,17 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
         self.tracking_table_name = tracking_table_name
         super().__init__(datastore, events_table_name)
         self.insert_tracking_statement = (
-            f"INSERT INTO {self.tracking_table_name} VALUES (
-        )
-        self.insert_tracking_statement_name = f"insert_{tracking_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
         )
         self.max_tracking_id_statement = (
             "SELECT MAX(notification_id) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name
+            "WHERE application_name=%s"
         )
         self.count_tracking_id_statement = (
             "SELECT COUNT(*) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name
-        )
-        self.max_tracking_id_statement_name = (
-            f"max_tracking_id_{tracking_table_name}".replace(".", "_")
-        )
-        self.count_tracking_id_statement_name = (
-            f"count_tracking_id_{tracking_table_name}".replace(".", "_")
+            "WHERE application_name=%s AND notification_id=%s"
         )
 
     def construct_create_table_statements(self) -> List[str]:
@@ -711,61 +506,46 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_tracking_id(self, application_name: str) -> int:
-
-
-
-
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.max_tracking_id_statement,
+                params=(application_name,),
+                prepare=True,
             )
-
-
-
-                f"EXECUTE {statement_alias}(%s)",
-                (application_name,),
-            )
-            max_id = curs.fetchone()[0] or 0
-        return max_id
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
-
-        with self.datastore.get_connection() as conn:
-
-
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.count_tracking_id_statement,
+                params=(application_name, notification_id),
+                prepare=True,
             )
-
-
-
-                f"EXECUTE {statement_alias}(%s, %s)",
-                (application_name, notification_id),
-            )
-            return bool(curs.fetchone()[0])
-
-    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
-        super()._prepare_insert_events(conn)
-        self._prepare(
-            conn, self.insert_tracking_statement_name, self.insert_tracking_statement
-        )
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return bool(fetchone["count"])
 
     def _insert_events(
         self,
-        c:
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) ->
-
-        tracking: Optional[Tracking] = kwargs.get("tracking", None)
+    ) -> None:
+        tracking: Tracking | None = kwargs.get("tracking", None)
         if tracking is not None:
-            statement_alias = self.statement_name_aliases[
-                self.insert_tracking_statement_name
-            ]
             c.execute(
-
-                (
+                query=self.insert_tracking_statement,
+                params=(
                     tracking.application_name,
                     tracking.notification_id,
                 ),
+                prepare=True,
             )
-
+        super()._insert_events(c, stored_events, **kwargs)
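
The process recorder's tracking queries now rely on psycopg's server-side
prepared statements (prepare=True) instead of the hand-managed EXECUTE aliases.
A sketch of the tracking API, with an illustrative upstream application name:

    from eventsourcing.postgres import PostgresProcessRecorder

    process_recorder = PostgresProcessRecorder(
        datastore,
        events_table_name="stored_events",
        tracking_table_name="notification_tracking",
    )
    process_recorder.create_table()

    assert process_recorder.max_tracking_id("upstream_app") == 0
    assert process_recorder.has_tracking_id("upstream_app", 1) is False
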
@@ -773,7 +553,8 @@ class Factory(InfrastructureFactory):
     POSTGRES_HOST = "POSTGRES_HOST"
     POSTGRES_PORT = "POSTGRES_PORT"
     POSTGRES_USER = "POSTGRES_USER"
-    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"
+    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"  # noqa: S105
+    POSTGRES_GET_PASSWORD_TOPIC = "POSTGRES_GET_PASSWORD_TOPIC"  # noqa: S105
     POSTGRES_CONNECT_TIMEOUT = "POSTGRES_CONNECT_TIMEOUT"
     POSTGRES_CONN_MAX_AGE = "POSTGRES_CONN_MAX_AGE"
     POSTGRES_PRE_PING = "POSTGRES_PRE_PING"
@@ -795,54 +576,61 @@ class Factory(InfrastructureFactory):
         super().__init__(env)
         dbname = self.env.get(self.POSTGRES_DBNAME)
         if dbname is None:
-
+            msg = (
                 "Postgres database name not found "
                 "in environment with key "
                 f"'{self.POSTGRES_DBNAME}'"
             )
+            raise OSError(msg)
 
         host = self.env.get(self.POSTGRES_HOST)
         if host is None:
-
+            msg = (
                 "Postgres host not found "
                 "in environment with key "
                 f"'{self.POSTGRES_HOST}'"
             )
+            raise OSError(msg)
 
         port = self.env.get(self.POSTGRES_PORT) or "5432"
 
         user = self.env.get(self.POSTGRES_USER)
         if user is None:
-
+            msg = (
                 "Postgres user not found "
                 "in environment with key "
                 f"'{self.POSTGRES_USER}'"
             )
+            raise OSError(msg)
+
+        get_password_func = None
+        get_password_topic = self.env.get(self.POSTGRES_GET_PASSWORD_TOPIC)
+        if not get_password_topic:
+            password = self.env.get(self.POSTGRES_PASSWORD)
+            if password is None:
+                msg = (
+                    "Postgres password not found "
+                    "in environment with key "
+                    f"'{self.POSTGRES_PASSWORD}'"
+                )
+                raise OSError(msg)
+        else:
+            get_password_func = resolve_topic(get_password_topic)
+            password = ""
 
-
-        if password is None:
-            raise EnvironmentError(
-                "Postgres password not found "
-                "in environment with key "
-                f"'{self.POSTGRES_PASSWORD}'"
-            )
-
-        connect_timeout: Optional[int]
+        connect_timeout = 5
         connect_timeout_str = self.env.get(self.POSTGRES_CONNECT_TIMEOUT)
-        if connect_timeout_str
-            connect_timeout = 5
-        elif connect_timeout_str == "":
-            connect_timeout = 5
-        else:
+        if connect_timeout_str:
             try:
                 connect_timeout = int(connect_timeout_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONNECT_TIMEOUT}' is invalid. "
-
+                    "If set, an integer or empty string is expected: "
                     f"'{connect_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
         idle_in_transaction_session_timeout_str = (
             self.env.get(self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT) or "5"
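
With the new POSTGRES_GET_PASSWORD_TOPIC setting, the factory resolves a topic to
a callable that supplies the password, instead of requiring a static
POSTGRES_PASSWORD. A sketch of the environment, where the module path
myproject.secrets and its get_password() function are hypothetical (and
PERSISTENCE_MODULE is assumed to select this postgres module as usual):

    import os

    os.environ["PERSISTENCE_MODULE"] = "eventsourcing.postgres"
    os.environ["POSTGRES_DBNAME"] = "eventsourcing"
    os.environ["POSTGRES_HOST"] = "127.0.0.1"
    os.environ["POSTGRES_USER"] = "eventsourcing"
    # Resolved with resolve_topic() to a callable returning the current password.
    os.environ["POSTGRES_GET_PASSWORD_TOPIC"] = "myproject.secrets:get_password"
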
@@ -853,80 +641,69 @@ class Factory(InfrastructureFactory):
                     idle_in_transaction_session_timeout_str
                 )
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT}' is invalid. "
-
+                    "If set, an integer or empty string is expected: "
                     f"'{idle_in_transaction_session_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_size
+        pool_size = 5
         pool_size_str = self.env.get(self.POSTGRES_POOL_SIZE)
-        if pool_size_str
-            pool_size = 5
-        elif pool_size_str == "":
-            pool_size = 5
-        else:
+        if pool_size_str:
             try:
                 pool_size = int(pool_size_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_POOL_SIZE}' is invalid. "
-
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_size_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_max_overflow
+        pool_max_overflow = 10
         pool_max_overflow_str = self.env.get(self.POSTGRES_POOL_MAX_OVERFLOW)
-        if pool_max_overflow_str
-            pool_max_overflow = 10
-        elif pool_max_overflow_str == "":
-            pool_max_overflow = 10
-        else:
+        if pool_max_overflow_str:
             try:
                 pool_max_overflow = int(pool_max_overflow_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_POOL_MAX_OVERFLOW}' is invalid. "
-
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_max_overflow_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_timeout
+        pool_timeout = 30.0
         pool_timeout_str = self.env.get(self.POSTGRES_POOL_TIMEOUT)
-        if pool_timeout_str
-            pool_timeout = 30
-        elif pool_timeout_str == "":
-            pool_timeout = 30
-        else:
+        if pool_timeout_str:
             try:
                 pool_timeout = float(pool_timeout_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_POOL_TIMEOUT}' is invalid. "
-
+                    "If set, a float or empty string is expected: "
                     f"'{pool_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
-        conn_max_age
+        conn_max_age = 60 * 60.0
         conn_max_age_str = self.env.get(self.POSTGRES_CONN_MAX_AGE)
-        if conn_max_age_str
-            conn_max_age = None
-        elif conn_max_age_str == "":
-            conn_max_age = None
-        else:
+        if conn_max_age_str:
             try:
                 conn_max_age = float(conn_max_age_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONN_MAX_AGE}' is invalid. "
-
+                    "If set, a float or empty string is expected: "
                     f"'{conn_max_age_str}'"
                 )
+                raise OSError(msg) from None
 
         pre_ping = strtobool(self.env.get(self.POSTGRES_PRE_PING) or "no")
 
@@ -935,12 +712,13 @@ class Factory(InfrastructureFactory):
             try:
                 lock_timeout = int(lock_timeout_str)
             except ValueError:
-
-
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_LOCK_TIMEOUT}' is invalid. "
-
+                    "If set, an integer or empty string is expected: "
                     f"'{lock_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
         schema = self.env.get(self.POSTGRES_SCHEMA) or ""
 
@@ -950,6 +728,7 @@ class Factory(InfrastructureFactory):
             port=port,
             user=user,
             password=password,
+            get_password_func=get_password_func,
             connect_timeout=connect_timeout,
             idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
             pool_size=pool_size,
@@ -1008,4 +787,8 @@ class Factory(InfrastructureFactory):
         return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
 
     def close(self) -> None:
-        self
+        if hasattr(self, "datastore"):
+            self.datastore.close()
+
+    def __del__(self) -> None:
+        self.close()