eventsourcing 9.2.21__py3-none-any.whl → 9.3.0__py3-none-any.whl

This diff shows the changes between publicly available versions of the package, as they appear in their respective public registries. It is provided for informational purposes only.

Potentially problematic release: this version of eventsourcing might be problematic.

Files changed (145)
  1. eventsourcing/__init__.py +1 -1
  2. eventsourcing/application.py +137 -132
  3. eventsourcing/cipher.py +17 -12
  4. eventsourcing/compressor.py +2 -0
  5. eventsourcing/dispatch.py +30 -56
  6. eventsourcing/domain.py +221 -227
  7. eventsourcing/examples/__init__.py +0 -0
  8. eventsourcing/examples/aggregate1/__init__.py +0 -0
  9. eventsourcing/examples/aggregate1/application.py +27 -0
  10. eventsourcing/examples/aggregate1/domainmodel.py +16 -0
  11. eventsourcing/examples/aggregate1/test_application.py +37 -0
  12. eventsourcing/examples/aggregate2/__init__.py +0 -0
  13. eventsourcing/examples/aggregate2/application.py +27 -0
  14. eventsourcing/examples/aggregate2/domainmodel.py +22 -0
  15. eventsourcing/examples/aggregate2/test_application.py +37 -0
  16. eventsourcing/examples/aggregate3/__init__.py +0 -0
  17. eventsourcing/examples/aggregate3/application.py +27 -0
  18. eventsourcing/examples/aggregate3/domainmodel.py +38 -0
  19. eventsourcing/examples/aggregate3/test_application.py +37 -0
  20. eventsourcing/examples/aggregate4/__init__.py +0 -0
  21. eventsourcing/examples/aggregate4/application.py +27 -0
  22. eventsourcing/examples/aggregate4/domainmodel.py +114 -0
  23. eventsourcing/examples/aggregate4/test_application.py +38 -0
  24. eventsourcing/examples/aggregate5/__init__.py +0 -0
  25. eventsourcing/examples/aggregate5/application.py +27 -0
  26. eventsourcing/examples/aggregate5/domainmodel.py +131 -0
  27. eventsourcing/examples/aggregate5/test_application.py +38 -0
  28. eventsourcing/examples/aggregate6/__init__.py +0 -0
  29. eventsourcing/examples/aggregate6/application.py +30 -0
  30. eventsourcing/examples/aggregate6/domainmodel.py +123 -0
  31. eventsourcing/examples/aggregate6/test_application.py +38 -0
  32. eventsourcing/examples/aggregate6a/__init__.py +0 -0
  33. eventsourcing/examples/aggregate6a/application.py +40 -0
  34. eventsourcing/examples/aggregate6a/domainmodel.py +149 -0
  35. eventsourcing/examples/aggregate6a/test_application.py +45 -0
  36. eventsourcing/examples/aggregate7/__init__.py +0 -0
  37. eventsourcing/examples/aggregate7/application.py +48 -0
  38. eventsourcing/examples/aggregate7/domainmodel.py +144 -0
  39. eventsourcing/examples/aggregate7/persistence.py +57 -0
  40. eventsourcing/examples/aggregate7/test_application.py +38 -0
  41. eventsourcing/examples/aggregate7/test_compression_and_encryption.py +45 -0
  42. eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +67 -0
  43. eventsourcing/examples/aggregate7a/__init__.py +0 -0
  44. eventsourcing/examples/aggregate7a/application.py +56 -0
  45. eventsourcing/examples/aggregate7a/domainmodel.py +170 -0
  46. eventsourcing/examples/aggregate7a/test_application.py +46 -0
  47. eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +45 -0
  48. eventsourcing/examples/aggregate8/__init__.py +0 -0
  49. eventsourcing/examples/aggregate8/application.py +47 -0
  50. eventsourcing/examples/aggregate8/domainmodel.py +65 -0
  51. eventsourcing/examples/aggregate8/persistence.py +57 -0
  52. eventsourcing/examples/aggregate8/test_application.py +37 -0
  53. eventsourcing/examples/aggregate8/test_compression_and_encryption.py +44 -0
  54. eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +38 -0
  55. eventsourcing/examples/bankaccounts/__init__.py +0 -0
  56. eventsourcing/examples/bankaccounts/application.py +70 -0
  57. eventsourcing/examples/bankaccounts/domainmodel.py +56 -0
  58. eventsourcing/examples/bankaccounts/test.py +173 -0
  59. eventsourcing/examples/cargoshipping/__init__.py +0 -0
  60. eventsourcing/examples/cargoshipping/application.py +126 -0
  61. eventsourcing/examples/cargoshipping/domainmodel.py +330 -0
  62. eventsourcing/examples/cargoshipping/interface.py +143 -0
  63. eventsourcing/examples/cargoshipping/test.py +231 -0
  64. eventsourcing/examples/contentmanagement/__init__.py +0 -0
  65. eventsourcing/examples/contentmanagement/application.py +118 -0
  66. eventsourcing/examples/contentmanagement/domainmodel.py +69 -0
  67. eventsourcing/examples/contentmanagement/test.py +180 -0
  68. eventsourcing/examples/contentmanagement/utils.py +26 -0
  69. eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
  70. eventsourcing/examples/contentmanagementsystem/application.py +54 -0
  71. eventsourcing/examples/contentmanagementsystem/postgres.py +17 -0
  72. eventsourcing/examples/contentmanagementsystem/sqlite.py +17 -0
  73. eventsourcing/examples/contentmanagementsystem/system.py +14 -0
  74. eventsourcing/examples/contentmanagementsystem/test_system.py +180 -0
  75. eventsourcing/examples/searchablecontent/__init__.py +0 -0
  76. eventsourcing/examples/searchablecontent/application.py +45 -0
  77. eventsourcing/examples/searchablecontent/persistence.py +23 -0
  78. eventsourcing/examples/searchablecontent/postgres.py +118 -0
  79. eventsourcing/examples/searchablecontent/sqlite.py +136 -0
  80. eventsourcing/examples/searchablecontent/test_application.py +110 -0
  81. eventsourcing/examples/searchablecontent/test_recorder.py +68 -0
  82. eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
  83. eventsourcing/examples/searchabletimestamps/application.py +32 -0
  84. eventsourcing/examples/searchabletimestamps/persistence.py +20 -0
  85. eventsourcing/examples/searchabletimestamps/postgres.py +110 -0
  86. eventsourcing/examples/searchabletimestamps/sqlite.py +99 -0
  87. eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +94 -0
  88. eventsourcing/examples/test_invoice.py +176 -0
  89. eventsourcing/examples/test_parking_lot.py +206 -0
  90. eventsourcing/interface.py +4 -2
  91. eventsourcing/persistence.py +88 -82
  92. eventsourcing/popo.py +32 -31
  93. eventsourcing/postgres.py +388 -593
  94. eventsourcing/sqlite.py +100 -102
  95. eventsourcing/system.py +66 -71
  96. eventsourcing/tests/application.py +20 -32
  97. eventsourcing/tests/application_tests/__init__.py +0 -0
  98. eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +55 -0
  99. eventsourcing/tests/application_tests/test_application_with_popo.py +22 -0
  100. eventsourcing/tests/application_tests/test_application_with_postgres.py +75 -0
  101. eventsourcing/tests/application_tests/test_application_with_sqlite.py +72 -0
  102. eventsourcing/tests/application_tests/test_cache.py +134 -0
  103. eventsourcing/tests/application_tests/test_event_sourced_log.py +162 -0
  104. eventsourcing/tests/application_tests/test_notificationlog.py +232 -0
  105. eventsourcing/tests/application_tests/test_notificationlogreader.py +126 -0
  106. eventsourcing/tests/application_tests/test_processapplication.py +110 -0
  107. eventsourcing/tests/application_tests/test_processingpolicy.py +109 -0
  108. eventsourcing/tests/application_tests/test_repository.py +504 -0
  109. eventsourcing/tests/application_tests/test_snapshotting.py +68 -0
  110. eventsourcing/tests/application_tests/test_upcasting.py +459 -0
  111. eventsourcing/tests/docs_tests/__init__.py +0 -0
  112. eventsourcing/tests/docs_tests/test_docs.py +293 -0
  113. eventsourcing/tests/domain.py +1 -1
  114. eventsourcing/tests/domain_tests/__init__.py +0 -0
  115. eventsourcing/tests/domain_tests/test_aggregate.py +1180 -0
  116. eventsourcing/tests/domain_tests/test_aggregate_decorators.py +1604 -0
  117. eventsourcing/tests/domain_tests/test_domainevent.py +80 -0
  118. eventsourcing/tests/interface_tests/__init__.py +0 -0
  119. eventsourcing/tests/interface_tests/test_remotenotificationlog.py +258 -0
  120. eventsourcing/tests/persistence.py +52 -50
  121. eventsourcing/tests/persistence_tests/__init__.py +0 -0
  122. eventsourcing/tests/persistence_tests/test_aes.py +93 -0
  123. eventsourcing/tests/persistence_tests/test_connection_pool.py +722 -0
  124. eventsourcing/tests/persistence_tests/test_eventstore.py +72 -0
  125. eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +21 -0
  126. eventsourcing/tests/persistence_tests/test_mapper.py +113 -0
  127. eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +69 -0
  128. eventsourcing/tests/persistence_tests/test_popo.py +124 -0
  129. eventsourcing/tests/persistence_tests/test_postgres.py +1119 -0
  130. eventsourcing/tests/persistence_tests/test_sqlite.py +348 -0
  131. eventsourcing/tests/persistence_tests/test_transcoder.py +44 -0
  132. eventsourcing/tests/postgres_utils.py +7 -7
  133. eventsourcing/tests/system_tests/__init__.py +0 -0
  134. eventsourcing/tests/system_tests/test_runner.py +935 -0
  135. eventsourcing/tests/system_tests/test_system.py +284 -0
  136. eventsourcing/tests/utils_tests/__init__.py +0 -0
  137. eventsourcing/tests/utils_tests/test_utils.py +226 -0
  138. eventsourcing/utils.py +49 -50
  139. {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/METADATA +30 -33
  140. eventsourcing-9.3.0.dist-info/RECORD +145 -0
  141. {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/WHEEL +1 -2
  142. eventsourcing-9.2.21.dist-info/RECORD +0 -25
  143. eventsourcing-9.2.21.dist-info/top_level.txt +0 -1
  144. {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/AUTHORS +0 -0
  145. {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/LICENSE +0 -0
eventsourcing/postgres.py CHANGED
@@ -1,33 +1,19 @@
+from __future__ import annotations
+
+import logging
 from contextlib import contextmanager
-from itertools import chain
-from threading import Lock
-from types import TracebackType
-from typing import (
-    Any,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Type,
-    Union,
-)
-from uuid import NAMESPACE_URL, UUID, uuid5
+from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Sequence
 
-import psycopg2
-import psycopg2.errors
-import psycopg2.extras
-from psycopg2.errorcodes import DUPLICATE_PREPARED_STATEMENT
-from psycopg2.extensions import connection, cursor
+import psycopg
+import psycopg.errors
+import psycopg_pool
+from psycopg import Connection, Cursor
+from psycopg.rows import DictRow, dict_row
+from typing_extensions import Self
 
 from eventsourcing.persistence import (
     AggregateRecorder,
     ApplicationRecorder,
-    Connection,
-    ConnectionPool,
-    Cursor,
     DatabaseError,
     DataError,
     InfrastructureFactory,
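
Note: the import changes above move the module from psycopg2 to psycopg 3 (plus psycopg_pool). A minimal sketch, not taken from the package, of the psycopg 3 idioms the rewritten module relies on — dict_row row factories and "%s" placeholders; the DSN is hypothetical:

    import psycopg
    from psycopg.rows import dict_row

    # Connect with a dict row factory, so rows are mappings keyed by column name.
    with psycopg.connect("dbname=mydb user=me", row_factory=dict_row) as conn:
        with conn.cursor() as curs:
            # "%s" placeholders replace the "$1" style used with PREPARE/EXECUTE.
            curs.execute("SELECT %s AS answer", (42,))
            row = curs.fetchone()
            assert row is not None
            print(row["answer"])
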
@@ -43,74 +29,32 @@ from eventsourcing.persistence import (
     StoredEvent,
     Tracking,
 )
-from eventsourcing.utils import Environment, retry, strtobool
-
-psycopg2.extras.register_uuid()
-
-
-class PostgresCursor(Cursor):
-    def __init__(self, pg_cursor: cursor):
-        self.pg_cursor = pg_cursor
-
-    def __enter__(self, *args: Any, **kwargs: Any) -> "PostgresCursor":
-        self.pg_cursor.__enter__(*args, **kwargs)
-        return self
-
-    def __exit__(self, *args: Any, **kwargs: Any) -> None:
-        return self.pg_cursor.__exit__(*args, **kwargs)
-
-    def mogrify(self, statement: str, params: Any = None) -> bytes:
-        return self.pg_cursor.mogrify(statement, vars=params)
-
-    def execute(self, statement: Union[str, bytes], params: Any = None) -> None:
-        self.pg_cursor.execute(query=statement, vars=params)
-
-    def fetchall(self) -> Any:
-        return self.pg_cursor.fetchall()
+from eventsourcing.utils import Environment, resolve_topic, retry, strtobool
 
-    def fetchone(self) -> Any:
-        return self.pg_cursor.fetchone()
+if TYPE_CHECKING:  # pragma: nocover
+    from uuid import UUID
 
-    @property
-    def closed(self) -> bool:
-        return self.pg_cursor.closed
+logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
+logging.getLogger("psycopg").setLevel(logging.CRITICAL)
 
 
-class PostgresConnection(Connection[PostgresCursor]):
-    def __init__(self, pg_conn: connection, max_age: Optional[float]):
-        super().__init__(max_age=max_age)
-        self._pg_conn = pg_conn
-        self.is_prepared: Set[str] = set()
-
-    @contextmanager
-    def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
-        # Context managed transaction.
-        with PostgresTransaction(self, commit) as curs:
-            # Context managed cursor.
-            with curs:
-                yield curs
-
-    def cursor(self) -> PostgresCursor:
-        return PostgresCursor(
-            self._pg_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
-        )
-
-    def rollback(self) -> None:
-        self._pg_conn.rollback()
-
-    def commit(self) -> None:
-        self._pg_conn.commit()
-
-    def _close(self) -> None:
-        self._pg_conn.close()
-        super()._close()
+class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
+    def __init__(
+        self,
+        *args: Any,
+        get_password_func: Callable[[], str] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        self.get_password_func = get_password_func
+        super().__init__(*args, **kwargs)
 
-    @property
-    def closed(self) -> bool:
-        return bool(self._pg_conn.closed)
+    def _connect(self, timeout: float | None = None) -> Connection[Any]:
+        if self.get_password_func:
+            self.kwargs["password"] = self.get_password_func()
+        return super()._connect(timeout=timeout)
 
 
-class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
+class PostgresDatastore:
     def __init__(
         self,
         dbname: str,
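
Note: the new ConnectionPool subclass above adds a get_password_func hook: when supplied, the pool calls it before opening each new server connection, so short-lived credentials (for example rotating tokens) stay fresh. A usage sketch with a hypothetical credential provider:

    from eventsourcing.postgres import ConnectionPool

    def fetch_db_token() -> str:
        # Hypothetical: read the current password from a secrets manager.
        return "current-token"

    pool = ConnectionPool(
        get_password_func=fetch_db_token,
        kwargs={"dbname": "mydb", "host": "localhost", "port": "5432", "user": "me"},
        min_size=2,
        max_size=4,
        open=False,  # open lazily, as PostgresDatastore does
    )
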
@@ -118,152 +62,93 @@ class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
         port: str,
         user: str,
         password: str,
-        connect_timeout: int = 5,
+        *,
+        connect_timeout: int = 30,
         idle_in_transaction_session_timeout: int = 0,
-        pool_size: int = 1,
-        max_overflow: int = 0,
-        pool_timeout: float = 5.0,
-        max_age: Optional[float] = None,
+        pool_size: int = 2,
+        max_overflow: int = 2,
+        max_waiting: int = 0,
+        conn_max_age: float = 60 * 60.0,
         pre_ping: bool = False,
+        lock_timeout: int = 0,
+        schema: str = "",
+        pool_open_timeout: int | None = None,
+        get_password_func: Callable[[], str] | None = None,
     ):
-        self.dbname = dbname
-        self.host = host
-        self.port = port
-        self.user = user
-        self.password = password
-        self.connect_timeout = connect_timeout
         self.idle_in_transaction_session_timeout = idle_in_transaction_session_timeout
-        super().__init__(
-            pool_size=pool_size,
-            max_overflow=max_overflow,
-            pool_timeout=pool_timeout,
-            max_age=max_age,
-            pre_ping=pre_ping,
-            mutually_exclusive_read_write=False,
+        self.pre_ping = pre_ping
+        self.pool_open_timeout = pool_open_timeout
+
+        check = ConnectionPool.check_connection if pre_ping else None
+        self.pool = ConnectionPool(
+            get_password_func=get_password_func,
+            connection_class=Connection[DictRow],
+            kwargs={
+                "dbname": dbname,
+                "host": host,
+                "port": port,
+                "user": user,
+                "password": password,
+                "row_factory": dict_row,
+            },
+            min_size=pool_size,
+            max_size=pool_size + max_overflow,
+            open=False,
+            configure=self.after_connect,
+            timeout=connect_timeout,
+            max_waiting=max_waiting,
+            max_lifetime=conn_max_age,
+            check=check,
         )
+        self.lock_timeout = lock_timeout
+        self.schema = schema.strip()
 
-    def _create_connection(self) -> PostgresConnection:
-        # Make a connection to a database.
-        try:
-            pg_conn = psycopg2.connect(
-                dbname=self.dbname,
-                host=self.host,
-                port=self.port,
-                user=self.user,
-                password=self.password,
-                connect_timeout=self.connect_timeout,
-            )
-        except psycopg2.OperationalError as e:
-            raise OperationalError(e) from e
-        pg_conn.cursor().execute(
-            f"SET idle_in_transaction_session_timeout = "
+    def after_connect(self, conn: Connection[DictRow]) -> None:
+        conn.autocommit = True
+        conn.cursor().execute(
+            "SET idle_in_transaction_session_timeout = "
             f"'{self.idle_in_transaction_session_timeout}s'"
         )
-        return PostgresConnection(pg_conn, max_age=self.max_age)
-
 
-class PostgresTransaction:
-    def __init__(self, conn: PostgresConnection, commit: bool):
-        self.conn = conn
-        self.commit = commit
-        self.has_entered = False
-
-    def __enter__(self) -> PostgresCursor:
-        self.has_entered = True
-        return self.conn.cursor()
-
-    def __exit__(
-        self,
-        exc_type: Type[BaseException],
-        exc_val: BaseException,
-        exc_tb: TracebackType,
-    ) -> None:
+    @contextmanager
+    def get_connection(self) -> Iterator[Connection[DictRow]]:
         try:
-            if exc_val:
-                self.conn.rollback()
-                raise exc_val
-            elif not self.commit:
-                self.conn.rollback()
-            else:
-                self.conn.commit()
-        except psycopg2.InterfaceError as e:
-            self.conn.close()
+            wait = self.pool_open_timeout is not None
+            timeout = self.pool_open_timeout or 30.0
+            self.pool.open(wait, timeout)
+
+            with self.pool.connection() as conn:
+                yield conn
+        except psycopg.InterfaceError as e:
+            # conn.close()
             raise InterfaceError(str(e)) from e
-        except psycopg2.DataError as e:
-            raise DataError(str(e)) from e
-        except psycopg2.OperationalError as e:
-            self.conn.close()
+        except psycopg.OperationalError as e:
+            # conn.close()
             raise OperationalError(str(e)) from e
-        except psycopg2.IntegrityError as e:
+        except psycopg.DataError as e:
+            raise DataError(str(e)) from e
+        except psycopg.IntegrityError as e:
             raise IntegrityError(str(e)) from e
-        except psycopg2.InternalError as e:
+        except psycopg.InternalError as e:
             raise InternalError(str(e)) from e
-        except psycopg2.ProgrammingError as e:
+        except psycopg.ProgrammingError as e:
             raise ProgrammingError(str(e)) from e
-        except psycopg2.NotSupportedError as e:
+        except psycopg.NotSupportedError as e:
             raise NotSupportedError(str(e)) from e
-        except psycopg2.DatabaseError as e:
+        except psycopg.DatabaseError as e:
             raise DatabaseError(str(e)) from e
-        except psycopg2.Error as e:
+        except psycopg.Error as e:
+            # conn.close()
             raise PersistenceError(str(e)) from e
-
-
-class PostgresDatastore:
-    def __init__(
-        self,
-        dbname: str,
-        host: str,
-        port: str,
-        user: str,
-        password: str,
-        connect_timeout: int = 5,
-        idle_in_transaction_session_timeout: int = 0,
-        pool_size: int = 2,
-        max_overflow: int = 2,
-        pool_timeout: float = 5.0,
-        conn_max_age: Optional[float] = None,
-        pre_ping: bool = False,
-        lock_timeout: int = 0,
-        schema: str = "",
-    ):
-        self.pool = PostgresConnectionPool(
-            dbname=dbname,
-            host=host,
-            port=port,
-            user=user,
-            password=password,
-            connect_timeout=connect_timeout,
-            idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
-            pool_size=pool_size,
-            max_overflow=max_overflow,
-            pool_timeout=pool_timeout,
-            max_age=conn_max_age,
-            pre_ping=pre_ping,
-        )
-        self.lock_timeout = lock_timeout
-        self.schema = schema.strip()
+        except Exception:
+            # conn.close()
+            raise
 
     @contextmanager
-    def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
-        with self.get_connection() as conn:
-            with conn.transaction(commit) as curs:
-                yield curs
-
-    @contextmanager
-    def get_connection(self) -> Iterator[PostgresConnection]:
-        conn = self.pool.get_connection()
-        try:
-            yield conn
-        finally:
-            self.pool.put_connection(conn)
-
-    def report_on_prepared_statements(
-        self,
-    ) -> Tuple[List[List[Union[bool, str]]], List[str]]:
-        with self.get_connection() as conn:
-            with conn.cursor() as curs:
-                curs.execute("SELECT * from pg_prepared_statements")
-                return sorted(curs.fetchall()), sorted(conn.is_prepared)
+    def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
+        conn: Connection[DictRow]
+        with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
+            yield conn.cursor()
 
     def close(self) -> None:
         self.pool.close()
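
Note: PostgresDatastore now owns a psycopg_pool pool directly. get_connection() opens the pool lazily, borrows a connection, and translates psycopg exceptions into the library's PersistenceError hierarchy, while transaction() wraps psycopg's Transaction block and uses force_rollback to discard work unless commit=True is passed. A usage sketch with hypothetical connection settings:

    from eventsourcing.postgres import PostgresDatastore

    datastore = PostgresDatastore(
        dbname="mydb", host="localhost", port="5432", user="me", password="pw"
    )
    # commit=True commits on success; the default commit=False forces a rollback.
    with datastore.transaction(commit=True) as curs:
        curs.execute("CREATE TABLE IF NOT EXISTS example (id int PRIMARY KEY)")
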
@@ -271,8 +156,11 @@ class PostgresDatastore:
     def __del__(self) -> None:
         self.close()
 
+    def __enter__(self) -> Self:
+        return self
 
-PG_IDENTIFIER_MAX_LEN = 63
+    def __exit__(self, *args: object, **kwargs: Any) -> None:
+        self.close()
 
 
 class PostgresAggregateRecorder(AggregateRecorder):
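
Note: with the new __enter__/__exit__ methods, a datastore can be used as a context manager that closes its connection pool on exit — a sketch, again with hypothetical settings:

    from eventsourcing.postgres import PostgresDatastore

    with PostgresDatastore(
        dbname="mydb", host="localhost", port="5432", user="me", password="pw"
    ) as datastore:
        with datastore.get_connection() as conn:
            conn.execute("SELECT 1")
    # The pool is closed here.
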
@@ -281,8 +169,6 @@ class PostgresAggregateRecorder(AggregateRecorder):
         datastore: PostgresDatastore,
         events_table_name: str,
     ):
-        self.statement_name_aliases: Dict[str, str] = {}
-        self.statement_name_aliases_lock = Lock()
         self.check_table_name_length(events_table_name, datastore.schema)
         self.datastore = datastore
         self.events_table_name = events_table_name
@@ -298,15 +184,12 @@ class PostgresAggregateRecorder(AggregateRecorder):
 
         self.create_table_statements = self.construct_create_table_statements()
         self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4)"
-        )
-        self.insert_events_statement_name = f"insert_{events_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
         )
         self.select_events_statement = (
-            f"SELECT * FROM {self.events_table_name} WHERE originator_id = $1"
+            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
         )
-        self.lock_statements: List[str] = []
+        self.lock_table_statements: List[str] = []
 
     @staticmethod
     def check_table_name_length(table_name: str, schema_name: str) -> None:
@@ -316,47 +199,8 @@ class PostgresAggregateRecorder(AggregateRecorder):
         else:
             unqualified_table_name = table_name
         if len(unqualified_table_name) > 63:
-            raise ProgrammingError(f"Table name too long: {unqualified_table_name}")
-
-    def get_statement_alias(self, statement_name: str) -> str:
-        try:
-            alias = self.statement_name_aliases[statement_name]
-        except KeyError:
-            with self.statement_name_aliases_lock:
-                try:
-                    alias = self.statement_name_aliases[statement_name]
-                except KeyError:
-                    existing_aliases = self.statement_name_aliases.values()
-                    if (
-                        len(statement_name) <= PG_IDENTIFIER_MAX_LEN
-                        and statement_name not in existing_aliases
-                    ):
-                        alias = statement_name
-                        self.statement_name_aliases[statement_name] = alias
-                    else:
-                        uid = uuid5(
-                            NAMESPACE_URL, f"/statement_names/{statement_name}"
-                        ).hex
-                        alias = uid
-                        for i in range(len(uid)):  # pragma: no cover
-                            preserve_end = 21
-                            preserve_start = (
-                                PG_IDENTIFIER_MAX_LEN - preserve_end - i - 2
-                            )
-                            uuid5_tail = i
-                            candidate = (
-                                statement_name[:preserve_start]
-                                + "_"
-                                + (uid[-uuid5_tail:] if i else "")
-                                + "_"
-                                + statement_name[-preserve_end:]
-                            )
-                            assert len(alias) <= PG_IDENTIFIER_MAX_LEN
-                            if candidate not in existing_aliases:
-                                alias = candidate
-                                break
-                        self.statement_name_aliases[statement_name] = alias
-        return alias
+            msg = f"Table name too long: {unqualified_table_name}"
+            raise ProgrammingError(msg)
 
     def construct_create_table_statements(self) -> List[str]:
         statement = (
@@ -375,155 +219,121 @@ class PostgresAggregateRecorder(AggregateRecorder):
     def create_table(self) -> None:
         with self.datastore.transaction(commit=True) as curs:
             for statement in self.create_table_statements:
-                curs.execute(statement)
-            pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
+                curs.execute(statement, prepare=False)
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_events(
         self, stored_events: List[StoredEvent], **kwargs: Any
-    ) -> Optional[Sequence[int]]:
+    ) -> Sequence[int] | None:
+        conn: Connection[DictRow]
+        exc: Exception | None = None
+        notification_ids: Sequence[int] | None = None
         with self.datastore.get_connection() as conn:
-            self._prepare_insert_events(conn)
-            with conn.transaction(commit=True) as curs:
-                return self._insert_events(curs, stored_events, **kwargs)
-
-    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
-        self._prepare(
-            conn,
-            self.insert_events_statement_name,
-            self.insert_events_statement,
-        )
-
-    def _prepare(
-        self, conn: PostgresConnection, statement_name: str, statement: str
-    ) -> str:
-        statement_name_alias = self.get_statement_alias(statement_name)
-        if statement_name not in conn.is_prepared:
-            curs: PostgresCursor
-            with conn.transaction(commit=True) as curs:
-                try:
-                    lock_timeout = self.datastore.lock_timeout
-                    curs.execute(f"SET LOCAL lock_timeout = '{lock_timeout}s'")
-                    curs.execute(f"PREPARE {statement_name_alias} AS " + statement)
-                except psycopg2.errors.lookup(DUPLICATE_PREPARED_STATEMENT):  # noqa
-                    pass
-            conn.is_prepared.add(statement_name)
-        return statement_name_alias
+            with conn.pipeline() as pipeline, conn.transaction():
+                # Do other things first, so they can be pipelined too.
+                with conn.cursor() as curs:
+                    self._insert_events(curs, stored_events, **kwargs)
+                # Then use a different cursor for the executemany() call.
+                with conn.cursor() as curs:
+                    try:
+                        self._insert_stored_events(curs, stored_events, **kwargs)
+                        # Sync now, so any uniqueness constraint violation causes an
+                        # IntegrityError to be raised here, rather an InternalError
+                        # being raised sometime later e.g. when commit() is called.
+                        pipeline.sync()
+                        notification_ids = self._fetch_ids_after_insert_events(
+                            curs, stored_events, **kwargs
+                        )
+                    except Exception as e:
+                        # Avoid psycopg emitting a pipeline warning.
+                        exc = e
+            if exc:
+                # Reraise exception after pipeline context manager has exited.
+                raise exc
+        return notification_ids
 
     def _insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        # Acquire "EXCLUSIVE" table lock, to serialize inserts so that
-        # insertion of notification IDs is monotonic for notification log
-        # readers. We want concurrent transactions to commit inserted
-        # notification_id values in order, and by locking the table for writes,
-        # it can be guaranteed. The EXCLUSIVE lock mode does not block
-        # the ACCESS SHARE lock which is acquired during SELECT statements,
-        # so the table can be read concurrently. However INSERT normally
-        # just acquires ROW EXCLUSIVE locks, which risks interleaving of
-        # many inserts in one transaction with many insert in another
-        # transaction. Since one transaction will commit before another,
-        # the possibility arises for readers that are tailing a notification
-        # log to miss items inserted later but with lower notification IDs.
-        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
-        # https://www.postgresql.org/docs/9.1/sql-lock.html
-        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
-        # -postgresql-serial-column-values-by-commit-order
-
-        len_stored_events = len(stored_events)
+    ) -> None:
+        pass
 
+    def _insert_stored_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **_: Any,
+    ) -> None:
         # Only do something if there is something to do.
-        if len_stored_events > 0:
-            # Mogrify the table lock statements.
-            lock_sqls = (c.mogrify(s) for s in self.lock_statements)
-
-            # Prepare the commands before getting the table lock.
-            alias = self.statement_name_aliases[self.insert_events_statement_name]
-            page_size = 500
-            pages = [
-                (
-                    c.mogrify(
-                        f"EXECUTE {alias}(%s, %s, %s, %s)",
-                        (
-                            stored_event.originator_id,
-                            stored_event.originator_version,
-                            stored_event.topic,
-                            stored_event.state,
-                        ),
+        if len(stored_events) > 0:
+            self._lock_table(c)
+
+            # Insert events.
+            c.executemany(
+                query=self.insert_events_statement,
+                params_seq=[
+                    (
+                        stored_event.originator_id,
+                        stored_event.originator_version,
+                        stored_event.topic,
+                        stored_event.state,
                     )
-                    for stored_event in page
-                )
-                for page in (
-                    stored_events[ndx : min(ndx + page_size, len_stored_events)]
-                    for ndx in range(0, len_stored_events, page_size)
-                )
-            ]
-            commands = [
-                b"; ".join(page)
-                for page in chain([chain(lock_sqls, pages[0])], pages[1:])
-            ]
+                    for stored_event in stored_events
+                ],
+                returning="RETURNING" in self.insert_events_statement,
+            )
 
-            # Execute the commands.
-            for command in commands:
-                c.execute(command)
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        pass
+
+    def _fetch_ids_after_insert_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **kwargs: Any,
+    ) -> Sequence[int] | None:
         return None
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_events(
         self,
         originator_id: UUID,
-        gt: Optional[int] = None,
-        lte: Optional[int] = None,
+        *,
+        gt: int | None = None,
+        lte: int | None = None,
         desc: bool = False,
-        limit: Optional[int] = None,
+        limit: int | None = None,
     ) -> List[StoredEvent]:
-        parts = [self.select_events_statement]
+        statement = self.select_events_statement
         params: List[Any] = [originator_id]
-        statement_name = f"select_{self.events_table_name}".replace(".", "_")
         if gt is not None:
             params.append(gt)
-            parts.append(f"AND originator_version > ${len(params)}")
-            statement_name += "_gt"
+            statement += " AND originator_version > %s"
         if lte is not None:
             params.append(lte)
-            parts.append(f"AND originator_version <= ${len(params)}")
-            statement_name += "_lte"
-        parts.append("ORDER BY originator_version")
+            statement += " AND originator_version <= %s"
+        statement += " ORDER BY originator_version"
         if desc is False:
-            parts.append("ASC")
+            statement += " ASC"
         else:
-            parts.append("DESC")
-            statement_name += "_desc"
+            statement += " DESC"
         if limit is not None:
             params.append(limit)
-            parts.append(f"LIMIT ${len(params)}")
-            statement_name += "_limit"
-        statement = " ".join(parts)
-
-        stored_events = []
-
-        with self.datastore.get_connection() as conn:
-            alias = self._prepare(conn, statement_name, statement)
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
-                    params,
+            statement += " LIMIT %s"
+
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                StoredEvent(
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall():
-                    stored_events.append(
-                        StoredEvent(
-                            originator_id=row["originator_id"],
-                            originator_version=row["originator_version"],
-                            topic=row["topic"],
-                            state=bytes(row["state"]),
-                        )
-                    )
-                pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return stored_events
+                for row in curs.fetchall()
+            ]
 
 
 class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder):
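
Note: the insert path above replaces mogrify()-and-join batching with psycopg's pipeline mode: statements are queued on the connection, and pipeline.sync() flushes them, so a uniqueness violation surfaces as IntegrityError at the sync point rather than later at commit. The same pattern in isolation, assuming a datastore as sketched earlier and a table named "example":

    with datastore.get_connection() as conn:
        with conn.pipeline() as pipeline, conn.transaction():
            with conn.cursor() as curs:
                curs.execute("INSERT INTO example VALUES (%s)", (1,))
                curs.execute("INSERT INTO example VALUES (%s)", (2,))
                # Flush the queued statements so constraint violations
                # are raised here rather than later at commit time.
                pipeline.sync()
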
@@ -533,45 +343,42 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         events_table_name: str = "stored_events",
     ):
         super().__init__(datastore, events_table_name)
-        self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4) "
-            f"RETURNING notification_id"
-        )
+        self.insert_events_statement += " RETURNING notification_id"
         self.max_notification_id_statement = (
             f"SELECT MAX(notification_id) FROM {self.events_table_name}"
         )
-        self.max_notification_id_statement_name = (
-            f"max_notification_id_{events_table_name}".replace(".", "_")
-        )
-        self.lock_statements = [
+        self.lock_table_statements = [
             f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
             f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
         ]
 
     def construct_create_table_statements(self) -> List[str]:
-        statements = [
-            "CREATE TABLE IF NOT EXISTS "
-            f"{self.events_table_name} ("
-            "originator_id uuid NOT NULL, "
-            "originator_version bigint NOT NULL, "
-            "topic text, "
-            "state bytea, "
-            "notification_id bigserial, "
-            "PRIMARY KEY "
-            "(originator_id, originator_version)) "
-            "WITH (autovacuum_enabled=false)",
-            f"CREATE UNIQUE INDEX IF NOT EXISTS "
-            f"{self.notification_id_index_name}"
-            f"ON {self.events_table_name} (notification_id ASC);",
+        return [
+            (
+                "CREATE TABLE IF NOT EXISTS "
+                f"{self.events_table_name} ("
+                "originator_id uuid NOT NULL, "
+                "originator_version bigint NOT NULL, "
+                "topic text, "
+                "state bytea, "
+                "notification_id bigserial, "
+                "PRIMARY KEY "
+                "(originator_id, originator_version)) "
+                "WITH (autovacuum_enabled=false)"
+            ),
+            (
+                "CREATE UNIQUE INDEX IF NOT EXISTS "
+                f"{self.notification_id_index_name}"
+                f"ON {self.events_table_name} (notification_id ASC);"
+            ),
         ]
-        return statements
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_notifications(
         self,
         start: int,
         limit: int,
-        stop: Optional[int] = None,
+        stop: int | None = None,
         topics: Sequence[str] = (),
     ) -> List[Notification]:
         """
@@ -579,86 +386,91 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         from 'start', limited by 'limit'.
         """
 
-        params: List[Union[int, str, Sequence[str]]] = [start]
-        statement = (
-            "SELECT * " f"FROM {self.events_table_name} " "WHERE notification_id>=$1 "
-        )
-        statement_name = f"select_notifications_{self.events_table_name}".replace(
-            ".", "_"
-        )
+        params: List[int | str | Sequence[str]] = [start]
+        statement = f"SELECT * FROM {self.events_table_name} WHERE notification_id>=%s"
 
         if stop is not None:
             params.append(stop)
-            statement += f"AND notification_id <= ${len(params)} "
-            statement_name += "_stop"
+            statement += " AND notification_id <= %s"
 
         if topics:
             params.append(topics)
-            statement += f"AND topic = ANY(${len(params)}) "
-            statement_name += "_topics"
+            statement += " AND topic = ANY(%s)"
 
         params.append(limit)
-        statement += "ORDER BY notification_id " f"LIMIT ${len(params)}"
-
-        notifications = []
-        with self.datastore.get_connection() as conn:
-            alias = self._prepare(
-                conn,
-                statement_name,
-                statement,
-            )
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
-                    params,
+        statement += " ORDER BY notification_id LIMIT %s"
+
+        connection = self.datastore.get_connection()
+        with connection as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                Notification(
+                    id=row["notification_id"],
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall():
-                    notifications.append(
-                        Notification(
-                            id=row["notification_id"],
-                            originator_id=row["originator_id"],
-                            originator_version=row["originator_version"],
-                            topic=row["topic"],
-                            state=bytes(row["state"]),
-                        )
-                    )
-                pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return notifications
+                for row in curs.fetchall()
+            ]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_notification_id(self) -> int:
         """
         Returns the maximum notification ID.
         """
-        statement_name = self.max_notification_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.max_notification_id_statement
-            )
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}",
-                )
-                max_id = curs.fetchone()[0] or 0
-        return max_id
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(self.max_notification_id_statement)
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
+
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
+        # stored events, so that readers don't pass over gaps that are filled in
+        # later. We want each transaction that will be issued with notifications
+        # IDs by the notification ID sequence to receive all its notification IDs
+        # and then commit, before another transaction is issued with any notification
+        # IDs. In other words, we want the insert order to be the same as the commit
+        # order. We can accomplish this by locking the table for writes. The
+        # EXCLUSIVE lock mode does not block SELECT statements, which acquire an
+        # ACCESS SHARE lock, so the stored events table can be read concurrently
+        # with writes and other reads. However, INSERT statements normally just
+        # acquires ROW EXCLUSIVE locks, which risks the interleaving (within the
+        # recorded sequence of notification IDs) of stored events from one transaction
+        # with those of another transaction. And since one transaction will always
+        # commit before another, the possibility arises when using ROW EXCLUSIVE locks
+        # for readers that are tailing a notification log to miss items inserted later
+        # but issued with lower notification IDs.
+        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
+        # https://www.postgresql.org/docs/9.1/sql-lock.html
+        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
+        # -postgresql-serial-column-values-by-commit-order
+        for lock_statement in self.lock_table_statements:
+            c.execute(lock_statement, prepare=True)
 
-    def _insert_events(
+    def _fetch_ids_after_insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        super()._insert_events(c, stored_events, **kwargs)
-        if stored_events:
-            last_notification_id = c.fetchone()[0]
-            notification_ids = list(
-                range(
-                    last_notification_id - len(stored_events) + 1,
-                    last_notification_id + 1,
-                )
-            )
-        else:
-            notification_ids = []
+    ) -> Sequence[int] | None:
+        notification_ids: List[int] = []
+        len_events = len(stored_events)
+        if len_events:
+            if (
+                (c.statusmessage == "SET")
+                and c.nextset()
+                and (c.statusmessage == "LOCK TABLE")
+            ):
+                while c.nextset() and len(notification_ids) != len_events:
+                    row = c.fetchone()
+                    assert row is not None
+                    notification_ids.append(row["notification_id"])
+            if len(notification_ids) != len(stored_events):
+                msg = "Couldn't get all notification IDs"
+                raise ProgrammingError(msg)
         return notification_ids
 
 
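Note: _fetch_ids_after_insert_events() above walks the result sets produced by executemany(..., returning=True): psycopg exposes one result set per inserted row, stepped through with nextset() and fetchone(). The same pattern in isolation, against the hypothetical "example" table from the earlier sketches:

    with datastore.get_connection() as conn, conn.cursor() as curs:
        curs.executemany(
            "INSERT INTO example VALUES (%s) RETURNING id",
            [(3,), (4,)],
            returning=True,
        )
        ids = []
        while True:
            row = curs.fetchone()  # rows are dicts, via the dict_row factory
            assert row is not None
            ids.append(row["id"])
            if not curs.nextset():
                break
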
@@ -673,26 +485,17 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
         self.tracking_table_name = tracking_table_name
         super().__init__(datastore, events_table_name)
         self.insert_tracking_statement = (
-            f"INSERT INTO {self.tracking_table_name} VALUES ($1, $2)"
-        )
-        self.insert_tracking_statement_name = f"insert_{tracking_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
         )
         self.max_tracking_id_statement = (
             "SELECT MAX(notification_id) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name=$1"
+            "WHERE application_name=%s"
         )
         self.count_tracking_id_statement = (
             "SELECT COUNT(*) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name=$1 AND notification_id=$2"
-        )
-        self.max_tracking_id_statement_name = (
-            f"max_tracking_id_{tracking_table_name}".replace(".", "_")
-        )
-        self.count_tracking_id_statement_name = (
-            f"count_tracking_id_{tracking_table_name}".replace(".", "_")
+            "WHERE application_name=%s AND notification_id=%s"
         )
 
     def construct_create_table_statements(self) -> List[str]:
@@ -709,61 +512,46 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_tracking_id(self, application_name: str) -> int:
-        statement_name = self.max_tracking_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.max_tracking_id_statement
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.max_tracking_id_statement,
+                params=(application_name,),
+                prepare=True,
             )
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}(%s)",
-                    (application_name,),
-                )
-                max_id = curs.fetchone()[0] or 0
-        return max_id
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
-        statement_name = self.count_tracking_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.count_tracking_id_statement
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.count_tracking_id_statement,
+                params=(application_name, notification_id),
+                prepare=True,
             )
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}(%s, %s)",
-                    (application_name, notification_id),
-                )
-                return bool(curs.fetchone()[0])
-
-    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
-        super()._prepare_insert_events(conn)
-        self._prepare(
-            conn, self.insert_tracking_statement_name, self.insert_tracking_statement
-        )
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return bool(fetchone["count"])
 
     def _insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        notification_ids = super()._insert_events(c, stored_events, **kwargs)
-        tracking: Optional[Tracking] = kwargs.get("tracking", None)
+    ) -> None:
+        tracking: Tracking | None = kwargs.get("tracking", None)
         if tracking is not None:
-            statement_alias = self.statement_name_aliases[
-                self.insert_tracking_statement_name
-            ]
             c.execute(
-                f"EXECUTE {statement_alias}(%s, %s)",
-                (
+                query=self.insert_tracking_statement,
+                params=(
                     tracking.application_name,
                     tracking.notification_id,
                 ),
+                prepare=True,
             )
-        return notification_ids
+        super()._insert_events(c, stored_events, **kwargs)
 
 
 class Factory(InfrastructureFactory):
@@ -771,72 +559,84 @@ class Factory(InfrastructureFactory):
     POSTGRES_HOST = "POSTGRES_HOST"
     POSTGRES_PORT = "POSTGRES_PORT"
     POSTGRES_USER = "POSTGRES_USER"
-    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"
+    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"  # noqa: S105
+    POSTGRES_GET_PASSWORD_TOPIC = "POSTGRES_GET_PASSWORD_TOPIC"  # noqa: S105
     POSTGRES_CONNECT_TIMEOUT = "POSTGRES_CONNECT_TIMEOUT"
     POSTGRES_CONN_MAX_AGE = "POSTGRES_CONN_MAX_AGE"
     POSTGRES_PRE_PING = "POSTGRES_PRE_PING"
-    POSTGRES_POOL_TIMEOUT = "POSTGRES_POOL_TIMEOUT"
+    POSTGRES_MAX_WAITING = "POSTGRES_MAX_WAITING"
     POSTGRES_LOCK_TIMEOUT = "POSTGRES_LOCK_TIMEOUT"
     POSTGRES_POOL_SIZE = "POSTGRES_POOL_SIZE"
-    POSTGRES_POOL_MAX_OVERFLOW = "POSTGRES_POOL_MAX_OVERFLOW"
+    POSTGRES_MAX_OVERFLOW = "POSTGRES_MAX_OVERFLOW"
     POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT = (
         "POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT"
     )
     POSTGRES_SCHEMA = "POSTGRES_SCHEMA"
     CREATE_TABLE = "CREATE_TABLE"
 
+    aggregate_recorder_class = PostgresAggregateRecorder
+    application_recorder_class = PostgresApplicationRecorder
+    process_recorder_class = PostgresProcessRecorder
+
     def __init__(self, env: Environment):
         super().__init__(env)
         dbname = self.env.get(self.POSTGRES_DBNAME)
         if dbname is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres database name not found "
                 "in environment with key "
                 f"'{self.POSTGRES_DBNAME}'"
            )
+            raise OSError(msg)
 
         host = self.env.get(self.POSTGRES_HOST)
         if host is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres host not found "
                 "in environment with key "
                 f"'{self.POSTGRES_HOST}'"
             )
+            raise OSError(msg)
 
         port = self.env.get(self.POSTGRES_PORT) or "5432"
 
         user = self.env.get(self.POSTGRES_USER)
         if user is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres user not found "
                 "in environment with key "
                 f"'{self.POSTGRES_USER}'"
             )
+            raise OSError(msg)
+
+        get_password_func = None
+        get_password_topic = self.env.get(self.POSTGRES_GET_PASSWORD_TOPIC)
+        if not get_password_topic:
+            password = self.env.get(self.POSTGRES_PASSWORD)
+            if password is None:
+                msg = (
+                    "Postgres password not found "
+                    "in environment with key "
+                    f"'{self.POSTGRES_PASSWORD}'"
+                )
+                raise OSError(msg)
+        else:
+            get_password_func = resolve_topic(get_password_topic)
+            password = ""
 
-        password = self.env.get(self.POSTGRES_PASSWORD)
-        if password is None:
-            raise EnvironmentError(
-                "Postgres password not found "
-                "in environment with key "
-                f"'{self.POSTGRES_PASSWORD}'"
-            )
-
-        connect_timeout: Optional[int]
+        connect_timeout = 30
         connect_timeout_str = self.env.get(self.POSTGRES_CONNECT_TIMEOUT)
-        if connect_timeout_str is None:
-            connect_timeout = 5
-        elif connect_timeout_str == "":
-            connect_timeout = 5
-        else:
+        if connect_timeout_str:
             try:
                 connect_timeout = int(connect_timeout_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONNECT_TIMEOUT}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                    "If set, an integer or empty string is expected: "
                     f"'{connect_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
         idle_in_transaction_session_timeout_str = (
             self.env.get(self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT) or "5"
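
Note: the block above introduces the POSTGRES_GET_PASSWORD_TOPIC setting: instead of a static POSTGRES_PASSWORD, the factory resolves the topic to a callable and passes it to the datastore as get_password_func. A configuration sketch, with a hypothetical module path:

    import os

    # Hypothetical module mypackage/credentials.py:
    #     def get_password() -> str:
    #         ...  # e.g. read the current token from a secrets manager
    os.environ["POSTGRES_GET_PASSWORD_TOPIC"] = "mypackage.credentials:get_password"
    # When this is set, the static POSTGRES_PASSWORD variable is not required.
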
@@ -847,80 +647,69 @@ class Factory(InfrastructureFactory):
                 idle_in_transaction_session_timeout_str
             )
         except ValueError:
-            raise EnvironmentError(
-                f"Postgres environment value for key "
+            msg = (
+                "Postgres environment value for key "
                 f"'{self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT}' is invalid. "
-                f"If set, an integer or empty string is expected: "
+                "If set, an integer or empty string is expected: "
                 f"'{idle_in_transaction_session_timeout_str}'"
             )
+            raise OSError(msg) from None
 
-        pool_size: Optional[int]
+        pool_size = 5
         pool_size_str = self.env.get(self.POSTGRES_POOL_SIZE)
-        if pool_size_str is None:
-            pool_size = 5
-        elif pool_size_str == "":
-            pool_size = 5
-        else:
+        if pool_size_str:
             try:
                 pool_size = int(pool_size_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_POOL_SIZE}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_size_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_max_overflow: Optional[int]
-        pool_max_overflow_str = self.env.get(self.POSTGRES_POOL_MAX_OVERFLOW)
-        if pool_max_overflow_str is None:
-            pool_max_overflow = 10
-        elif pool_max_overflow_str == "":
-            pool_max_overflow = 10
-        else:
+        pool_max_overflow = 10
+        pool_max_overflow_str = self.env.get(self.POSTGRES_MAX_OVERFLOW)
+        if pool_max_overflow_str:
             try:
                 pool_max_overflow = int(pool_max_overflow_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
-                    f"'{self.POSTGRES_POOL_MAX_OVERFLOW}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                msg = (
+                    "Postgres environment value for key "
+                    f"'{self.POSTGRES_MAX_OVERFLOW}' is invalid. "
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_max_overflow_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_timeout: Optional[float]
-        pool_timeout_str = self.env.get(self.POSTGRES_POOL_TIMEOUT)
-        if pool_timeout_str is None:
-            pool_timeout = 30
-        elif pool_timeout_str == "":
-            pool_timeout = 30
-        else:
+        max_waiting = 0
+        max_waiting_str = self.env.get(self.POSTGRES_MAX_WAITING)
+        if max_waiting_str:
             try:
-                pool_timeout = float(pool_timeout_str)
+                max_waiting = int(max_waiting_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
-                    f"'{self.POSTGRES_POOL_TIMEOUT}' is invalid. "
-                    f"If set, a float or empty string is expected: "
-                    f"'{pool_timeout_str}'"
+                msg = (
+                    "Postgres environment value for key "
+                    f"'{self.POSTGRES_MAX_WAITING}' is invalid. "
+                    "If set, an integer or empty string is expected: "
+                    f"'{max_waiting_str}'"
                 )
+                raise OSError(msg) from None
 
-        conn_max_age: Optional[float]
+        conn_max_age = 60 * 60.0
         conn_max_age_str = self.env.get(self.POSTGRES_CONN_MAX_AGE)
-        if conn_max_age_str is None:
-            conn_max_age = None
-        elif conn_max_age_str == "":
-            conn_max_age = None
-        else:
+        if conn_max_age_str:
             try:
                 conn_max_age = float(conn_max_age_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONN_MAX_AGE}' is invalid. "
-                    f"If set, a float or empty string is expected: "
+                    "If set, a float or empty string is expected: "
                     f"'{conn_max_age_str}'"
                 )
+                raise OSError(msg) from None
 
         pre_ping = strtobool(self.env.get(self.POSTGRES_PRE_PING) or "no")
 
@@ -929,12 +718,13 @@
         try:
             lock_timeout = int(lock_timeout_str)
         except ValueError:
-            raise EnvironmentError(
-                f"Postgres environment value for key "
+            msg = (
+                "Postgres environment value for key "
                 f"'{self.POSTGRES_LOCK_TIMEOUT}' is invalid. "
-                f"If set, an integer or empty string is expected: "
+                "If set, an integer or empty string is expected: "
                 f"'{lock_timeout_str}'"
             )
+            raise OSError(msg) from None
 
         schema = self.env.get(self.POSTGRES_SCHEMA) or ""
 
@@ -944,23 +734,27 @@
             port=port,
             user=user,
             password=password,
+            get_password_func=get_password_func,
             connect_timeout=connect_timeout,
             idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
             pool_size=pool_size,
             max_overflow=pool_max_overflow,
-            pool_timeout=pool_timeout,
+            max_waiting=max_waiting,
             conn_max_age=conn_max_age,
             pre_ping=pre_ping,
             lock_timeout=lock_timeout,
             schema=schema,
         )
 
+    def env_create_table(self) -> bool:
+        return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
+
     def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
         prefix = self.env.name.lower() or "stored"
         events_table_name = prefix + "_" + purpose
         if self.datastore.schema:
             events_table_name = f"{self.datastore.schema}.{events_table_name}"
-        recorder = PostgresAggregateRecorder(
+        recorder = type(self).aggregate_recorder_class(
            datastore=self.datastore,
            events_table_name=events_table_name,
        )
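
Note: the new aggregate_recorder_class, application_recorder_class, and process_recorder_class attributes make the recorder classes pluggable. A subclassing sketch (names hypothetical), in the style of the searchablecontent example included in this release:

    from eventsourcing.postgres import Factory, PostgresApplicationRecorder

    class MyApplicationRecorder(PostgresApplicationRecorder):
        """Customised recorder, e.g. with extra searchable columns."""

    class MyFactory(Factory):
        application_recorder_class = MyApplicationRecorder
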
@@ -973,7 +767,7 @@
         events_table_name = prefix + "_events"
         if self.datastore.schema:
             events_table_name = f"{self.datastore.schema}.{events_table_name}"
-        recorder = PostgresApplicationRecorder(
+        recorder = type(self).application_recorder_class(
             datastore=self.datastore,
             events_table_name=events_table_name,
         )
@@ -989,7 +783,7 @@
         if self.datastore.schema:
             events_table_name = f"{self.datastore.schema}.{events_table_name}"
             tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-        recorder = PostgresProcessRecorder(
+        recorder = type(self).process_recorder_class(
             datastore=self.datastore,
             events_table_name=events_table_name,
             tracking_table_name=tracking_table_name,
@@ -998,8 +792,9 @@
         recorder.create_table()
         return recorder
 
-    def env_create_table(self) -> bool:
-        return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
-
     def close(self) -> None:
-        self.datastore.close()
+        if hasattr(self, "datastore"):
+            self.datastore.close()
+
+    def __del__(self) -> None:
+        self.close()