eventsourcing 9.2.22__py3-none-any.whl → 9.3.0a1__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.


Files changed (144)
  1. eventsourcing/__init__.py +1 -1
  2. eventsourcing/application.py +106 -135
  3. eventsourcing/cipher.py +15 -12
  4. eventsourcing/dispatch.py +31 -91
  5. eventsourcing/domain.py +138 -143
  6. eventsourcing/examples/__init__.py +0 -0
  7. eventsourcing/examples/aggregate1/__init__.py +0 -0
  8. eventsourcing/examples/aggregate1/application.py +27 -0
  9. eventsourcing/examples/aggregate1/domainmodel.py +16 -0
  10. eventsourcing/examples/aggregate1/test_application.py +37 -0
  11. eventsourcing/examples/aggregate2/__init__.py +0 -0
  12. eventsourcing/examples/aggregate2/application.py +27 -0
  13. eventsourcing/examples/aggregate2/domainmodel.py +22 -0
  14. eventsourcing/examples/aggregate2/test_application.py +37 -0
  15. eventsourcing/examples/aggregate3/__init__.py +0 -0
  16. eventsourcing/examples/aggregate3/application.py +27 -0
  17. eventsourcing/examples/aggregate3/domainmodel.py +38 -0
  18. eventsourcing/examples/aggregate3/test_application.py +37 -0
  19. eventsourcing/examples/aggregate4/__init__.py +0 -0
  20. eventsourcing/examples/aggregate4/application.py +27 -0
  21. eventsourcing/examples/aggregate4/domainmodel.py +128 -0
  22. eventsourcing/examples/aggregate4/test_application.py +38 -0
  23. eventsourcing/examples/aggregate5/__init__.py +0 -0
  24. eventsourcing/examples/aggregate5/application.py +27 -0
  25. eventsourcing/examples/aggregate5/domainmodel.py +131 -0
  26. eventsourcing/examples/aggregate5/test_application.py +38 -0
  27. eventsourcing/examples/aggregate6/__init__.py +0 -0
  28. eventsourcing/examples/aggregate6/application.py +30 -0
  29. eventsourcing/examples/aggregate6/domainmodel.py +123 -0
  30. eventsourcing/examples/aggregate6/test_application.py +38 -0
  31. eventsourcing/examples/aggregate6a/__init__.py +0 -0
  32. eventsourcing/examples/aggregate6a/application.py +40 -0
  33. eventsourcing/examples/aggregate6a/domainmodel.py +149 -0
  34. eventsourcing/examples/aggregate6a/test_application.py +45 -0
  35. eventsourcing/examples/aggregate7/__init__.py +0 -0
  36. eventsourcing/examples/aggregate7/application.py +48 -0
  37. eventsourcing/examples/aggregate7/domainmodel.py +144 -0
  38. eventsourcing/examples/aggregate7/persistence.py +57 -0
  39. eventsourcing/examples/aggregate7/test_application.py +38 -0
  40. eventsourcing/examples/aggregate7/test_compression_and_encryption.py +45 -0
  41. eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +67 -0
  42. eventsourcing/examples/aggregate7a/__init__.py +0 -0
  43. eventsourcing/examples/aggregate7a/application.py +56 -0
  44. eventsourcing/examples/aggregate7a/domainmodel.py +170 -0
  45. eventsourcing/examples/aggregate7a/test_application.py +46 -0
  46. eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +45 -0
  47. eventsourcing/examples/aggregate8/__init__.py +0 -0
  48. eventsourcing/examples/aggregate8/application.py +47 -0
  49. eventsourcing/examples/aggregate8/domainmodel.py +65 -0
  50. eventsourcing/examples/aggregate8/persistence.py +57 -0
  51. eventsourcing/examples/aggregate8/test_application.py +37 -0
  52. eventsourcing/examples/aggregate8/test_compression_and_encryption.py +44 -0
  53. eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +38 -0
  54. eventsourcing/examples/bankaccounts/__init__.py +0 -0
  55. eventsourcing/examples/bankaccounts/application.py +70 -0
  56. eventsourcing/examples/bankaccounts/domainmodel.py +56 -0
  57. eventsourcing/examples/bankaccounts/test.py +173 -0
  58. eventsourcing/examples/cargoshipping/__init__.py +0 -0
  59. eventsourcing/examples/cargoshipping/application.py +126 -0
  60. eventsourcing/examples/cargoshipping/domainmodel.py +330 -0
  61. eventsourcing/examples/cargoshipping/interface.py +143 -0
  62. eventsourcing/examples/cargoshipping/test.py +231 -0
  63. eventsourcing/examples/contentmanagement/__init__.py +0 -0
  64. eventsourcing/examples/contentmanagement/application.py +118 -0
  65. eventsourcing/examples/contentmanagement/domainmodel.py +69 -0
  66. eventsourcing/examples/contentmanagement/test.py +180 -0
  67. eventsourcing/examples/contentmanagement/utils.py +26 -0
  68. eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
  69. eventsourcing/examples/contentmanagementsystem/application.py +54 -0
  70. eventsourcing/examples/contentmanagementsystem/postgres.py +17 -0
  71. eventsourcing/examples/contentmanagementsystem/sqlite.py +17 -0
  72. eventsourcing/examples/contentmanagementsystem/system.py +14 -0
  73. eventsourcing/examples/contentmanagementsystem/test_system.py +174 -0
  74. eventsourcing/examples/searchablecontent/__init__.py +0 -0
  75. eventsourcing/examples/searchablecontent/application.py +45 -0
  76. eventsourcing/examples/searchablecontent/persistence.py +23 -0
  77. eventsourcing/examples/searchablecontent/postgres.py +118 -0
  78. eventsourcing/examples/searchablecontent/sqlite.py +136 -0
  79. eventsourcing/examples/searchablecontent/test_application.py +111 -0
  80. eventsourcing/examples/searchablecontent/test_recorder.py +69 -0
  81. eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
  82. eventsourcing/examples/searchabletimestamps/application.py +32 -0
  83. eventsourcing/examples/searchabletimestamps/persistence.py +20 -0
  84. eventsourcing/examples/searchabletimestamps/postgres.py +110 -0
  85. eventsourcing/examples/searchabletimestamps/sqlite.py +99 -0
  86. eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +91 -0
  87. eventsourcing/examples/test_invoice.py +176 -0
  88. eventsourcing/examples/test_parking_lot.py +206 -0
  89. eventsourcing/interface.py +2 -2
  90. eventsourcing/persistence.py +85 -81
  91. eventsourcing/popo.py +30 -31
  92. eventsourcing/postgres.py +361 -578
  93. eventsourcing/sqlite.py +91 -99
  94. eventsourcing/system.py +42 -57
  95. eventsourcing/tests/application.py +20 -32
  96. eventsourcing/tests/application_tests/__init__.py +0 -0
  97. eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +55 -0
  98. eventsourcing/tests/application_tests/test_application_with_popo.py +22 -0
  99. eventsourcing/tests/application_tests/test_application_with_postgres.py +75 -0
  100. eventsourcing/tests/application_tests/test_application_with_sqlite.py +72 -0
  101. eventsourcing/tests/application_tests/test_cache.py +134 -0
  102. eventsourcing/tests/application_tests/test_event_sourced_log.py +162 -0
  103. eventsourcing/tests/application_tests/test_notificationlog.py +232 -0
  104. eventsourcing/tests/application_tests/test_notificationlogreader.py +126 -0
  105. eventsourcing/tests/application_tests/test_processapplication.py +110 -0
  106. eventsourcing/tests/application_tests/test_processingpolicy.py +109 -0
  107. eventsourcing/tests/application_tests/test_repository.py +504 -0
  108. eventsourcing/tests/application_tests/test_snapshotting.py +68 -0
  109. eventsourcing/tests/application_tests/test_upcasting.py +459 -0
  110. eventsourcing/tests/docs_tests/__init__.py +0 -0
  111. eventsourcing/tests/docs_tests/test_docs.py +293 -0
  112. eventsourcing/tests/domain.py +1 -1
  113. eventsourcing/tests/domain_tests/__init__.py +0 -0
  114. eventsourcing/tests/domain_tests/test_aggregate.py +1159 -0
  115. eventsourcing/tests/domain_tests/test_aggregate_decorators.py +1604 -0
  116. eventsourcing/tests/domain_tests/test_domainevent.py +80 -0
  117. eventsourcing/tests/interface_tests/__init__.py +0 -0
  118. eventsourcing/tests/interface_tests/test_remotenotificationlog.py +258 -0
  119. eventsourcing/tests/persistence.py +49 -50
  120. eventsourcing/tests/persistence_tests/__init__.py +0 -0
  121. eventsourcing/tests/persistence_tests/test_aes.py +93 -0
  122. eventsourcing/tests/persistence_tests/test_connection_pool.py +722 -0
  123. eventsourcing/tests/persistence_tests/test_eventstore.py +72 -0
  124. eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +21 -0
  125. eventsourcing/tests/persistence_tests/test_mapper.py +113 -0
  126. eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +69 -0
  127. eventsourcing/tests/persistence_tests/test_popo.py +124 -0
  128. eventsourcing/tests/persistence_tests/test_postgres.py +1121 -0
  129. eventsourcing/tests/persistence_tests/test_sqlite.py +348 -0
  130. eventsourcing/tests/persistence_tests/test_transcoder.py +44 -0
  131. eventsourcing/tests/postgres_utils.py +7 -7
  132. eventsourcing/tests/system_tests/__init__.py +0 -0
  133. eventsourcing/tests/system_tests/test_runner.py +935 -0
  134. eventsourcing/tests/system_tests/test_system.py +287 -0
  135. eventsourcing/tests/utils_tests/__init__.py +0 -0
  136. eventsourcing/tests/utils_tests/test_utils.py +226 -0
  137. eventsourcing/utils.py +47 -50
  138. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/METADATA +28 -80
  139. eventsourcing-9.3.0a1.dist-info/RECORD +144 -0
  140. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/WHEEL +1 -2
  141. eventsourcing-9.2.22.dist-info/AUTHORS +0 -10
  142. eventsourcing-9.2.22.dist-info/RECORD +0 -25
  143. eventsourcing-9.2.22.dist-info/top_level.txt +0 -1
  144. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0a1.dist-info}/LICENSE +0 -0
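
The bulk of the new files listed above are a bundled eventsourcing/examples package and a test suite reorganised into per-area subpackages (application_tests, domain_tests, persistence_tests, and so on). The largest single rewrite is eventsourcing/postgres.py, shown below, which moves the PostgreSQL adapter from psycopg2 to psycopg 3 (psycopg plus psycopg_pool). A minimal sketch of the psycopg 3 idioms the new module builds on, with the connection string and table name assumed for illustration rather than taken from the package:

    import psycopg
    from psycopg.rows import dict_row
    from uuid import uuid4

    # psycopg 3: dict rows instead of psycopg2's DictCursor, %s placeholders
    # instead of the old $1/$2 PREPARE parameters, and per-statement
    # server-side preparation on request.
    with psycopg.connect("dbname=eventsourcing user=postgres", row_factory=dict_row) as conn:
        with conn.cursor() as curs:
            curs.execute(
                "SELECT * FROM stored_events WHERE originator_id = %s",
                [uuid4()],     # UUID values are adapted natively
                prepare=True,  # ask the driver to PREPARE this statement
            )
            for row in curs.fetchall():
                print(row["topic"], row["originator_version"])
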
eventsourcing/postgres.py CHANGED
@@ -1,35 +1,18 @@
1
1
  from __future__ import annotations
2
2
 
3
+ import logging
3
4
  from contextlib import contextmanager
4
- from itertools import chain
5
- from threading import Lock
6
- from types import TracebackType
7
- from typing import (
8
- Any,
9
- Dict,
10
- Iterator,
11
- List,
12
- Optional,
13
- Sequence,
14
- Set,
15
- Tuple,
16
- Type,
17
- Union,
18
- )
19
- from uuid import NAMESPACE_URL, UUID, uuid5
5
+ from typing import TYPE_CHECKING, Any, Callable, Dict, Iterator, List, Sequence
20
6
 
21
- import psycopg2
22
- import psycopg2.errors
23
- import psycopg2.extras
24
- from psycopg2.errorcodes import DUPLICATE_PREPARED_STATEMENT
25
- from psycopg2.extensions import connection, cursor
7
+ import psycopg
8
+ import psycopg.errors
9
+ import psycopg_pool
10
+ from psycopg import Connection, Cursor
11
+ from psycopg.rows import DictRow, dict_row
26
12
 
27
13
  from eventsourcing.persistence import (
28
14
  AggregateRecorder,
29
15
  ApplicationRecorder,
30
- Connection,
31
- ConnectionPool,
32
- Cursor,
33
16
  DatabaseError,
34
17
  DataError,
35
18
  InfrastructureFactory,
@@ -45,74 +28,32 @@ from eventsourcing.persistence import (
45
28
  StoredEvent,
46
29
  Tracking,
47
30
  )
48
- from eventsourcing.utils import Environment, retry, strtobool
49
-
50
- psycopg2.extras.register_uuid()
51
-
52
-
53
- class PostgresCursor(Cursor):
54
- def __init__(self, pg_cursor: cursor):
55
- self.pg_cursor = pg_cursor
56
-
57
- def __enter__(self, *args: Any, **kwargs: Any) -> "PostgresCursor":
58
- self.pg_cursor.__enter__(*args, **kwargs)
59
- return self
60
-
61
- def __exit__(self, *args: Any, **kwargs: Any) -> None:
62
- return self.pg_cursor.__exit__(*args, **kwargs)
63
-
64
- def mogrify(self, statement: str, params: Any = None) -> bytes:
65
- return self.pg_cursor.mogrify(statement, vars=params)
66
-
67
- def execute(self, statement: Union[str, bytes], params: Any = None) -> None:
68
- self.pg_cursor.execute(query=statement, vars=params)
69
-
70
- def fetchall(self) -> Any:
71
- return self.pg_cursor.fetchall()
72
-
73
- def fetchone(self) -> Any:
74
- return self.pg_cursor.fetchone()
75
-
76
- @property
77
- def closed(self) -> bool:
78
- return self.pg_cursor.closed
31
+ from eventsourcing.utils import Environment, resolve_topic, retry, strtobool
79
32
 
33
+ if TYPE_CHECKING: # pragma: nocover
34
+ from uuid import UUID
80
35
 
81
- class PostgresConnection(Connection[PostgresCursor]):
82
- def __init__(self, pg_conn: connection, max_age: Optional[float]):
83
- super().__init__(max_age=max_age)
84
- self._pg_conn = pg_conn
85
- self.is_prepared: Set[str] = set()
36
+ logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
37
+ logging.getLogger("psycopg").setLevel(logging.CRITICAL)
86
38
 
87
- @contextmanager
88
- def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
89
- # Context managed transaction.
90
- with PostgresTransaction(self, commit) as curs:
91
- # Context managed cursor.
92
- with curs:
93
- yield curs
94
-
95
- def cursor(self) -> PostgresCursor:
96
- return PostgresCursor(
97
- self._pg_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
98
- )
99
-
100
- def rollback(self) -> None:
101
- self._pg_conn.rollback()
102
39
 
103
- def commit(self) -> None:
104
- self._pg_conn.commit()
105
-
106
- def _close(self) -> None:
107
- self._pg_conn.close()
108
- super()._close()
40
+ class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
41
+ def __init__(
42
+ self,
43
+ *args: Any,
44
+ get_password_func: Callable[[], str] | None = None,
45
+ **kwargs: Any,
46
+ ) -> None:
47
+ self.get_password_func = get_password_func
48
+ super().__init__(*args, **kwargs)
109
49
 
110
- @property
111
- def closed(self) -> bool:
112
- return bool(self._pg_conn.closed)
50
+ def _connect(self, timeout: float | None = None) -> Connection[Any]:
51
+ if self.get_password_func:
52
+ self.kwargs["password"] = self.get_password_func()
53
+ return super()._connect(timeout=timeout)
113
54
 
114
55
 
115
- class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
56
+ class PostgresDatastore:
116
57
  def __init__(
117
58
  self,
118
59
  dbname: str,
@@ -120,152 +61,94 @@ class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
120
61
  port: str,
121
62
  user: str,
122
63
  password: str,
64
+ *,
123
65
  connect_timeout: int = 5,
124
66
  idle_in_transaction_session_timeout: int = 0,
125
- pool_size: int = 1,
126
- max_overflow: int = 0,
67
+ pool_size: int = 2,
68
+ max_overflow: int = 2,
127
69
  pool_timeout: float = 5.0,
128
- max_age: Optional[float] = None,
70
+ conn_max_age: float = 60 * 60.0,
129
71
  pre_ping: bool = False,
72
+ lock_timeout: int = 0,
73
+ schema: str = "",
74
+ pool_open_timeout: int | None = None,
75
+ get_password_func: Callable[[], str] | None = None,
130
76
  ):
131
- self.dbname = dbname
132
- self.host = host
133
- self.port = port
134
- self.user = user
135
- self.password = password
136
- self.connect_timeout = connect_timeout
137
77
  self.idle_in_transaction_session_timeout = idle_in_transaction_session_timeout
138
- super().__init__(
139
- pool_size=pool_size,
140
- max_overflow=max_overflow,
141
- pool_timeout=pool_timeout,
142
- max_age=max_age,
143
- pre_ping=pre_ping,
144
- mutually_exclusive_read_write=False,
78
+ self.pre_ping = pre_ping
79
+ self.pool_open_timeout = pool_open_timeout
80
+
81
+ check = ConnectionPool.check_connection if pre_ping else None
82
+ kwargs: Dict[str, Any] = {"check": check}
83
+ self.pool = ConnectionPool(
84
+ get_password_func=get_password_func,
85
+ connection_class=Connection[DictRow],
86
+ kwargs={
87
+ "dbname": dbname,
88
+ "host": host,
89
+ "port": port,
90
+ "user": user,
91
+ "password": password,
92
+ "row_factory": dict_row,
93
+ },
94
+ min_size=pool_size,
95
+ max_size=pool_size + max_overflow,
96
+ open=False,
97
+ configure=self.after_connect,
98
+ timeout=connect_timeout,
99
+ max_waiting=round(pool_timeout),
100
+ max_lifetime=conn_max_age,
101
+ **kwargs, # use the 'check' argument when no longer supporting Python 3.7
145
102
  )
103
+ self.lock_timeout = lock_timeout
104
+ self.schema = schema.strip()
146
105
 
147
- def _create_connection(self) -> PostgresConnection:
148
- # Make a connection to a database.
149
- try:
150
- pg_conn = psycopg2.connect(
151
- dbname=self.dbname,
152
- host=self.host,
153
- port=self.port,
154
- user=self.user,
155
- password=self.password,
156
- connect_timeout=self.connect_timeout,
157
- )
158
- except psycopg2.OperationalError as e:
159
- raise OperationalError(e) from e
160
- pg_conn.cursor().execute(
161
- f"SET idle_in_transaction_session_timeout = "
106
+ def after_connect(self, conn: Connection[DictRow]) -> None:
107
+ conn.autocommit = True
108
+ conn.cursor().execute(
109
+ "SET idle_in_transaction_session_timeout = "
162
110
  f"'{self.idle_in_transaction_session_timeout}s'"
163
111
  )
164
- return PostgresConnection(pg_conn, max_age=self.max_age)
165
-
166
-
167
- class PostgresTransaction:
168
- def __init__(self, conn: PostgresConnection, commit: bool):
169
- self.conn = conn
170
- self.commit = commit
171
- self.has_entered = False
172
112
 
173
- def __enter__(self) -> PostgresCursor:
174
- self.has_entered = True
175
- return self.conn.cursor()
176
-
177
- def __exit__(
178
- self,
179
- exc_type: Type[BaseException],
180
- exc_val: BaseException,
181
- exc_tb: TracebackType,
182
- ) -> None:
113
+ @contextmanager
114
+ def get_connection(self) -> Iterator[Connection[DictRow]]:
183
115
  try:
184
- if exc_val:
185
- self.conn.rollback()
186
- raise exc_val
187
- elif not self.commit:
188
- self.conn.rollback()
189
- else:
190
- self.conn.commit()
191
- except psycopg2.InterfaceError as e:
192
- self.conn.close()
116
+ wait = self.pool_open_timeout is not None
117
+ timeout = self.pool_open_timeout or 30.0
118
+ self.pool.open(wait, timeout)
119
+
120
+ with self.pool.connection() as conn:
121
+ yield conn
122
+ except psycopg.InterfaceError as e:
123
+ # conn.close()
193
124
  raise InterfaceError(str(e)) from e
194
- except psycopg2.DataError as e:
195
- raise DataError(str(e)) from e
196
- except psycopg2.OperationalError as e:
197
- self.conn.close()
125
+ except psycopg.OperationalError as e:
126
+ # conn.close()
198
127
  raise OperationalError(str(e)) from e
199
- except psycopg2.IntegrityError as e:
128
+ except psycopg.DataError as e:
129
+ raise DataError(str(e)) from e
130
+ except psycopg.IntegrityError as e:
200
131
  raise IntegrityError(str(e)) from e
201
- except psycopg2.InternalError as e:
132
+ except psycopg.InternalError as e:
202
133
  raise InternalError(str(e)) from e
203
- except psycopg2.ProgrammingError as e:
134
+ except psycopg.ProgrammingError as e:
204
135
  raise ProgrammingError(str(e)) from e
205
- except psycopg2.NotSupportedError as e:
136
+ except psycopg.NotSupportedError as e:
206
137
  raise NotSupportedError(str(e)) from e
207
- except psycopg2.DatabaseError as e:
138
+ except psycopg.DatabaseError as e:
208
139
  raise DatabaseError(str(e)) from e
209
- except psycopg2.Error as e:
140
+ except psycopg.Error as e:
141
+ # conn.close()
210
142
  raise PersistenceError(str(e)) from e
211
-
212
-
213
- class PostgresDatastore:
214
- def __init__(
215
- self,
216
- dbname: str,
217
- host: str,
218
- port: str,
219
- user: str,
220
- password: str,
221
- connect_timeout: int = 5,
222
- idle_in_transaction_session_timeout: int = 0,
223
- pool_size: int = 2,
224
- max_overflow: int = 2,
225
- pool_timeout: float = 5.0,
226
- conn_max_age: Optional[float] = None,
227
- pre_ping: bool = False,
228
- lock_timeout: int = 0,
229
- schema: str = "",
230
- ):
231
- self.pool = PostgresConnectionPool(
232
- dbname=dbname,
233
- host=host,
234
- port=port,
235
- user=user,
236
- password=password,
237
- connect_timeout=connect_timeout,
238
- idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
239
- pool_size=pool_size,
240
- max_overflow=max_overflow,
241
- pool_timeout=pool_timeout,
242
- max_age=conn_max_age,
243
- pre_ping=pre_ping,
244
- )
245
- self.lock_timeout = lock_timeout
246
- self.schema = schema.strip()
143
+ except Exception:
144
+ # conn.close()
145
+ raise
247
146
 
248
147
  @contextmanager
249
- def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
250
- with self.get_connection() as conn:
251
- with conn.transaction(commit) as curs:
252
- yield curs
253
-
254
- @contextmanager
255
- def get_connection(self) -> Iterator[PostgresConnection]:
256
- conn = self.pool.get_connection()
257
- try:
258
- yield conn
259
- finally:
260
- self.pool.put_connection(conn)
261
-
262
- def report_on_prepared_statements(
263
- self,
264
- ) -> Tuple[List[List[Union[bool, str]]], List[str]]:
265
- with self.get_connection() as conn:
266
- with conn.cursor() as curs:
267
- curs.execute("SELECT * from pg_prepared_statements")
268
- return sorted(curs.fetchall()), sorted(conn.is_prepared)
148
+ def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
149
+ conn: Connection[DictRow]
150
+ with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
151
+ yield conn.cursor()
269
152
 
270
153
  def close(self) -> None:
271
154
  self.pool.close()
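
The hunk above replaces the old PostgresConnection, PostgresConnectionPool and PostgresTransaction wrappers with a PostgresDatastore that owns a psycopg_pool connection pool directly and exposes two context managers, get_connection() and transaction(commit=...), translating psycopg exceptions into the library's persistence errors as they escape. A rough usage sketch, with connection settings assumed for illustration:

    from eventsourcing.postgres import PostgresDatastore

    datastore = PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="eventsourcing",
        password="eventsourcing",
        pool_size=2,
        max_overflow=2,
        # get_password_func=get_fresh_token,  # hypothetical callable, e.g. for rotating tokens
    )

    # Borrow a pooled connection; the pool is opened lazily on first use.
    with datastore.get_connection() as conn:
        with conn.cursor() as curs:
            curs.execute("SELECT 1")

    # Or take a cursor inside a transaction; it rolls back unless commit=True.
    with datastore.transaction(commit=True) as curs:
        curs.execute("SELECT 1")

    datastore.close()
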
@@ -274,17 +157,12 @@ class PostgresDatastore:
274
157
  self.close()
275
158
 
276
159
 
277
- PG_IDENTIFIER_MAX_LEN = 63
278
-
279
-
280
160
  class PostgresAggregateRecorder(AggregateRecorder):
281
161
  def __init__(
282
162
  self,
283
163
  datastore: PostgresDatastore,
284
164
  events_table_name: str,
285
165
  ):
286
- self.statement_name_aliases: Dict[str, str] = {}
287
- self.statement_name_aliases_lock = Lock()
288
166
  self.check_table_name_length(events_table_name, datastore.schema)
289
167
  self.datastore = datastore
290
168
  self.events_table_name = events_table_name
@@ -300,15 +178,12 @@ class PostgresAggregateRecorder(AggregateRecorder):
300
178
 
301
179
  self.create_table_statements = self.construct_create_table_statements()
302
180
  self.insert_events_statement = (
303
- f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4)"
304
- )
305
- self.insert_events_statement_name = f"insert_{events_table_name}".replace(
306
- ".", "_"
181
+ f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
307
182
  )
308
183
  self.select_events_statement = (
309
- f"SELECT * FROM {self.events_table_name} WHERE originator_id = $1"
184
+ f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
310
185
  )
311
- self.lock_statements: List[str] = []
186
+ self.lock_table_statements: List[str] = []
312
187
 
313
188
  @staticmethod
314
189
  def check_table_name_length(table_name: str, schema_name: str) -> None:
@@ -318,47 +193,8 @@ class PostgresAggregateRecorder(AggregateRecorder):
318
193
  else:
319
194
  unqualified_table_name = table_name
320
195
  if len(unqualified_table_name) > 63:
321
- raise ProgrammingError(f"Table name too long: {unqualified_table_name}")
322
-
323
- def get_statement_alias(self, statement_name: str) -> str:
324
- try:
325
- alias = self.statement_name_aliases[statement_name]
326
- except KeyError:
327
- with self.statement_name_aliases_lock:
328
- try:
329
- alias = self.statement_name_aliases[statement_name]
330
- except KeyError:
331
- existing_aliases = self.statement_name_aliases.values()
332
- if (
333
- len(statement_name) <= PG_IDENTIFIER_MAX_LEN
334
- and statement_name not in existing_aliases
335
- ):
336
- alias = statement_name
337
- self.statement_name_aliases[statement_name] = alias
338
- else:
339
- uid = uuid5(
340
- NAMESPACE_URL, f"/statement_names/{statement_name}"
341
- ).hex
342
- alias = uid
343
- for i in range(len(uid)): # pragma: no cover
344
- preserve_end = 21
345
- preserve_start = (
346
- PG_IDENTIFIER_MAX_LEN - preserve_end - i - 2
347
- )
348
- uuid5_tail = i
349
- candidate = (
350
- statement_name[:preserve_start]
351
- + "_"
352
- + (uid[-uuid5_tail:] if i else "")
353
- + "_"
354
- + statement_name[-preserve_end:]
355
- )
356
- assert len(alias) <= PG_IDENTIFIER_MAX_LEN
357
- if candidate not in existing_aliases:
358
- alias = candidate
359
- break
360
- self.statement_name_aliases[statement_name] = alias
361
- return alias
196
+ msg = f"Table name too long: {unqualified_table_name}"
197
+ raise ProgrammingError(msg)
362
198
 
363
199
  def construct_create_table_statements(self) -> List[str]:
364
200
  statement = (
@@ -377,155 +213,121 @@ class PostgresAggregateRecorder(AggregateRecorder):
377
213
  def create_table(self) -> None:
378
214
  with self.datastore.transaction(commit=True) as curs:
379
215
  for statement in self.create_table_statements:
380
- curs.execute(statement)
381
- pass # for Coverage 5.5 bug with CPython 3.10.0rc1
216
+ curs.execute(statement, prepare=False)
382
217
 
383
218
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
384
219
  def insert_events(
385
220
  self, stored_events: List[StoredEvent], **kwargs: Any
386
- ) -> Optional[Sequence[int]]:
221
+ ) -> Sequence[int] | None:
222
+ conn: Connection[DictRow]
223
+ exc: Exception | None = None
224
+ notification_ids: Sequence[int] | None = None
387
225
  with self.datastore.get_connection() as conn:
388
- self._prepare_insert_events(conn)
389
- with conn.transaction(commit=True) as curs:
390
- return self._insert_events(curs, stored_events, **kwargs)
391
-
392
- def _prepare_insert_events(self, conn: PostgresConnection) -> None:
393
- self._prepare(
394
- conn,
395
- self.insert_events_statement_name,
396
- self.insert_events_statement,
397
- )
398
-
399
- def _prepare(
400
- self, conn: PostgresConnection, statement_name: str, statement: str
401
- ) -> str:
402
- statement_name_alias = self.get_statement_alias(statement_name)
403
- if statement_name not in conn.is_prepared:
404
- curs: PostgresCursor
405
- with conn.transaction(commit=True) as curs:
406
- try:
407
- lock_timeout = self.datastore.lock_timeout
408
- curs.execute(f"SET LOCAL lock_timeout = '{lock_timeout}s'")
409
- curs.execute(f"PREPARE {statement_name_alias} AS " + statement)
410
- except psycopg2.errors.lookup(DUPLICATE_PREPARED_STATEMENT): # noqa
411
- pass
412
- conn.is_prepared.add(statement_name)
413
- return statement_name_alias
226
+ with conn.pipeline() as pipeline, conn.transaction():
227
+ # Do other things first, so they can be pipelined too.
228
+ with conn.cursor() as curs:
229
+ self._insert_events(curs, stored_events, **kwargs)
230
+ # Then use a different cursor for the executemany() call.
231
+ with conn.cursor() as curs:
232
+ try:
233
+ self._insert_stored_events(curs, stored_events, **kwargs)
234
+ # Sync now, so any uniqueness constraint violation causes an
235
+ # IntegrityError to be raised here, rather an InternalError
236
+ # being raised sometime later e.g. when commit() is called.
237
+ pipeline.sync()
238
+ notification_ids = self._fetch_ids_after_insert_events(
239
+ curs, stored_events, **kwargs
240
+ )
241
+ except Exception as e:
242
+ # Avoid psycopg emitting a pipeline warning.
243
+ exc = e
244
+ if exc:
245
+ # Reraise exception after pipeline context manager has exited.
246
+ raise exc
247
+ return notification_ids
414
248
 
415
249
  def _insert_events(
416
250
  self,
417
- c: PostgresCursor,
251
+ c: Cursor[DictRow],
418
252
  stored_events: List[StoredEvent],
419
253
  **kwargs: Any,
420
- ) -> Optional[Sequence[int]]:
421
- # Acquire "EXCLUSIVE" table lock, to serialize inserts so that
422
- # insertion of notification IDs is monotonic for notification log
423
- # readers. We want concurrent transactions to commit inserted
424
- # notification_id values in order, and by locking the table for writes,
425
- # it can be guaranteed. The EXCLUSIVE lock mode does not block
426
- # the ACCESS SHARE lock which is acquired during SELECT statements,
427
- # so the table can be read concurrently. However, INSERT normally
428
- # just acquires ROW EXCLUSIVE locks, which risks interleaving of
429
- # many inserts in one transaction with many insert in another
430
- # transaction. Since one transaction will commit before another,
431
- # the possibility arises for readers that are tailing a notification
432
- # log to miss items inserted later but with lower notification IDs.
433
- # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
434
- # https://www.postgresql.org/docs/9.1/sql-lock.html
435
- # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
436
- # -postgresql-serial-column-values-by-commit-order
437
-
438
- len_stored_events = len(stored_events)
254
+ ) -> None:
255
+ pass
439
256
 
257
+ def _insert_stored_events(
258
+ self,
259
+ c: Cursor[DictRow],
260
+ stored_events: List[StoredEvent],
261
+ **_: Any,
262
+ ) -> None:
440
263
  # Only do something if there is something to do.
441
- if len_stored_events > 0:
442
- # Mogrify the table lock statements.
443
- lock_sqls = (c.mogrify(s) for s in self.lock_statements)
444
-
445
- # Prepare the commands before getting the table lock.
446
- alias = self.statement_name_aliases[self.insert_events_statement_name]
447
- page_size = 500
448
- pages = [
449
- (
450
- c.mogrify(
451
- f"EXECUTE {alias}(%s, %s, %s, %s)",
452
- (
453
- stored_event.originator_id,
454
- stored_event.originator_version,
455
- stored_event.topic,
456
- stored_event.state,
457
- ),
264
+ if len(stored_events) > 0:
265
+ self._lock_table(c)
266
+
267
+ # Insert events.
268
+ c.executemany(
269
+ query=self.insert_events_statement,
270
+ params_seq=[
271
+ (
272
+ stored_event.originator_id,
273
+ stored_event.originator_version,
274
+ stored_event.topic,
275
+ stored_event.state,
458
276
  )
459
- for stored_event in page
460
- )
461
- for page in (
462
- stored_events[ndx : min(ndx + page_size, len_stored_events)]
463
- for ndx in range(0, len_stored_events, page_size)
464
- )
465
- ]
466
- commands = [
467
- b"; ".join(page)
468
- for page in chain([chain(lock_sqls, pages[0])], pages[1:])
469
- ]
277
+ for stored_event in stored_events
278
+ ],
279
+ returning="RETURNING" in self.insert_events_statement,
280
+ )
281
+
282
+ def _lock_table(self, c: Cursor[DictRow]) -> None:
283
+ pass
470
284
 
471
- # Execute the commands.
472
- for command in commands:
473
- c.execute(command)
285
+ def _fetch_ids_after_insert_events(
286
+ self,
287
+ c: Cursor[DictRow],
288
+ stored_events: List[StoredEvent],
289
+ **kwargs: Any,
290
+ ) -> Sequence[int] | None:
474
291
  return None
475
292
 
476
293
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
477
294
  def select_events(
478
295
  self,
479
296
  originator_id: UUID,
480
- gt: Optional[int] = None,
481
- lte: Optional[int] = None,
297
+ *,
298
+ gt: int | None = None,
299
+ lte: int | None = None,
482
300
  desc: bool = False,
483
- limit: Optional[int] = None,
301
+ limit: int | None = None,
484
302
  ) -> List[StoredEvent]:
485
- parts = [self.select_events_statement]
303
+ statement = self.select_events_statement
486
304
  params: List[Any] = [originator_id]
487
- statement_name = f"select_{self.events_table_name}".replace(".", "_")
488
305
  if gt is not None:
489
306
  params.append(gt)
490
- parts.append(f"AND originator_version > ${len(params)}")
491
- statement_name += "_gt"
307
+ statement += " AND originator_version > %s"
492
308
  if lte is not None:
493
309
  params.append(lte)
494
- parts.append(f"AND originator_version <= ${len(params)}")
495
- statement_name += "_lte"
496
- parts.append("ORDER BY originator_version")
310
+ statement += " AND originator_version <= %s"
311
+ statement += " ORDER BY originator_version"
497
312
  if desc is False:
498
- parts.append("ASC")
313
+ statement += " ASC"
499
314
  else:
500
- parts.append("DESC")
501
- statement_name += "_desc"
315
+ statement += " DESC"
502
316
  if limit is not None:
503
317
  params.append(limit)
504
- parts.append(f"LIMIT ${len(params)}")
505
- statement_name += "_limit"
506
- statement = " ".join(parts)
507
-
508
- stored_events = []
509
-
510
- with self.datastore.get_connection() as conn:
511
- alias = self._prepare(conn, statement_name, statement)
512
-
513
- with conn.transaction(commit=False) as curs:
514
- curs.execute(
515
- f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
516
- params,
318
+ statement += " LIMIT %s"
319
+
320
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
321
+ curs.execute(statement, params, prepare=True)
322
+ return [
323
+ StoredEvent(
324
+ originator_id=row["originator_id"],
325
+ originator_version=row["originator_version"],
326
+ topic=row["topic"],
327
+ state=bytes(row["state"]),
517
328
  )
518
- for row in curs.fetchall():
519
- stored_events.append(
520
- StoredEvent(
521
- originator_id=row["originator_id"],
522
- originator_version=row["originator_version"],
523
- topic=row["topic"],
524
- state=bytes(row["state"]),
525
- )
526
- )
527
- pass # for Coverage 5.5 bug with CPython 3.10.0rc1
528
- return stored_events
329
+ for row in curs.fetchall()
330
+ ]
529
331
 
530
332
 
531
333
  class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder):
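
The insert path above now relies on psycopg 3 pipeline mode: the subclass hook (_insert_events) and the batched executemany() of stored events are queued on a single round trip, pipeline.sync() flushes them so that a uniqueness violation surfaces as IntegrityError at that point rather than later at commit, and RETURNING values are read only afterwards. A rough standalone sketch of the same pattern, with the connection string, table and rows assumed for illustration:

    import psycopg
    from psycopg.rows import dict_row
    from uuid import uuid4

    rows = [
        (uuid4(), 1, "example:Created", b"{}"),
        (uuid4(), 1, "example:Created", b"{}"),
    ]

    with psycopg.connect("dbname=eventsourcing", row_factory=dict_row) as conn:
        with conn.pipeline() as pipeline, conn.transaction(), conn.cursor() as curs:
            curs.executemany(
                "INSERT INTO stored_events VALUES (%s, %s, %s, %s)"
                " RETURNING notification_id",
                rows,
                returning=True,  # keep each statement's result set
            )
            pipeline.sync()      # flush now: constraint violations raise here
            notification_ids = []
            while True:
                row = curs.fetchone()
                if row is not None:
                    notification_ids.append(row["notification_id"])
                if not curs.nextset():
                    break
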
@@ -535,45 +337,42 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
535
337
  events_table_name: str = "stored_events",
536
338
  ):
537
339
  super().__init__(datastore, events_table_name)
538
- self.insert_events_statement = (
539
- f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4) "
540
- f"RETURNING notification_id"
541
- )
340
+ self.insert_events_statement += " RETURNING notification_id"
542
341
  self.max_notification_id_statement = (
543
342
  f"SELECT MAX(notification_id) FROM {self.events_table_name}"
544
343
  )
545
- self.max_notification_id_statement_name = (
546
- f"max_notification_id_{events_table_name}".replace(".", "_")
547
- )
548
- self.lock_statements = [
344
+ self.lock_table_statements = [
549
345
  f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
550
346
  f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
551
347
  ]
552
348
 
553
349
  def construct_create_table_statements(self) -> List[str]:
554
- statements = [
555
- "CREATE TABLE IF NOT EXISTS "
556
- f"{self.events_table_name} ("
557
- "originator_id uuid NOT NULL, "
558
- "originator_version bigint NOT NULL, "
559
- "topic text, "
560
- "state bytea, "
561
- "notification_id bigserial, "
562
- "PRIMARY KEY "
563
- "(originator_id, originator_version)) "
564
- "WITH (autovacuum_enabled=false)",
565
- f"CREATE UNIQUE INDEX IF NOT EXISTS "
566
- f"{self.notification_id_index_name}"
567
- f"ON {self.events_table_name} (notification_id ASC);",
350
+ return [
351
+ (
352
+ "CREATE TABLE IF NOT EXISTS "
353
+ f"{self.events_table_name} ("
354
+ "originator_id uuid NOT NULL, "
355
+ "originator_version bigint NOT NULL, "
356
+ "topic text, "
357
+ "state bytea, "
358
+ "notification_id bigserial, "
359
+ "PRIMARY KEY "
360
+ "(originator_id, originator_version)) "
361
+ "WITH (autovacuum_enabled=false)"
362
+ ),
363
+ (
364
+ "CREATE UNIQUE INDEX IF NOT EXISTS "
365
+ f"{self.notification_id_index_name}"
366
+ f"ON {self.events_table_name} (notification_id ASC);"
367
+ ),
568
368
  ]
569
- return statements
570
369
 
571
370
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
572
371
  def select_notifications(
573
372
  self,
574
373
  start: int,
575
374
  limit: int,
576
- stop: Optional[int] = None,
375
+ stop: int | None = None,
577
376
  topics: Sequence[str] = (),
578
377
  ) -> List[Notification]:
579
378
  """
@@ -581,86 +380,91 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
581
380
  from 'start', limited by 'limit'.
582
381
  """
583
382
 
584
- params: List[Union[int, str, Sequence[str]]] = [start]
585
- statement = (
586
- "SELECT * " f"FROM {self.events_table_name} " "WHERE notification_id>=$1 "
587
- )
588
- statement_name = f"select_notifications_{self.events_table_name}".replace(
589
- ".", "_"
590
- )
383
+ params: List[int | str | Sequence[str]] = [start]
384
+ statement = f"SELECT * FROM {self.events_table_name} WHERE notification_id>=%s"
591
385
 
592
386
  if stop is not None:
593
387
  params.append(stop)
594
- statement += f"AND notification_id <= ${len(params)} "
595
- statement_name += "_stop"
388
+ statement += " AND notification_id <= %s"
596
389
 
597
390
  if topics:
598
391
  params.append(topics)
599
- statement += f"AND topic = ANY(${len(params)}) "
600
- statement_name += "_topics"
392
+ statement += " AND topic = ANY(%s)"
601
393
 
602
394
  params.append(limit)
603
- statement += "ORDER BY notification_id " f"LIMIT ${len(params)}"
604
-
605
- notifications = []
606
- with self.datastore.get_connection() as conn:
607
- alias = self._prepare(
608
- conn,
609
- statement_name,
610
- statement,
611
- )
612
- with conn.transaction(commit=False) as curs:
613
- curs.execute(
614
- f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
615
- params,
395
+ statement += " ORDER BY notification_id LIMIT %s"
396
+
397
+ connection = self.datastore.get_connection()
398
+ with connection as conn, conn.cursor() as curs:
399
+ curs.execute(statement, params, prepare=True)
400
+ return [
401
+ Notification(
402
+ id=row["notification_id"],
403
+ originator_id=row["originator_id"],
404
+ originator_version=row["originator_version"],
405
+ topic=row["topic"],
406
+ state=bytes(row["state"]),
616
407
  )
617
- for row in curs.fetchall():
618
- notifications.append(
619
- Notification(
620
- id=row["notification_id"],
621
- originator_id=row["originator_id"],
622
- originator_version=row["originator_version"],
623
- topic=row["topic"],
624
- state=bytes(row["state"]),
625
- )
626
- )
627
- pass # for Coverage 5.5 bug with CPython 3.10.0rc1
628
- return notifications
408
+ for row in curs.fetchall()
409
+ ]
629
410
 
630
411
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
631
412
  def max_notification_id(self) -> int:
632
413
  """
633
414
  Returns the maximum notification ID.
634
415
  """
635
- statement_name = self.max_notification_id_statement_name
636
- with self.datastore.get_connection() as conn:
637
- statement_alias = self._prepare(
638
- conn, statement_name, self.max_notification_id_statement
639
- )
640
- with conn.transaction(commit=False) as curs:
641
- curs.execute(
642
- f"EXECUTE {statement_alias}",
643
- )
644
- max_id = curs.fetchone()[0] or 0
645
- return max_id
416
+ conn: Connection[DictRow]
417
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
418
+ curs.execute(self.max_notification_id_statement)
419
+ fetchone = curs.fetchone()
420
+ assert fetchone is not None
421
+ return fetchone["max"] or 0
422
+
423
+ def _lock_table(self, c: Cursor[DictRow]) -> None:
424
+ # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
425
+ # stored events, so that readers don't pass over gaps that are filled in
426
+ # later. We want each transaction that will be issued with notifications
427
+ # IDs by the notification ID sequence to receive all its notification IDs
428
+ # and then commit, before another transaction is issued with any notification
429
+ # IDs. In other words, we want the insert order to be the same as the commit
430
+ # order. We can accomplish this by locking the table for writes. The
431
+ # EXCLUSIVE lock mode does not block SELECT statements, which acquire an
432
+ # ACCESS SHARE lock, so the stored events table can be read concurrently
433
+ # with writes and other reads. However, INSERT statements normally just
434
+ # acquires ROW EXCLUSIVE locks, which risks the interleaving (within the
435
+ # recorded sequence of notification IDs) of stored events from one transaction
436
+ # with those of another transaction. And since one transaction will always
437
+ # commit before another, the possibility arises when using ROW EXCLUSIVE locks
438
+ # for readers that are tailing a notification log to miss items inserted later
439
+ # but issued with lower notification IDs.
440
+ # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
441
+ # https://www.postgresql.org/docs/9.1/sql-lock.html
442
+ # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
443
+ # -postgresql-serial-column-values-by-commit-order
444
+ for lock_statement in self.lock_table_statements:
445
+ c.execute(lock_statement, prepare=True)
646
446
 
647
- def _insert_events(
447
+ def _fetch_ids_after_insert_events(
648
448
  self,
649
- c: PostgresCursor,
449
+ c: Cursor[DictRow],
650
450
  stored_events: List[StoredEvent],
651
451
  **kwargs: Any,
652
- ) -> Optional[Sequence[int]]:
653
- super()._insert_events(c, stored_events, **kwargs)
654
- if stored_events:
655
- last_notification_id = c.fetchone()[0]
656
- notification_ids = list(
657
- range(
658
- last_notification_id - len(stored_events) + 1,
659
- last_notification_id + 1,
660
- )
661
- )
662
- else:
663
- notification_ids = []
452
+ ) -> Sequence[int] | None:
453
+ notification_ids: List[int] = []
454
+ len_events = len(stored_events)
455
+ if len_events:
456
+ if (
457
+ (c.statusmessage == "SET")
458
+ and c.nextset()
459
+ and (c.statusmessage == "LOCK TABLE")
460
+ ):
461
+ while c.nextset() and len(notification_ids) != len_events:
462
+ row = c.fetchone()
463
+ assert row is not None
464
+ notification_ids.append(row["notification_id"])
465
+ if len(notification_ids) != len(stored_events):
466
+ msg = "Couldn't get all notification IDs"
467
+ raise ProgrammingError(msg)
664
468
  return notification_ids
665
469
 
666
470
 
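
The long comment in _lock_table() above carries the key ordering argument: taking an EXCLUSIVE table lock before the batch insert makes insert order match commit order, so notification IDs become visible to readers monotonically, while plain SELECTs (which only need ACCESS SHARE) remain unblocked. A hedged sketch of what that means for a reader tailing the log, with the lock_timeout value and the paging loop assumed for illustration:

    # Statements executed by _lock_table() at the start of each insert
    # transaction (default lock_timeout shown):
    #   SET LOCAL lock_timeout = '0s'
    #   LOCK TABLE stored_events IN EXCLUSIVE MODE

    def tail(recorder, last_seen=0, batch=100):
        # Because inserts commit in notification_id order, paging forward
        # from the last seen ID cannot skip over rows that commit later
        # with smaller IDs.
        while True:
            notifications = recorder.select_notifications(start=last_seen + 1, limit=batch)
            if not notifications:
                break
            yield from notifications
            last_seen = notifications[-1].id
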
@@ -675,26 +479,17 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
675
479
  self.tracking_table_name = tracking_table_name
676
480
  super().__init__(datastore, events_table_name)
677
481
  self.insert_tracking_statement = (
678
- f"INSERT INTO {self.tracking_table_name} VALUES ($1, $2)"
679
- )
680
- self.insert_tracking_statement_name = f"insert_{tracking_table_name}".replace(
681
- ".", "_"
482
+ f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
682
483
  )
683
484
  self.max_tracking_id_statement = (
684
485
  "SELECT MAX(notification_id) "
685
486
  f"FROM {self.tracking_table_name} "
686
- "WHERE application_name=$1"
487
+ "WHERE application_name=%s"
687
488
  )
688
489
  self.count_tracking_id_statement = (
689
490
  "SELECT COUNT(*) "
690
491
  f"FROM {self.tracking_table_name} "
691
- "WHERE application_name=$1 AND notification_id=$2"
692
- )
693
- self.max_tracking_id_statement_name = (
694
- f"max_tracking_id_{tracking_table_name}".replace(".", "_")
695
- )
696
- self.count_tracking_id_statement_name = (
697
- f"count_tracking_id_{tracking_table_name}".replace(".", "_")
492
+ "WHERE application_name=%s AND notification_id=%s"
698
493
  )
699
494
 
700
495
  def construct_create_table_statements(self) -> List[str]:
@@ -711,61 +506,46 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
711
506
 
712
507
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
713
508
  def max_tracking_id(self, application_name: str) -> int:
714
- statement_name = self.max_tracking_id_statement_name
715
- with self.datastore.get_connection() as conn:
716
- statement_alias = self._prepare(
717
- conn, statement_name, self.max_tracking_id_statement
509
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
510
+ curs.execute(
511
+ query=self.max_tracking_id_statement,
512
+ params=(application_name,),
513
+ prepare=True,
718
514
  )
719
-
720
- with conn.transaction(commit=False) as curs:
721
- curs.execute(
722
- f"EXECUTE {statement_alias}(%s)",
723
- (application_name,),
724
- )
725
- max_id = curs.fetchone()[0] or 0
726
- return max_id
515
+ fetchone = curs.fetchone()
516
+ assert fetchone is not None
517
+ return fetchone["max"] or 0
727
518
 
728
519
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
729
520
  def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
730
- statement_name = self.count_tracking_id_statement_name
731
- with self.datastore.get_connection() as conn:
732
- statement_alias = self._prepare(
733
- conn, statement_name, self.count_tracking_id_statement
521
+ conn: Connection[DictRow]
522
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
523
+ curs.execute(
524
+ query=self.count_tracking_id_statement,
525
+ params=(application_name, notification_id),
526
+ prepare=True,
734
527
  )
735
-
736
- with conn.transaction(commit=False) as curs:
737
- curs.execute(
738
- f"EXECUTE {statement_alias}(%s, %s)",
739
- (application_name, notification_id),
740
- )
741
- return bool(curs.fetchone()[0])
742
-
743
- def _prepare_insert_events(self, conn: PostgresConnection) -> None:
744
- super()._prepare_insert_events(conn)
745
- self._prepare(
746
- conn, self.insert_tracking_statement_name, self.insert_tracking_statement
747
- )
528
+ fetchone = curs.fetchone()
529
+ assert fetchone is not None
530
+ return bool(fetchone["count"])
748
531
 
749
532
  def _insert_events(
750
533
  self,
751
- c: PostgresCursor,
534
+ c: Cursor[DictRow],
752
535
  stored_events: List[StoredEvent],
753
536
  **kwargs: Any,
754
- ) -> Optional[Sequence[int]]:
755
- notification_ids = super()._insert_events(c, stored_events, **kwargs)
756
- tracking: Optional[Tracking] = kwargs.get("tracking", None)
537
+ ) -> None:
538
+ tracking: Tracking | None = kwargs.get("tracking", None)
757
539
  if tracking is not None:
758
- statement_alias = self.statement_name_aliases[
759
- self.insert_tracking_statement_name
760
- ]
761
540
  c.execute(
762
- f"EXECUTE {statement_alias}(%s, %s)",
763
- (
541
+ query=self.insert_tracking_statement,
542
+ params=(
764
543
  tracking.application_name,
765
544
  tracking.notification_id,
766
545
  ),
546
+ prepare=True,
767
547
  )
768
- return notification_ids
548
+ super()._insert_events(c, stored_events, **kwargs)
769
549
 
770
550
 
771
551
  class Factory(InfrastructureFactory):
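
In the process recorder above, the tracking record is written on the same cursor and in the same pipelined transaction as the new stored events, which is what gives downstream process applications exactly-once behaviour: replaying an upstream notification re-inserts a tracking row that already exists, the whole transaction fails with IntegrityError, and nothing is recorded twice. A hedged sketch of the calling side, with the recorder assumed to be an already-constructed PostgresProcessRecorder and the values invented for illustration:

    from uuid import uuid4

    from eventsourcing.persistence import StoredEvent, Tracking

    stored_event = StoredEvent(
        originator_id=uuid4(),
        originator_version=1,
        topic="example:Created",
        state=b"{}",
    )
    tracking = Tracking(application_name="upstream_app", notification_id=42)

    # Events and the tracking record are inserted atomically; processing
    # notification 42 a second time violates the tracking table's
    # uniqueness constraint and raises IntegrityError.
    recorder.insert_events([stored_event], tracking=tracking)
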
@@ -773,7 +553,8 @@ class Factory(InfrastructureFactory):
773
553
  POSTGRES_HOST = "POSTGRES_HOST"
774
554
  POSTGRES_PORT = "POSTGRES_PORT"
775
555
  POSTGRES_USER = "POSTGRES_USER"
776
- POSTGRES_PASSWORD = "POSTGRES_PASSWORD"
556
+ POSTGRES_PASSWORD = "POSTGRES_PASSWORD" # noqa: S105
557
+ POSTGRES_GET_PASSWORD_TOPIC = "POSTGRES_GET_PASSWORD_TOPIC" # noqa: S105
777
558
  POSTGRES_CONNECT_TIMEOUT = "POSTGRES_CONNECT_TIMEOUT"
778
559
  POSTGRES_CONN_MAX_AGE = "POSTGRES_CONN_MAX_AGE"
779
560
  POSTGRES_PRE_PING = "POSTGRES_PRE_PING"
@@ -795,54 +576,61 @@ class Factory(InfrastructureFactory):
795
576
  super().__init__(env)
796
577
  dbname = self.env.get(self.POSTGRES_DBNAME)
797
578
  if dbname is None:
798
- raise EnvironmentError(
579
+ msg = (
799
580
  "Postgres database name not found "
800
581
  "in environment with key "
801
582
  f"'{self.POSTGRES_DBNAME}'"
802
583
  )
584
+ raise OSError(msg)
803
585
 
804
586
  host = self.env.get(self.POSTGRES_HOST)
805
587
  if host is None:
806
- raise EnvironmentError(
588
+ msg = (
807
589
  "Postgres host not found "
808
590
  "in environment with key "
809
591
  f"'{self.POSTGRES_HOST}'"
810
592
  )
593
+ raise OSError(msg)
811
594
 
812
595
  port = self.env.get(self.POSTGRES_PORT) or "5432"
813
596
 
814
597
  user = self.env.get(self.POSTGRES_USER)
815
598
  if user is None:
816
- raise EnvironmentError(
599
+ msg = (
817
600
  "Postgres user not found "
818
601
  "in environment with key "
819
602
  f"'{self.POSTGRES_USER}'"
820
603
  )
604
+ raise OSError(msg)
605
+
606
+ get_password_func = None
607
+ get_password_topic = self.env.get(self.POSTGRES_GET_PASSWORD_TOPIC)
608
+ if not get_password_topic:
609
+ password = self.env.get(self.POSTGRES_PASSWORD)
610
+ if password is None:
611
+ msg = (
612
+ "Postgres password not found "
613
+ "in environment with key "
614
+ f"'{self.POSTGRES_PASSWORD}'"
615
+ )
616
+ raise OSError(msg)
617
+ else:
618
+ get_password_func = resolve_topic(get_password_topic)
619
+ password = ""
821
620
 
822
- password = self.env.get(self.POSTGRES_PASSWORD)
823
- if password is None:
824
- raise EnvironmentError(
825
- "Postgres password not found "
826
- "in environment with key "
827
- f"'{self.POSTGRES_PASSWORD}'"
828
- )
829
-
830
- connect_timeout: Optional[int]
621
+ connect_timeout = 5
831
622
  connect_timeout_str = self.env.get(self.POSTGRES_CONNECT_TIMEOUT)
832
- if connect_timeout_str is None:
833
- connect_timeout = 5
834
- elif connect_timeout_str == "":
835
- connect_timeout = 5
836
- else:
623
+ if connect_timeout_str:
837
624
  try:
838
625
  connect_timeout = int(connect_timeout_str)
839
626
  except ValueError:
840
- raise EnvironmentError(
841
- f"Postgres environment value for key "
627
+ msg = (
628
+ "Postgres environment value for key "
842
629
  f"'{self.POSTGRES_CONNECT_TIMEOUT}' is invalid. "
843
- f"If set, an integer or empty string is expected: "
630
+ "If set, an integer or empty string is expected: "
844
631
  f"'{connect_timeout_str}'"
845
632
  )
633
+ raise OSError(msg) from None
846
634
 
847
635
  idle_in_transaction_session_timeout_str = (
848
636
  self.env.get(self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT) or "5"
@@ -853,80 +641,69 @@ class Factory(InfrastructureFactory):
853
641
  idle_in_transaction_session_timeout_str
854
642
  )
855
643
  except ValueError:
856
- raise EnvironmentError(
857
- f"Postgres environment value for key "
644
+ msg = (
645
+ "Postgres environment value for key "
858
646
  f"'{self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT}' is invalid. "
859
- f"If set, an integer or empty string is expected: "
647
+ "If set, an integer or empty string is expected: "
860
648
  f"'{idle_in_transaction_session_timeout_str}'"
861
649
  )
650
+ raise OSError(msg) from None
862
651
 
863
- pool_size: Optional[int]
652
+ pool_size = 5
864
653
  pool_size_str = self.env.get(self.POSTGRES_POOL_SIZE)
865
- if pool_size_str is None:
866
- pool_size = 5
867
- elif pool_size_str == "":
868
- pool_size = 5
869
- else:
654
+ if pool_size_str:
870
655
  try:
871
656
  pool_size = int(pool_size_str)
872
657
  except ValueError:
873
- raise EnvironmentError(
874
- f"Postgres environment value for key "
658
+ msg = (
659
+ "Postgres environment value for key "
875
660
  f"'{self.POSTGRES_POOL_SIZE}' is invalid. "
876
- f"If set, an integer or empty string is expected: "
661
+ "If set, an integer or empty string is expected: "
877
662
  f"'{pool_size_str}'"
878
663
  )
664
+ raise OSError(msg) from None
879
665
 
880
- pool_max_overflow: Optional[int]
666
+ pool_max_overflow = 10
881
667
  pool_max_overflow_str = self.env.get(self.POSTGRES_POOL_MAX_OVERFLOW)
882
- if pool_max_overflow_str is None:
883
- pool_max_overflow = 10
884
- elif pool_max_overflow_str == "":
885
- pool_max_overflow = 10
886
- else:
668
+ if pool_max_overflow_str:
887
669
  try:
888
670
  pool_max_overflow = int(pool_max_overflow_str)
889
671
  except ValueError:
890
- raise EnvironmentError(
891
- f"Postgres environment value for key "
672
+ msg = (
673
+ "Postgres environment value for key "
892
674
  f"'{self.POSTGRES_POOL_MAX_OVERFLOW}' is invalid. "
893
- f"If set, an integer or empty string is expected: "
675
+ "If set, an integer or empty string is expected: "
894
676
  f"'{pool_max_overflow_str}'"
895
677
  )
678
+ raise OSError(msg) from None
896
679
 
897
- pool_timeout: Optional[float]
680
+ pool_timeout = 30.0
898
681
  pool_timeout_str = self.env.get(self.POSTGRES_POOL_TIMEOUT)
899
- if pool_timeout_str is None:
900
- pool_timeout = 30
901
- elif pool_timeout_str == "":
902
- pool_timeout = 30
903
- else:
682
+ if pool_timeout_str:
904
683
  try:
905
684
  pool_timeout = float(pool_timeout_str)
906
685
  except ValueError:
907
- raise EnvironmentError(
908
- f"Postgres environment value for key "
686
+ msg = (
687
+ "Postgres environment value for key "
909
688
  f"'{self.POSTGRES_POOL_TIMEOUT}' is invalid. "
910
- f"If set, a float or empty string is expected: "
689
+ "If set, a float or empty string is expected: "
911
690
  f"'{pool_timeout_str}'"
912
691
  )
692
+ raise OSError(msg) from None
913
693
 
914
- conn_max_age: Optional[float]
694
+ conn_max_age = 60 * 60.0
915
695
  conn_max_age_str = self.env.get(self.POSTGRES_CONN_MAX_AGE)
916
- if conn_max_age_str is None:
917
- conn_max_age = None
918
- elif conn_max_age_str == "":
919
- conn_max_age = None
920
- else:
696
+ if conn_max_age_str:
921
697
  try:
922
698
  conn_max_age = float(conn_max_age_str)
923
699
  except ValueError:
924
- raise EnvironmentError(
925
- f"Postgres environment value for key "
700
+ msg = (
701
+ "Postgres environment value for key "
926
702
  f"'{self.POSTGRES_CONN_MAX_AGE}' is invalid. "
927
- f"If set, a float or empty string is expected: "
703
+ "If set, a float or empty string is expected: "
928
704
  f"'{conn_max_age_str}'"
929
705
  )
706
+ raise OSError(msg) from None
930
707
 
931
708
  pre_ping = strtobool(self.env.get(self.POSTGRES_PRE_PING) or "no")
932
709
 
@@ -935,12 +712,13 @@ class Factory(InfrastructureFactory):
935
712
  try:
936
713
  lock_timeout = int(lock_timeout_str)
937
714
  except ValueError:
938
- raise EnvironmentError(
939
- f"Postgres environment value for key "
715
+ msg = (
716
+ "Postgres environment value for key "
940
717
  f"'{self.POSTGRES_LOCK_TIMEOUT}' is invalid. "
941
- f"If set, an integer or empty string is expected: "
718
+ "If set, an integer or empty string is expected: "
942
719
  f"'{lock_timeout_str}'"
943
720
  )
721
+ raise OSError(msg) from None
944
722
 
945
723
  schema = self.env.get(self.POSTGRES_SCHEMA) or ""
946
724
 
@@ -950,6 +728,7 @@ class Factory(InfrastructureFactory):
950
728
  port=port,
951
729
  user=user,
952
730
  password=password,
731
+ get_password_func=get_password_func,
953
732
  connect_timeout=connect_timeout,
954
733
  idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
955
734
  pool_size=pool_size,
@@ -1008,4 +787,8 @@ class Factory(InfrastructureFactory):
1008
787
  return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
1009
788
 
1010
789
  def close(self) -> None:
1011
- self.datastore.close()
790
+ if hasattr(self, "datastore"):
791
+ self.datastore.close()
792
+
793
+ def __del__(self) -> None:
794
+ self.close()
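
Taken together, the Factory hunks above keep the same environment-driven configuration but report bad values with OSError and add POSTGRES_GET_PASSWORD_TOPIC, a topic resolved with resolve_topic() to a zero-argument callable that supplies a fresh password whenever the pool opens a connection. A hedged configuration sketch; the values are illustrative, the commented topic names a hypothetical helper module, and PERSISTENCE_MODULE is the library's usual selector for this adapter rather than something introduced in this diff:

    import os

    os.environ.update(
        {
            "PERSISTENCE_MODULE": "eventsourcing.postgres",
            "POSTGRES_DBNAME": "eventsourcing",
            "POSTGRES_HOST": "127.0.0.1",
            "POSTGRES_PORT": "5432",
            "POSTGRES_USER": "eventsourcing",
            # Either a static password...
            "POSTGRES_PASSWORD": "eventsourcing",
            # ...or a topic for a callable returning one (hypothetical module):
            # "POSTGRES_GET_PASSWORD_TOPIC": "myproject.db:get_password",
            "POSTGRES_CONNECT_TIMEOUT": "5",
            "POSTGRES_POOL_SIZE": "5",
            "POSTGRES_POOL_MAX_OVERFLOW": "10",
            "POSTGRES_POOL_TIMEOUT": "30",
            "POSTGRES_CONN_MAX_AGE": "3600",
            "POSTGRES_PRE_PING": "yes",
            "POSTGRES_LOCK_TIMEOUT": "0",
            "POSTGRES_SCHEMA": "public",
        }
    )
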