eventsourcing 9.2.22__py3-none-any.whl → 9.3.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release.

Files changed (144)
  1. eventsourcing/__init__.py +1 -1
  2. eventsourcing/application.py +116 -135
  3. eventsourcing/cipher.py +15 -12
  4. eventsourcing/dispatch.py +31 -91
  5. eventsourcing/domain.py +220 -226
  6. eventsourcing/examples/__init__.py +0 -0
  7. eventsourcing/examples/aggregate1/__init__.py +0 -0
  8. eventsourcing/examples/aggregate1/application.py +27 -0
  9. eventsourcing/examples/aggregate1/domainmodel.py +16 -0
  10. eventsourcing/examples/aggregate1/test_application.py +37 -0
  11. eventsourcing/examples/aggregate2/__init__.py +0 -0
  12. eventsourcing/examples/aggregate2/application.py +27 -0
  13. eventsourcing/examples/aggregate2/domainmodel.py +22 -0
  14. eventsourcing/examples/aggregate2/test_application.py +37 -0
  15. eventsourcing/examples/aggregate3/__init__.py +0 -0
  16. eventsourcing/examples/aggregate3/application.py +27 -0
  17. eventsourcing/examples/aggregate3/domainmodel.py +38 -0
  18. eventsourcing/examples/aggregate3/test_application.py +37 -0
  19. eventsourcing/examples/aggregate4/__init__.py +0 -0
  20. eventsourcing/examples/aggregate4/application.py +27 -0
  21. eventsourcing/examples/aggregate4/domainmodel.py +114 -0
  22. eventsourcing/examples/aggregate4/test_application.py +38 -0
  23. eventsourcing/examples/aggregate5/__init__.py +0 -0
  24. eventsourcing/examples/aggregate5/application.py +27 -0
  25. eventsourcing/examples/aggregate5/domainmodel.py +131 -0
  26. eventsourcing/examples/aggregate5/test_application.py +38 -0
  27. eventsourcing/examples/aggregate6/__init__.py +0 -0
  28. eventsourcing/examples/aggregate6/application.py +30 -0
  29. eventsourcing/examples/aggregate6/domainmodel.py +123 -0
  30. eventsourcing/examples/aggregate6/test_application.py +38 -0
  31. eventsourcing/examples/aggregate6a/__init__.py +0 -0
  32. eventsourcing/examples/aggregate6a/application.py +40 -0
  33. eventsourcing/examples/aggregate6a/domainmodel.py +149 -0
  34. eventsourcing/examples/aggregate6a/test_application.py +45 -0
  35. eventsourcing/examples/aggregate7/__init__.py +0 -0
  36. eventsourcing/examples/aggregate7/application.py +48 -0
  37. eventsourcing/examples/aggregate7/domainmodel.py +144 -0
  38. eventsourcing/examples/aggregate7/persistence.py +57 -0
  39. eventsourcing/examples/aggregate7/test_application.py +38 -0
  40. eventsourcing/examples/aggregate7/test_compression_and_encryption.py +45 -0
  41. eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +67 -0
  42. eventsourcing/examples/aggregate7a/__init__.py +0 -0
  43. eventsourcing/examples/aggregate7a/application.py +56 -0
  44. eventsourcing/examples/aggregate7a/domainmodel.py +170 -0
  45. eventsourcing/examples/aggregate7a/test_application.py +46 -0
  46. eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +45 -0
  47. eventsourcing/examples/aggregate8/__init__.py +0 -0
  48. eventsourcing/examples/aggregate8/application.py +47 -0
  49. eventsourcing/examples/aggregate8/domainmodel.py +65 -0
  50. eventsourcing/examples/aggregate8/persistence.py +57 -0
  51. eventsourcing/examples/aggregate8/test_application.py +37 -0
  52. eventsourcing/examples/aggregate8/test_compression_and_encryption.py +44 -0
  53. eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +38 -0
  54. eventsourcing/examples/bankaccounts/__init__.py +0 -0
  55. eventsourcing/examples/bankaccounts/application.py +70 -0
  56. eventsourcing/examples/bankaccounts/domainmodel.py +56 -0
  57. eventsourcing/examples/bankaccounts/test.py +173 -0
  58. eventsourcing/examples/cargoshipping/__init__.py +0 -0
  59. eventsourcing/examples/cargoshipping/application.py +126 -0
  60. eventsourcing/examples/cargoshipping/domainmodel.py +330 -0
  61. eventsourcing/examples/cargoshipping/interface.py +143 -0
  62. eventsourcing/examples/cargoshipping/test.py +231 -0
  63. eventsourcing/examples/contentmanagement/__init__.py +0 -0
  64. eventsourcing/examples/contentmanagement/application.py +118 -0
  65. eventsourcing/examples/contentmanagement/domainmodel.py +69 -0
  66. eventsourcing/examples/contentmanagement/test.py +180 -0
  67. eventsourcing/examples/contentmanagement/utils.py +26 -0
  68. eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
  69. eventsourcing/examples/contentmanagementsystem/application.py +54 -0
  70. eventsourcing/examples/contentmanagementsystem/postgres.py +17 -0
  71. eventsourcing/examples/contentmanagementsystem/sqlite.py +17 -0
  72. eventsourcing/examples/contentmanagementsystem/system.py +14 -0
  73. eventsourcing/examples/contentmanagementsystem/test_system.py +180 -0
  74. eventsourcing/examples/searchablecontent/__init__.py +0 -0
  75. eventsourcing/examples/searchablecontent/application.py +45 -0
  76. eventsourcing/examples/searchablecontent/persistence.py +23 -0
  77. eventsourcing/examples/searchablecontent/postgres.py +118 -0
  78. eventsourcing/examples/searchablecontent/sqlite.py +136 -0
  79. eventsourcing/examples/searchablecontent/test_application.py +110 -0
  80. eventsourcing/examples/searchablecontent/test_recorder.py +68 -0
  81. eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
  82. eventsourcing/examples/searchabletimestamps/application.py +32 -0
  83. eventsourcing/examples/searchabletimestamps/persistence.py +20 -0
  84. eventsourcing/examples/searchabletimestamps/postgres.py +110 -0
  85. eventsourcing/examples/searchabletimestamps/sqlite.py +99 -0
  86. eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +94 -0
  87. eventsourcing/examples/test_invoice.py +176 -0
  88. eventsourcing/examples/test_parking_lot.py +206 -0
  89. eventsourcing/interface.py +2 -2
  90. eventsourcing/persistence.py +85 -81
  91. eventsourcing/popo.py +30 -31
  92. eventsourcing/postgres.py +379 -590
  93. eventsourcing/sqlite.py +91 -99
  94. eventsourcing/system.py +52 -57
  95. eventsourcing/tests/application.py +20 -32
  96. eventsourcing/tests/application_tests/__init__.py +0 -0
  97. eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +55 -0
  98. eventsourcing/tests/application_tests/test_application_with_popo.py +22 -0
  99. eventsourcing/tests/application_tests/test_application_with_postgres.py +75 -0
  100. eventsourcing/tests/application_tests/test_application_with_sqlite.py +72 -0
  101. eventsourcing/tests/application_tests/test_cache.py +134 -0
  102. eventsourcing/tests/application_tests/test_event_sourced_log.py +162 -0
  103. eventsourcing/tests/application_tests/test_notificationlog.py +232 -0
  104. eventsourcing/tests/application_tests/test_notificationlogreader.py +126 -0
  105. eventsourcing/tests/application_tests/test_processapplication.py +110 -0
  106. eventsourcing/tests/application_tests/test_processingpolicy.py +109 -0
  107. eventsourcing/tests/application_tests/test_repository.py +504 -0
  108. eventsourcing/tests/application_tests/test_snapshotting.py +68 -0
  109. eventsourcing/tests/application_tests/test_upcasting.py +459 -0
  110. eventsourcing/tests/docs_tests/__init__.py +0 -0
  111. eventsourcing/tests/docs_tests/test_docs.py +293 -0
  112. eventsourcing/tests/domain.py +1 -1
  113. eventsourcing/tests/domain_tests/__init__.py +0 -0
  114. eventsourcing/tests/domain_tests/test_aggregate.py +1180 -0
  115. eventsourcing/tests/domain_tests/test_aggregate_decorators.py +1604 -0
  116. eventsourcing/tests/domain_tests/test_domainevent.py +80 -0
  117. eventsourcing/tests/interface_tests/__init__.py +0 -0
  118. eventsourcing/tests/interface_tests/test_remotenotificationlog.py +258 -0
  119. eventsourcing/tests/persistence.py +52 -50
  120. eventsourcing/tests/persistence_tests/__init__.py +0 -0
  121. eventsourcing/tests/persistence_tests/test_aes.py +93 -0
  122. eventsourcing/tests/persistence_tests/test_connection_pool.py +722 -0
  123. eventsourcing/tests/persistence_tests/test_eventstore.py +72 -0
  124. eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +21 -0
  125. eventsourcing/tests/persistence_tests/test_mapper.py +113 -0
  126. eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +69 -0
  127. eventsourcing/tests/persistence_tests/test_popo.py +124 -0
  128. eventsourcing/tests/persistence_tests/test_postgres.py +1119 -0
  129. eventsourcing/tests/persistence_tests/test_sqlite.py +348 -0
  130. eventsourcing/tests/persistence_tests/test_transcoder.py +44 -0
  131. eventsourcing/tests/postgres_utils.py +7 -7
  132. eventsourcing/tests/system_tests/__init__.py +0 -0
  133. eventsourcing/tests/system_tests/test_runner.py +935 -0
  134. eventsourcing/tests/system_tests/test_system.py +284 -0
  135. eventsourcing/tests/utils_tests/__init__.py +0 -0
  136. eventsourcing/tests/utils_tests/test_utils.py +226 -0
  137. eventsourcing/utils.py +47 -50
  138. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0.dist-info}/METADATA +29 -79
  139. eventsourcing-9.3.0.dist-info/RECORD +145 -0
  140. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0.dist-info}/WHEEL +1 -2
  141. eventsourcing-9.2.22.dist-info/RECORD +0 -25
  142. eventsourcing-9.2.22.dist-info/top_level.txt +0 -1
  143. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0.dist-info}/AUTHORS +0 -0
  144. {eventsourcing-9.2.22.dist-info → eventsourcing-9.3.0.dist-info}/LICENSE +0 -0
eventsourcing/postgres.py CHANGED
@@ -1,35 +1,19 @@
 from __future__ import annotations
 
+import logging
 from contextlib import contextmanager
-from itertools import chain
-from threading import Lock
-from types import TracebackType
-from typing import (
-    Any,
-    Dict,
-    Iterator,
-    List,
-    Optional,
-    Sequence,
-    Set,
-    Tuple,
-    Type,
-    Union,
-)
-from uuid import NAMESPACE_URL, UUID, uuid5
+from typing import TYPE_CHECKING, Any, Callable, Iterator, List, Sequence
 
-import psycopg2
-import psycopg2.errors
-import psycopg2.extras
-from psycopg2.errorcodes import DUPLICATE_PREPARED_STATEMENT
-from psycopg2.extensions import connection, cursor
+import psycopg
+import psycopg.errors
+import psycopg_pool
+from psycopg import Connection, Cursor
+from psycopg.rows import DictRow, dict_row
+from typing_extensions import Self
 
 from eventsourcing.persistence import (
     AggregateRecorder,
     ApplicationRecorder,
-    Connection,
-    ConnectionPool,
-    Cursor,
     DatabaseError,
     DataError,
     InfrastructureFactory,
@@ -45,74 +29,32 @@ from eventsourcing.persistence import (
     StoredEvent,
     Tracking,
 )
-from eventsourcing.utils import Environment, retry, strtobool
-
-psycopg2.extras.register_uuid()
-
-
-class PostgresCursor(Cursor):
-    def __init__(self, pg_cursor: cursor):
-        self.pg_cursor = pg_cursor
-
-    def __enter__(self, *args: Any, **kwargs: Any) -> "PostgresCursor":
-        self.pg_cursor.__enter__(*args, **kwargs)
-        return self
-
-    def __exit__(self, *args: Any, **kwargs: Any) -> None:
-        return self.pg_cursor.__exit__(*args, **kwargs)
-
-    def mogrify(self, statement: str, params: Any = None) -> bytes:
-        return self.pg_cursor.mogrify(statement, vars=params)
-
-    def execute(self, statement: Union[str, bytes], params: Any = None) -> None:
-        self.pg_cursor.execute(query=statement, vars=params)
-
-    def fetchall(self) -> Any:
-        return self.pg_cursor.fetchall()
-
-    def fetchone(self) -> Any:
-        return self.pg_cursor.fetchone()
+from eventsourcing.utils import Environment, resolve_topic, retry, strtobool
 
-    @property
-    def closed(self) -> bool:
-        return self.pg_cursor.closed
+if TYPE_CHECKING:  # pragma: nocover
+    from uuid import UUID
 
+logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
+logging.getLogger("psycopg").setLevel(logging.CRITICAL)
 
-class PostgresConnection(Connection[PostgresCursor]):
-    def __init__(self, pg_conn: connection, max_age: Optional[float]):
-        super().__init__(max_age=max_age)
-        self._pg_conn = pg_conn
-        self.is_prepared: Set[str] = set()
 
-    @contextmanager
-    def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
-        # Context managed transaction.
-        with PostgresTransaction(self, commit) as curs:
-            # Context managed cursor.
-            with curs:
-                yield curs
-
-    def cursor(self) -> PostgresCursor:
-        return PostgresCursor(
-            self._pg_conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
-        )
-
-    def rollback(self) -> None:
-        self._pg_conn.rollback()
-
-    def commit(self) -> None:
-        self._pg_conn.commit()
-
-    def _close(self) -> None:
-        self._pg_conn.close()
-        super()._close()
+class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
+    def __init__(
+        self,
+        *args: Any,
+        get_password_func: Callable[[], str] | None = None,
+        **kwargs: Any,
+    ) -> None:
+        self.get_password_func = get_password_func
+        super().__init__(*args, **kwargs)
 
-    @property
-    def closed(self) -> bool:
-        return bool(self._pg_conn.closed)
+    def _connect(self, timeout: float | None = None) -> Connection[Any]:
+        if self.get_password_func:
+            self.kwargs["password"] = self.get_password_func()
+        return super()._connect(timeout=timeout)
 
 
-class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
+class PostgresDatastore:
     def __init__(
         self,
         dbname: str,
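The new ConnectionPool subclass above calls get_password_func just before opening each server connection, so credentials can rotate without rebuilding the pool. A minimal sketch of wiring up that hook (the fetch_db_token helper and the connection details are hypothetical, not part of this release):

    from eventsourcing.postgres import ConnectionPool

    def fetch_db_token() -> str:
        # Hypothetical helper: return a fresh short-lived password,
        # e.g. from a secrets manager or an IAM token service.
        return "current-db-token"

    # open=False defers connecting; _connect() asks fetch_db_token()
    # for the password each time the pool opens a new connection.
    pool = ConnectionPool(
        get_password_func=fetch_db_token,
        kwargs={
            "dbname": "eventsourcing",
            "host": "127.0.0.1",
            "port": "5432",
            "user": "eventsourcing",
        },
        min_size=2,
        max_size=4,
        open=False,
    )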
@@ -120,152 +62,93 @@ class PostgresConnectionPool(ConnectionPool[PostgresConnection]):
         port: str,
         user: str,
         password: str,
-        connect_timeout: int = 5,
+        *,
+        connect_timeout: int = 30,
         idle_in_transaction_session_timeout: int = 0,
-        pool_size: int = 1,
-        max_overflow: int = 0,
-        pool_timeout: float = 5.0,
-        max_age: Optional[float] = None,
+        pool_size: int = 2,
+        max_overflow: int = 2,
+        max_waiting: int = 0,
+        conn_max_age: float = 60 * 60.0,
         pre_ping: bool = False,
+        lock_timeout: int = 0,
+        schema: str = "",
+        pool_open_timeout: int | None = None,
+        get_password_func: Callable[[], str] | None = None,
     ):
-        self.dbname = dbname
-        self.host = host
-        self.port = port
-        self.user = user
-        self.password = password
-        self.connect_timeout = connect_timeout
         self.idle_in_transaction_session_timeout = idle_in_transaction_session_timeout
-        super().__init__(
-            pool_size=pool_size,
-            max_overflow=max_overflow,
-            pool_timeout=pool_timeout,
-            max_age=max_age,
-            pre_ping=pre_ping,
-            mutually_exclusive_read_write=False,
+        self.pre_ping = pre_ping
+        self.pool_open_timeout = pool_open_timeout
+
+        check = ConnectionPool.check_connection if pre_ping else None
+        self.pool = ConnectionPool(
+            get_password_func=get_password_func,
+            connection_class=Connection[DictRow],
+            kwargs={
+                "dbname": dbname,
+                "host": host,
+                "port": port,
+                "user": user,
+                "password": password,
+                "row_factory": dict_row,
+            },
+            min_size=pool_size,
+            max_size=pool_size + max_overflow,
+            open=False,
+            configure=self.after_connect,
+            timeout=connect_timeout,
+            max_waiting=max_waiting,
+            max_lifetime=conn_max_age,
+            check=check,
         )
+        self.lock_timeout = lock_timeout
+        self.schema = schema.strip()
 
-    def _create_connection(self) -> PostgresConnection:
-        # Make a connection to a database.
-        try:
-            pg_conn = psycopg2.connect(
-                dbname=self.dbname,
-                host=self.host,
-                port=self.port,
-                user=self.user,
-                password=self.password,
-                connect_timeout=self.connect_timeout,
-            )
-        except psycopg2.OperationalError as e:
-            raise OperationalError(e) from e
-        pg_conn.cursor().execute(
-            f"SET idle_in_transaction_session_timeout = "
+    def after_connect(self, conn: Connection[DictRow]) -> None:
+        conn.autocommit = True
+        conn.cursor().execute(
+            "SET idle_in_transaction_session_timeout = "
             f"'{self.idle_in_transaction_session_timeout}s'"
         )
-        return PostgresConnection(pg_conn, max_age=self.max_age)
-
-
-class PostgresTransaction:
-    def __init__(self, conn: PostgresConnection, commit: bool):
-        self.conn = conn
-        self.commit = commit
-        self.has_entered = False
 
-    def __enter__(self) -> PostgresCursor:
-        self.has_entered = True
-        return self.conn.cursor()
-
-    def __exit__(
-        self,
-        exc_type: Type[BaseException],
-        exc_val: BaseException,
-        exc_tb: TracebackType,
-    ) -> None:
+    @contextmanager
+    def get_connection(self) -> Iterator[Connection[DictRow]]:
         try:
-            if exc_val:
-                self.conn.rollback()
-                raise exc_val
-            elif not self.commit:
-                self.conn.rollback()
-            else:
-                self.conn.commit()
-        except psycopg2.InterfaceError as e:
-            self.conn.close()
+            wait = self.pool_open_timeout is not None
+            timeout = self.pool_open_timeout or 30.0
+            self.pool.open(wait, timeout)
+
+            with self.pool.connection() as conn:
+                yield conn
+        except psycopg.InterfaceError as e:
+            # conn.close()
             raise InterfaceError(str(e)) from e
-        except psycopg2.DataError as e:
-            raise DataError(str(e)) from e
-        except psycopg2.OperationalError as e:
-            self.conn.close()
+        except psycopg.OperationalError as e:
+            # conn.close()
            raise OperationalError(str(e)) from e
-        except psycopg2.IntegrityError as e:
+        except psycopg.DataError as e:
+            raise DataError(str(e)) from e
+        except psycopg.IntegrityError as e:
             raise IntegrityError(str(e)) from e
-        except psycopg2.InternalError as e:
+        except psycopg.InternalError as e:
             raise InternalError(str(e)) from e
-        except psycopg2.ProgrammingError as e:
+        except psycopg.ProgrammingError as e:
             raise ProgrammingError(str(e)) from e
-        except psycopg2.NotSupportedError as e:
+        except psycopg.NotSupportedError as e:
             raise NotSupportedError(str(e)) from e
-        except psycopg2.DatabaseError as e:
+        except psycopg.DatabaseError as e:
             raise DatabaseError(str(e)) from e
-        except psycopg2.Error as e:
+        except psycopg.Error as e:
+            # conn.close()
             raise PersistenceError(str(e)) from e
-
-
-class PostgresDatastore:
-    def __init__(
-        self,
-        dbname: str,
-        host: str,
-        port: str,
-        user: str,
-        password: str,
-        connect_timeout: int = 5,
-        idle_in_transaction_session_timeout: int = 0,
-        pool_size: int = 2,
-        max_overflow: int = 2,
-        pool_timeout: float = 5.0,
-        conn_max_age: Optional[float] = None,
-        pre_ping: bool = False,
-        lock_timeout: int = 0,
-        schema: str = "",
-    ):
-        self.pool = PostgresConnectionPool(
-            dbname=dbname,
-            host=host,
-            port=port,
-            user=user,
-            password=password,
-            connect_timeout=connect_timeout,
-            idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
-            pool_size=pool_size,
-            max_overflow=max_overflow,
-            pool_timeout=pool_timeout,
-            max_age=conn_max_age,
-            pre_ping=pre_ping,
-        )
-        self.lock_timeout = lock_timeout
-        self.schema = schema.strip()
-
-    @contextmanager
-    def transaction(self, commit: bool) -> Iterator[PostgresCursor]:
-        with self.get_connection() as conn:
-            with conn.transaction(commit) as curs:
-                yield curs
+        except Exception:
+            # conn.close()
+            raise
 
     @contextmanager
-    def get_connection(self) -> Iterator[PostgresConnection]:
-        conn = self.pool.get_connection()
-        try:
-            yield conn
-        finally:
-            self.pool.put_connection(conn)
-
-    def report_on_prepared_statements(
-        self,
-    ) -> Tuple[List[List[Union[bool, str]]], List[str]]:
-        with self.get_connection() as conn:
-            with conn.cursor() as curs:
-                curs.execute("SELECT * from pg_prepared_statements")
-                return sorted(curs.fetchall()), sorted(conn.is_prepared)
+    def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
+        conn: Connection[DictRow]
+        with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
+            yield conn.cursor()
 
     def close(self) -> None:
         self.pool.close()
@@ -273,8 +156,11 @@ class PostgresDatastore:
     def __del__(self) -> None:
         self.close()
 
+    def __enter__(self) -> Self:
+        return self
 
-PG_IDENTIFIER_MAX_LEN = 63
+    def __exit__(self, *args: object, **kwargs: Any) -> None:
+        self.close()
 
 
 class PostgresAggregateRecorder(AggregateRecorder):
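With the __enter__/__exit__ methods added above, PostgresDatastore can now be used as a context manager that closes its connection pool on exit. A minimal usage sketch, assuming a local Postgres with these (illustrative) credentials:

    from eventsourcing.postgres import PostgresDatastore

    with PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="eventsourcing",
        password="eventsourcing",
    ) as datastore:
        # transaction() yields a cursor on a pooled connection. The commit
        # flag is now keyword-only and defaults to False, in which case
        # psycopg's force_rollback discards changes when the block exits.
        with datastore.transaction(commit=True) as curs:
            curs.execute("SELECT 1")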
@@ -283,8 +169,6 @@ class PostgresAggregateRecorder(AggregateRecorder):
         datastore: PostgresDatastore,
         events_table_name: str,
     ):
-        self.statement_name_aliases: Dict[str, str] = {}
-        self.statement_name_aliases_lock = Lock()
         self.check_table_name_length(events_table_name, datastore.schema)
         self.datastore = datastore
         self.events_table_name = events_table_name
@@ -300,15 +184,12 @@ class PostgresAggregateRecorder(AggregateRecorder):
 
         self.create_table_statements = self.construct_create_table_statements()
         self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4)"
-        )
-        self.insert_events_statement_name = f"insert_{events_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
         )
         self.select_events_statement = (
-            f"SELECT * FROM {self.events_table_name} WHERE originator_id = $1"
+            f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
         )
-        self.lock_statements: List[str] = []
+        self.lock_table_statements: List[str] = []
 
     @staticmethod
     def check_table_name_length(table_name: str, schema_name: str) -> None:
@@ -318,47 +199,8 @@ class PostgresAggregateRecorder(AggregateRecorder):
         else:
             unqualified_table_name = table_name
         if len(unqualified_table_name) > 63:
-            raise ProgrammingError(f"Table name too long: {unqualified_table_name}")
-
-    def get_statement_alias(self, statement_name: str) -> str:
-        try:
-            alias = self.statement_name_aliases[statement_name]
-        except KeyError:
-            with self.statement_name_aliases_lock:
-                try:
-                    alias = self.statement_name_aliases[statement_name]
-                except KeyError:
-                    existing_aliases = self.statement_name_aliases.values()
-                    if (
-                        len(statement_name) <= PG_IDENTIFIER_MAX_LEN
-                        and statement_name not in existing_aliases
-                    ):
-                        alias = statement_name
-                        self.statement_name_aliases[statement_name] = alias
-                    else:
-                        uid = uuid5(
-                            NAMESPACE_URL, f"/statement_names/{statement_name}"
-                        ).hex
-                        alias = uid
-                        for i in range(len(uid)):  # pragma: no cover
-                            preserve_end = 21
-                            preserve_start = (
-                                PG_IDENTIFIER_MAX_LEN - preserve_end - i - 2
-                            )
-                            uuid5_tail = i
-                            candidate = (
-                                statement_name[:preserve_start]
-                                + "_"
-                                + (uid[-uuid5_tail:] if i else "")
-                                + "_"
-                                + statement_name[-preserve_end:]
-                            )
-                            assert len(alias) <= PG_IDENTIFIER_MAX_LEN
-                            if candidate not in existing_aliases:
-                                alias = candidate
-                                break
-                        self.statement_name_aliases[statement_name] = alias
-        return alias
+            msg = f"Table name too long: {unqualified_table_name}"
+            raise ProgrammingError(msg)
 
     def construct_create_table_statements(self) -> List[str]:
         statement = (
@@ -377,155 +219,121 @@ class PostgresAggregateRecorder(AggregateRecorder):
     def create_table(self) -> None:
         with self.datastore.transaction(commit=True) as curs:
             for statement in self.create_table_statements:
-                curs.execute(statement)
-                pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
+                curs.execute(statement, prepare=False)
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def insert_events(
         self, stored_events: List[StoredEvent], **kwargs: Any
-    ) -> Optional[Sequence[int]]:
+    ) -> Sequence[int] | None:
+        conn: Connection[DictRow]
+        exc: Exception | None = None
+        notification_ids: Sequence[int] | None = None
         with self.datastore.get_connection() as conn:
-            self._prepare_insert_events(conn)
-            with conn.transaction(commit=True) as curs:
-                return self._insert_events(curs, stored_events, **kwargs)
-
-    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
-        self._prepare(
-            conn,
-            self.insert_events_statement_name,
-            self.insert_events_statement,
-        )
-
-    def _prepare(
-        self, conn: PostgresConnection, statement_name: str, statement: str
-    ) -> str:
-        statement_name_alias = self.get_statement_alias(statement_name)
-        if statement_name not in conn.is_prepared:
-            curs: PostgresCursor
-            with conn.transaction(commit=True) as curs:
-                try:
-                    lock_timeout = self.datastore.lock_timeout
-                    curs.execute(f"SET LOCAL lock_timeout = '{lock_timeout}s'")
-                    curs.execute(f"PREPARE {statement_name_alias} AS " + statement)
-                except psycopg2.errors.lookup(DUPLICATE_PREPARED_STATEMENT):  # noqa
-                    pass
-            conn.is_prepared.add(statement_name)
-        return statement_name_alias
+            with conn.pipeline() as pipeline, conn.transaction():
+                # Do other things first, so they can be pipelined too.
+                with conn.cursor() as curs:
+                    self._insert_events(curs, stored_events, **kwargs)
+                # Then use a different cursor for the executemany() call.
+                with conn.cursor() as curs:
+                    try:
+                        self._insert_stored_events(curs, stored_events, **kwargs)
+                        # Sync now, so any uniqueness constraint violation causes an
+                        # IntegrityError to be raised here, rather than an InternalError
+                        # being raised sometime later e.g. when commit() is called.
+                        pipeline.sync()
+                        notification_ids = self._fetch_ids_after_insert_events(
+                            curs, stored_events, **kwargs
+                        )
+                    except Exception as e:
+                        # Avoid psycopg emitting a pipeline warning.
+                        exc = e
+        if exc:
+            # Reraise exception after pipeline context manager has exited.
+            raise exc
+        return notification_ids
 
     def _insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        # Acquire "EXCLUSIVE" table lock, to serialize inserts so that
-        # insertion of notification IDs is monotonic for notification log
-        # readers. We want concurrent transactions to commit inserted
-        # notification_id values in order, and by locking the table for writes,
-        # it can be guaranteed. The EXCLUSIVE lock mode does not block
-        # the ACCESS SHARE lock which is acquired during SELECT statements,
-        # so the table can be read concurrently. However, INSERT normally
-        # just acquires ROW EXCLUSIVE locks, which risks interleaving of
-        # many inserts in one transaction with many inserts in another
-        # transaction. Since one transaction will commit before another,
-        # the possibility arises for readers that are tailing a notification
-        # log to miss items inserted later but with lower notification IDs.
-        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
-        # https://www.postgresql.org/docs/9.1/sql-lock.html
-        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
-        # -postgresql-serial-column-values-by-commit-order
-
-        len_stored_events = len(stored_events)
+    ) -> None:
+        pass
 
+    def _insert_stored_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **_: Any,
+    ) -> None:
         # Only do something if there is something to do.
-        if len_stored_events > 0:
-            # Mogrify the table lock statements.
-            lock_sqls = (c.mogrify(s) for s in self.lock_statements)
-
-            # Prepare the commands before getting the table lock.
-            alias = self.statement_name_aliases[self.insert_events_statement_name]
-            page_size = 500
-            pages = [
-                (
-                    c.mogrify(
-                        f"EXECUTE {alias}(%s, %s, %s, %s)",
-                        (
-                            stored_event.originator_id,
-                            stored_event.originator_version,
-                            stored_event.topic,
-                            stored_event.state,
-                        ),
+        if len(stored_events) > 0:
+            self._lock_table(c)
+
+            # Insert events.
+            c.executemany(
+                query=self.insert_events_statement,
+                params_seq=[
+                    (
+                        stored_event.originator_id,
+                        stored_event.originator_version,
+                        stored_event.topic,
+                        stored_event.state,
                     )
-                    for stored_event in page
-                )
-                for page in (
-                    stored_events[ndx : min(ndx + page_size, len_stored_events)]
-                    for ndx in range(0, len_stored_events, page_size)
-                )
-            ]
-            commands = [
-                b"; ".join(page)
-                for page in chain([chain(lock_sqls, pages[0])], pages[1:])
-            ]
+                    for stored_event in stored_events
+                ],
+                returning="RETURNING" in self.insert_events_statement,
+            )
 
-            # Execute the commands.
-            for command in commands:
-                c.execute(command)
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        pass
+
+    def _fetch_ids_after_insert_events(
+        self,
+        c: Cursor[DictRow],
+        stored_events: List[StoredEvent],
+        **kwargs: Any,
+    ) -> Sequence[int] | None:
        return None
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_events(
         self,
         originator_id: UUID,
-        gt: Optional[int] = None,
-        lte: Optional[int] = None,
+        *,
+        gt: int | None = None,
+        lte: int | None = None,
         desc: bool = False,
-        limit: Optional[int] = None,
+        limit: int | None = None,
     ) -> List[StoredEvent]:
-        parts = [self.select_events_statement]
+        statement = self.select_events_statement
         params: List[Any] = [originator_id]
-        statement_name = f"select_{self.events_table_name}".replace(".", "_")
         if gt is not None:
             params.append(gt)
-            parts.append(f"AND originator_version > ${len(params)}")
-            statement_name += "_gt"
+            statement += " AND originator_version > %s"
         if lte is not None:
             params.append(lte)
-            parts.append(f"AND originator_version <= ${len(params)}")
-            statement_name += "_lte"
-        parts.append("ORDER BY originator_version")
+            statement += " AND originator_version <= %s"
+        statement += " ORDER BY originator_version"
         if desc is False:
-            parts.append("ASC")
+            statement += " ASC"
         else:
-            parts.append("DESC")
-            statement_name += "_desc"
+            statement += " DESC"
         if limit is not None:
             params.append(limit)
-            parts.append(f"LIMIT ${len(params)}")
-            statement_name += "_limit"
-        statement = " ".join(parts)
-
-        stored_events = []
-
-        with self.datastore.get_connection() as conn:
-            alias = self._prepare(conn, statement_name, statement)
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
-                    params,
+            statement += " LIMIT %s"
+
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                StoredEvent(
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall():
-                    stored_events.append(
-                        StoredEvent(
-                            originator_id=row["originator_id"],
-                            originator_version=row["originator_version"],
-                            topic=row["topic"],
-                            state=bytes(row["state"]),
-                        )
-                    )
-                pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return stored_events
+                for row in curs.fetchall()
+            ]
 
 
 class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder):
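The rewritten insert path above leans on two psycopg 3 features with no psycopg2 equivalent: Connection.pipeline() batches statements into fewer network round trips, and executemany(..., returning=True) exposes one result set per executed statement via nextset(). A standalone sketch of that fetch pattern, reusing the datastore from the sketch above (table name and values are illustrative):

    from uuid import uuid4

    with datastore.get_connection() as conn, conn.transaction(), conn.cursor() as curs:
        curs.executemany(
            "INSERT INTO stored_events VALUES (%s, %s, %s, %s)"
            " RETURNING notification_id",
            [(uuid4(), 1, "example.topic", b"state")],  # illustrative row
            returning=True,
        )
        ids = []
        while True:  # one result set per inserted row
            row = curs.fetchone()  # rows are dicts, via the pool's dict_row factory
            ids.append(row["notification_id"])
            if not curs.nextset():
                break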
@@ -535,45 +343,42 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         events_table_name: str = "stored_events",
     ):
         super().__init__(datastore, events_table_name)
-        self.insert_events_statement = (
-            f"INSERT INTO {self.events_table_name} VALUES ($1, $2, $3, $4) "
-            f"RETURNING notification_id"
-        )
+        self.insert_events_statement += " RETURNING notification_id"
         self.max_notification_id_statement = (
             f"SELECT MAX(notification_id) FROM {self.events_table_name}"
         )
-        self.max_notification_id_statement_name = (
-            f"max_notification_id_{events_table_name}".replace(".", "_")
-        )
-        self.lock_statements = [
+        self.lock_table_statements = [
             f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
             f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
         ]
 
     def construct_create_table_statements(self) -> List[str]:
-        statements = [
-            "CREATE TABLE IF NOT EXISTS "
-            f"{self.events_table_name} ("
-            "originator_id uuid NOT NULL, "
-            "originator_version bigint NOT NULL, "
-            "topic text, "
-            "state bytea, "
-            "notification_id bigserial, "
-            "PRIMARY KEY "
-            "(originator_id, originator_version)) "
-            "WITH (autovacuum_enabled=false)",
-            f"CREATE UNIQUE INDEX IF NOT EXISTS "
-            f"{self.notification_id_index_name}"
-            f"ON {self.events_table_name} (notification_id ASC);",
+        return [
+            (
+                "CREATE TABLE IF NOT EXISTS "
+                f"{self.events_table_name} ("
+                "originator_id uuid NOT NULL, "
+                "originator_version bigint NOT NULL, "
+                "topic text, "
+                "state bytea, "
+                "notification_id bigserial, "
+                "PRIMARY KEY "
+                "(originator_id, originator_version)) "
+                "WITH (autovacuum_enabled=false)"
+            ),
+            (
+                "CREATE UNIQUE INDEX IF NOT EXISTS "
+                f"{self.notification_id_index_name}"
+                f"ON {self.events_table_name} (notification_id ASC);"
+            ),
         ]
-        return statements
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def select_notifications(
         self,
         start: int,
         limit: int,
-        stop: Optional[int] = None,
+        stop: int | None = None,
         topics: Sequence[str] = (),
     ) -> List[Notification]:
         """
@@ -581,86 +386,91 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
         from 'start', limited by 'limit'.
         """
 
-        params: List[Union[int, str, Sequence[str]]] = [start]
-        statement = (
-            "SELECT * " f"FROM {self.events_table_name} " "WHERE notification_id>=$1 "
-        )
-        statement_name = f"select_notifications_{self.events_table_name}".replace(
-            ".", "_"
-        )
+        params: List[int | str | Sequence[str]] = [start]
+        statement = f"SELECT * FROM {self.events_table_name} WHERE notification_id>=%s"
 
         if stop is not None:
             params.append(stop)
-            statement += f"AND notification_id <= ${len(params)} "
-            statement_name += "_stop"
+            statement += " AND notification_id <= %s"
 
         if topics:
             params.append(topics)
-            statement += f"AND topic = ANY(${len(params)}) "
-            statement_name += "_topics"
+            statement += " AND topic = ANY(%s)"
 
         params.append(limit)
-        statement += "ORDER BY notification_id " f"LIMIT ${len(params)}"
-
-        notifications = []
-        with self.datastore.get_connection() as conn:
-            alias = self._prepare(
-                conn,
-                statement_name,
-                statement,
-            )
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {alias}({', '.join(['%s' for _ in params])})",
-                    params,
+        statement += " ORDER BY notification_id LIMIT %s"
+
+        connection = self.datastore.get_connection()
+        with connection as conn, conn.cursor() as curs:
+            curs.execute(statement, params, prepare=True)
+            return [
+                Notification(
+                    id=row["notification_id"],
+                    originator_id=row["originator_id"],
+                    originator_version=row["originator_version"],
+                    topic=row["topic"],
+                    state=bytes(row["state"]),
                 )
-                for row in curs.fetchall():
-                    notifications.append(
-                        Notification(
-                            id=row["notification_id"],
-                            originator_id=row["originator_id"],
-                            originator_version=row["originator_version"],
-                            topic=row["topic"],
-                            state=bytes(row["state"]),
-                        )
-                    )
-                pass  # for Coverage 5.5 bug with CPython 3.10.0rc1
-        return notifications
+                for row in curs.fetchall()
+            ]
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_notification_id(self) -> int:
         """
         Returns the maximum notification ID.
         """
-        statement_name = self.max_notification_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.max_notification_id_statement
-            )
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}",
-                )
-                max_id = curs.fetchone()[0] or 0
-        return max_id
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(self.max_notification_id_statement)
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
+
+    def _lock_table(self, c: Cursor[DictRow]) -> None:
+        # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
+        # stored events, so that readers don't pass over gaps that are filled in
+        # later. We want each transaction that will be issued with notification
+        # IDs by the notification ID sequence to receive all its notification IDs
+        # and then commit, before another transaction is issued with any notification
+        # IDs. In other words, we want the insert order to be the same as the commit
+        # order. We can accomplish this by locking the table for writes. The
+        # EXCLUSIVE lock mode does not block SELECT statements, which acquire an
+        # ACCESS SHARE lock, so the stored events table can be read concurrently
+        # with writes and other reads. However, INSERT statements normally just
+        # acquire ROW EXCLUSIVE locks, which risks the interleaving (within the
+        # recorded sequence of notification IDs) of stored events from one transaction
+        # with those of another transaction. And since one transaction will always
+        # commit before another, the possibility arises when using ROW EXCLUSIVE locks
+        # for readers that are tailing a notification log to miss items inserted later
+        # but issued with lower notification IDs.
+        # https://www.postgresql.org/docs/current/explicit-locking.html#LOCKING-TABLES
+        # https://www.postgresql.org/docs/9.1/sql-lock.html
+        # https://stackoverflow.com/questions/45866187/guarantee-monotonicity-of
+        # -postgresql-serial-column-values-by-commit-order
+        for lock_statement in self.lock_table_statements:
+            c.execute(lock_statement, prepare=True)
 
-    def _insert_events(
+    def _fetch_ids_after_insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        super()._insert_events(c, stored_events, **kwargs)
-        if stored_events:
-            last_notification_id = c.fetchone()[0]
-            notification_ids = list(
-                range(
-                    last_notification_id - len(stored_events) + 1,
-                    last_notification_id + 1,
-                )
-            )
-        else:
-            notification_ids = []
+    ) -> Sequence[int] | None:
+        notification_ids: List[int] = []
+        len_events = len(stored_events)
+        if len_events:
+            if (
+                (c.statusmessage == "SET")
+                and c.nextset()
+                and (c.statusmessage == "LOCK TABLE")
+            ):
+                while c.nextset() and len(notification_ids) != len_events:
+                    row = c.fetchone()
+                    assert row is not None
+                    notification_ids.append(row["notification_id"])
+            if len(notification_ids) != len(stored_events):
+                msg = "Couldn't get all notification IDs"
+                raise ProgrammingError(msg)
         return notification_ids
 
 
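The long comment in _lock_table() above carries the ordering argument: writers must commit in the same order that notification IDs are issued, or tailing readers can skip over gaps that are filled in later. Reduced to its essentials, each inserting transaction does the equivalent of the following (a sketch using the default stored_events table name and the datastore from the earlier sketch):

    with datastore.get_connection() as conn, conn.transaction(), conn.cursor() as curs:
        curs.execute("SET LOCAL lock_timeout = '0s'")
        # EXCLUSIVE mode queues other writers but never blocks readers,
        # which only take ACCESS SHARE locks for their SELECTs.
        curs.execute("LOCK TABLE stored_events IN EXCLUSIVE MODE")
        # ... INSERT the new stored events here; their bigserial
        # notification IDs then become visible in commit order.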
@@ -675,26 +485,17 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
         self.tracking_table_name = tracking_table_name
         super().__init__(datastore, events_table_name)
         self.insert_tracking_statement = (
-            f"INSERT INTO {self.tracking_table_name} VALUES ($1, $2)"
-        )
-        self.insert_tracking_statement_name = f"insert_{tracking_table_name}".replace(
-            ".", "_"
+            f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
         )
         self.max_tracking_id_statement = (
             "SELECT MAX(notification_id) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name=$1"
+            "WHERE application_name=%s"
         )
         self.count_tracking_id_statement = (
             "SELECT COUNT(*) "
             f"FROM {self.tracking_table_name} "
-            "WHERE application_name=$1 AND notification_id=$2"
-        )
-        self.max_tracking_id_statement_name = (
-            f"max_tracking_id_{tracking_table_name}".replace(".", "_")
-        )
-        self.count_tracking_id_statement_name = (
-            f"count_tracking_id_{tracking_table_name}".replace(".", "_")
+            "WHERE application_name=%s AND notification_id=%s"
         )
 
     def construct_create_table_statements(self) -> List[str]:
@@ -711,61 +512,46 @@ class PostgresProcessRecorder(PostgresApplicationRecorder, ProcessRecorder):
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def max_tracking_id(self, application_name: str) -> int:
-        statement_name = self.max_tracking_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.max_tracking_id_statement
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.max_tracking_id_statement,
+                params=(application_name,),
+                prepare=True,
             )
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}(%s)",
-                    (application_name,),
-                )
-                max_id = curs.fetchone()[0] or 0
-        return max_id
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return fetchone["max"] or 0
 
     @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
     def has_tracking_id(self, application_name: str, notification_id: int) -> bool:
-        statement_name = self.count_tracking_id_statement_name
-        with self.datastore.get_connection() as conn:
-            statement_alias = self._prepare(
-                conn, statement_name, self.count_tracking_id_statement
+        conn: Connection[DictRow]
+        with self.datastore.get_connection() as conn, conn.cursor() as curs:
+            curs.execute(
+                query=self.count_tracking_id_statement,
+                params=(application_name, notification_id),
+                prepare=True,
             )
-
-            with conn.transaction(commit=False) as curs:
-                curs.execute(
-                    f"EXECUTE {statement_alias}(%s, %s)",
-                    (application_name, notification_id),
-                )
-                return bool(curs.fetchone()[0])
-
-    def _prepare_insert_events(self, conn: PostgresConnection) -> None:
-        super()._prepare_insert_events(conn)
-        self._prepare(
-            conn, self.insert_tracking_statement_name, self.insert_tracking_statement
-        )
+            fetchone = curs.fetchone()
+            assert fetchone is not None
+            return bool(fetchone["count"])
 
     def _insert_events(
         self,
-        c: PostgresCursor,
+        c: Cursor[DictRow],
         stored_events: List[StoredEvent],
         **kwargs: Any,
-    ) -> Optional[Sequence[int]]:
-        notification_ids = super()._insert_events(c, stored_events, **kwargs)
-        tracking: Optional[Tracking] = kwargs.get("tracking", None)
+    ) -> None:
+        tracking: Tracking | None = kwargs.get("tracking", None)
         if tracking is not None:
-            statement_alias = self.statement_name_aliases[
-                self.insert_tracking_statement_name
-            ]
             c.execute(
-                f"EXECUTE {statement_alias}(%s, %s)",
-                (
+                query=self.insert_tracking_statement,
+                params=(
                     tracking.application_name,
                     tracking.notification_id,
                 ),
+                prepare=True,
             )
-        return notification_ids
+        super()._insert_events(c, stored_events, **kwargs)
 
 
 class Factory(InfrastructureFactory):
@@ -773,14 +559,15 @@ class Factory(InfrastructureFactory):
     POSTGRES_HOST = "POSTGRES_HOST"
     POSTGRES_PORT = "POSTGRES_PORT"
     POSTGRES_USER = "POSTGRES_USER"
-    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"
+    POSTGRES_PASSWORD = "POSTGRES_PASSWORD"  # noqa: S105
+    POSTGRES_GET_PASSWORD_TOPIC = "POSTGRES_GET_PASSWORD_TOPIC"  # noqa: S105
     POSTGRES_CONNECT_TIMEOUT = "POSTGRES_CONNECT_TIMEOUT"
     POSTGRES_CONN_MAX_AGE = "POSTGRES_CONN_MAX_AGE"
     POSTGRES_PRE_PING = "POSTGRES_PRE_PING"
-    POSTGRES_POOL_TIMEOUT = "POSTGRES_POOL_TIMEOUT"
+    POSTGRES_MAX_WAITING = "POSTGRES_MAX_WAITING"
    POSTGRES_LOCK_TIMEOUT = "POSTGRES_LOCK_TIMEOUT"
     POSTGRES_POOL_SIZE = "POSTGRES_POOL_SIZE"
-    POSTGRES_POOL_MAX_OVERFLOW = "POSTGRES_POOL_MAX_OVERFLOW"
+    POSTGRES_MAX_OVERFLOW = "POSTGRES_MAX_OVERFLOW"
     POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT = (
         "POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT"
     )
@@ -795,54 +582,61 @@ class Factory(InfrastructureFactory):
         super().__init__(env)
         dbname = self.env.get(self.POSTGRES_DBNAME)
         if dbname is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres database name not found "
                 "in environment with key "
                 f"'{self.POSTGRES_DBNAME}'"
             )
+            raise OSError(msg)
 
         host = self.env.get(self.POSTGRES_HOST)
         if host is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres host not found "
                 "in environment with key "
                 f"'{self.POSTGRES_HOST}'"
             )
+            raise OSError(msg)
 
         port = self.env.get(self.POSTGRES_PORT) or "5432"
 
         user = self.env.get(self.POSTGRES_USER)
         if user is None:
-            raise EnvironmentError(
+            msg = (
                 "Postgres user not found "
                 "in environment with key "
                 f"'{self.POSTGRES_USER}'"
             )
+            raise OSError(msg)
+
+        get_password_func = None
+        get_password_topic = self.env.get(self.POSTGRES_GET_PASSWORD_TOPIC)
+        if not get_password_topic:
+            password = self.env.get(self.POSTGRES_PASSWORD)
+            if password is None:
+                msg = (
+                    "Postgres password not found "
+                    "in environment with key "
+                    f"'{self.POSTGRES_PASSWORD}'"
+                )
+                raise OSError(msg)
+        else:
+            get_password_func = resolve_topic(get_password_topic)
+            password = ""
 
-        password = self.env.get(self.POSTGRES_PASSWORD)
-        if password is None:
-            raise EnvironmentError(
-                "Postgres password not found "
-                "in environment with key "
-                f"'{self.POSTGRES_PASSWORD}'"
-            )
-
-        connect_timeout: Optional[int]
+        connect_timeout = 30
         connect_timeout_str = self.env.get(self.POSTGRES_CONNECT_TIMEOUT)
-        if connect_timeout_str is None:
-            connect_timeout = 5
-        elif connect_timeout_str == "":
-            connect_timeout = 5
-        else:
+        if connect_timeout_str:
             try:
                 connect_timeout = int(connect_timeout_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONNECT_TIMEOUT}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                    "If set, an integer or empty string is expected: "
                     f"'{connect_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
         idle_in_transaction_session_timeout_str = (
             self.env.get(self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT) or "5"
@@ -853,80 +647,69 @@ class Factory(InfrastructureFactory):
                 idle_in_transaction_session_timeout_str
             )
         except ValueError:
-            raise EnvironmentError(
-                f"Postgres environment value for key "
+            msg = (
+                "Postgres environment value for key "
                 f"'{self.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT}' is invalid. "
-                f"If set, an integer or empty string is expected: "
+                "If set, an integer or empty string is expected: "
                 f"'{idle_in_transaction_session_timeout_str}'"
             )
+            raise OSError(msg) from None
 
-        pool_size: Optional[int]
+        pool_size = 5
         pool_size_str = self.env.get(self.POSTGRES_POOL_SIZE)
-        if pool_size_str is None:
-            pool_size = 5
-        elif pool_size_str == "":
-            pool_size = 5
-        else:
+        if pool_size_str:
             try:
                 pool_size = int(pool_size_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_POOL_SIZE}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_size_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_max_overflow: Optional[int]
-        pool_max_overflow_str = self.env.get(self.POSTGRES_POOL_MAX_OVERFLOW)
-        if pool_max_overflow_str is None:
-            pool_max_overflow = 10
-        elif pool_max_overflow_str == "":
-            pool_max_overflow = 10
-        else:
+        pool_max_overflow = 10
+        pool_max_overflow_str = self.env.get(self.POSTGRES_MAX_OVERFLOW)
+        if pool_max_overflow_str:
             try:
                 pool_max_overflow = int(pool_max_overflow_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
-                    f"'{self.POSTGRES_POOL_MAX_OVERFLOW}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                msg = (
+                    "Postgres environment value for key "
+                    f"'{self.POSTGRES_MAX_OVERFLOW}' is invalid. "
+                    "If set, an integer or empty string is expected: "
                     f"'{pool_max_overflow_str}'"
                 )
+                raise OSError(msg) from None
 
-        pool_timeout: Optional[float]
-        pool_timeout_str = self.env.get(self.POSTGRES_POOL_TIMEOUT)
-        if pool_timeout_str is None:
-            pool_timeout = 30
-        elif pool_timeout_str == "":
-            pool_timeout = 30
-        else:
+        max_waiting = 0
+        max_waiting_str = self.env.get(self.POSTGRES_MAX_WAITING)
+        if max_waiting_str:
             try:
-                pool_timeout = float(pool_timeout_str)
+                max_waiting = int(max_waiting_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
-                    f"'{self.POSTGRES_POOL_TIMEOUT}' is invalid. "
-                    f"If set, a float or empty string is expected: "
-                    f"'{pool_timeout_str}'"
+                msg = (
+                    "Postgres environment value for key "
+                    f"'{self.POSTGRES_MAX_WAITING}' is invalid. "
+                    "If set, an integer or empty string is expected: "
+                    f"'{max_waiting_str}'"
                 )
+                raise OSError(msg) from None
 
-        conn_max_age: Optional[float]
+        conn_max_age = 60 * 60.0
         conn_max_age_str = self.env.get(self.POSTGRES_CONN_MAX_AGE)
-        if conn_max_age_str is None:
-            conn_max_age = None
-        elif conn_max_age_str == "":
-            conn_max_age = None
-        else:
+        if conn_max_age_str:
             try:
                 conn_max_age = float(conn_max_age_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_CONN_MAX_AGE}' is invalid. "
-                    f"If set, a float or empty string is expected: "
+                    "If set, a float or empty string is expected: "
                    f"'{conn_max_age_str}'"
                 )
+                raise OSError(msg) from None
 
         pre_ping = strtobool(self.env.get(self.POSTGRES_PRE_PING) or "no")
 
@@ -935,12 +718,13 @@ class Factory(InfrastructureFactory):
             try:
                 lock_timeout = int(lock_timeout_str)
             except ValueError:
-                raise EnvironmentError(
-                    f"Postgres environment value for key "
+                msg = (
+                    "Postgres environment value for key "
                     f"'{self.POSTGRES_LOCK_TIMEOUT}' is invalid. "
-                    f"If set, an integer or empty string is expected: "
+                    "If set, an integer or empty string is expected: "
                     f"'{lock_timeout_str}'"
                 )
+                raise OSError(msg) from None
 
         schema = self.env.get(self.POSTGRES_SCHEMA) or ""
 
@@ -950,17 +734,21 @@ class Factory(InfrastructureFactory):
             port=port,
             user=user,
             password=password,
+            get_password_func=get_password_func,
             connect_timeout=connect_timeout,
             idle_in_transaction_session_timeout=idle_in_transaction_session_timeout,
             pool_size=pool_size,
             max_overflow=pool_max_overflow,
-            pool_timeout=pool_timeout,
+            max_waiting=max_waiting,
             conn_max_age=conn_max_age,
             pre_ping=pre_ping,
             lock_timeout=lock_timeout,
             schema=schema,
         )
 
+    def env_create_table(self) -> bool:
+        return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
+
     def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
         prefix = self.env.name.lower() or "stored"
         events_table_name = prefix + "_" + purpose
@@ -1004,8 +792,9 @@ class Factory(InfrastructureFactory):
             recorder.create_table()
         return recorder
 
-    def env_create_table(self) -> bool:
-        return strtobool(self.env.get(self.CREATE_TABLE) or "yes")
-
     def close(self) -> None:
-        self.datastore.close()
+        if hasattr(self, "datastore"):
+            self.datastore.close()
+
+    def __del__(self) -> None:
+        self.close()
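For applications configuring the factory through the environment, the variable set changes with this release: POSTGRES_POOL_TIMEOUT and POSTGRES_POOL_MAX_OVERFLOW give way to POSTGRES_MAX_WAITING and POSTGRES_MAX_OVERFLOW, and the password can now be supplied indirectly. A hedged sketch of the new configuration, assuming a hypothetical application module myproject.db that defines get_password() (the "module:attribute" topic format is what resolve_topic expects):

    import os

    os.environ["POSTGRES_DBNAME"] = "eventsourcing"
    os.environ["POSTGRES_HOST"] = "127.0.0.1"
    os.environ["POSTGRES_PORT"] = "5432"
    os.environ["POSTGRES_USER"] = "eventsourcing"
    # Either set POSTGRES_PASSWORD directly, or name a callable that
    # returns the current password:
    os.environ["POSTGRES_GET_PASSWORD_TOPIC"] = "myproject.db:get_password"
    # Renamed and new pool settings (all optional, given as strings):
    os.environ["POSTGRES_POOL_SIZE"] = "5"
    os.environ["POSTGRES_MAX_OVERFLOW"] = "10"
    os.environ["POSTGRES_MAX_WAITING"] = "0"
    os.environ["POSTGRES_CONN_MAX_AGE"] = "3600"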