eventsourcing 9.2.21__py3-none-any.whl → 9.3.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Potentially problematic release.
This version of eventsourcing might be problematic. Click here for more details.
- eventsourcing/__init__.py +1 -1
- eventsourcing/application.py +137 -132
- eventsourcing/cipher.py +17 -12
- eventsourcing/compressor.py +2 -0
- eventsourcing/dispatch.py +30 -56
- eventsourcing/domain.py +221 -227
- eventsourcing/examples/__init__.py +0 -0
- eventsourcing/examples/aggregate1/__init__.py +0 -0
- eventsourcing/examples/aggregate1/application.py +27 -0
- eventsourcing/examples/aggregate1/domainmodel.py +16 -0
- eventsourcing/examples/aggregate1/test_application.py +37 -0
- eventsourcing/examples/aggregate2/__init__.py +0 -0
- eventsourcing/examples/aggregate2/application.py +27 -0
- eventsourcing/examples/aggregate2/domainmodel.py +22 -0
- eventsourcing/examples/aggregate2/test_application.py +37 -0
- eventsourcing/examples/aggregate3/__init__.py +0 -0
- eventsourcing/examples/aggregate3/application.py +27 -0
- eventsourcing/examples/aggregate3/domainmodel.py +38 -0
- eventsourcing/examples/aggregate3/test_application.py +37 -0
- eventsourcing/examples/aggregate4/__init__.py +0 -0
- eventsourcing/examples/aggregate4/application.py +27 -0
- eventsourcing/examples/aggregate4/domainmodel.py +114 -0
- eventsourcing/examples/aggregate4/test_application.py +38 -0
- eventsourcing/examples/aggregate5/__init__.py +0 -0
- eventsourcing/examples/aggregate5/application.py +27 -0
- eventsourcing/examples/aggregate5/domainmodel.py +131 -0
- eventsourcing/examples/aggregate5/test_application.py +38 -0
- eventsourcing/examples/aggregate6/__init__.py +0 -0
- eventsourcing/examples/aggregate6/application.py +30 -0
- eventsourcing/examples/aggregate6/domainmodel.py +123 -0
- eventsourcing/examples/aggregate6/test_application.py +38 -0
- eventsourcing/examples/aggregate6a/__init__.py +0 -0
- eventsourcing/examples/aggregate6a/application.py +40 -0
- eventsourcing/examples/aggregate6a/domainmodel.py +149 -0
- eventsourcing/examples/aggregate6a/test_application.py +45 -0
- eventsourcing/examples/aggregate7/__init__.py +0 -0
- eventsourcing/examples/aggregate7/application.py +48 -0
- eventsourcing/examples/aggregate7/domainmodel.py +144 -0
- eventsourcing/examples/aggregate7/persistence.py +57 -0
- eventsourcing/examples/aggregate7/test_application.py +38 -0
- eventsourcing/examples/aggregate7/test_compression_and_encryption.py +45 -0
- eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +67 -0
- eventsourcing/examples/aggregate7a/__init__.py +0 -0
- eventsourcing/examples/aggregate7a/application.py +56 -0
- eventsourcing/examples/aggregate7a/domainmodel.py +170 -0
- eventsourcing/examples/aggregate7a/test_application.py +46 -0
- eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +45 -0
- eventsourcing/examples/aggregate8/__init__.py +0 -0
- eventsourcing/examples/aggregate8/application.py +47 -0
- eventsourcing/examples/aggregate8/domainmodel.py +65 -0
- eventsourcing/examples/aggregate8/persistence.py +57 -0
- eventsourcing/examples/aggregate8/test_application.py +37 -0
- eventsourcing/examples/aggregate8/test_compression_and_encryption.py +44 -0
- eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +38 -0
- eventsourcing/examples/bankaccounts/__init__.py +0 -0
- eventsourcing/examples/bankaccounts/application.py +70 -0
- eventsourcing/examples/bankaccounts/domainmodel.py +56 -0
- eventsourcing/examples/bankaccounts/test.py +173 -0
- eventsourcing/examples/cargoshipping/__init__.py +0 -0
- eventsourcing/examples/cargoshipping/application.py +126 -0
- eventsourcing/examples/cargoshipping/domainmodel.py +330 -0
- eventsourcing/examples/cargoshipping/interface.py +143 -0
- eventsourcing/examples/cargoshipping/test.py +231 -0
- eventsourcing/examples/contentmanagement/__init__.py +0 -0
- eventsourcing/examples/contentmanagement/application.py +118 -0
- eventsourcing/examples/contentmanagement/domainmodel.py +69 -0
- eventsourcing/examples/contentmanagement/test.py +180 -0
- eventsourcing/examples/contentmanagement/utils.py +26 -0
- eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
- eventsourcing/examples/contentmanagementsystem/application.py +54 -0
- eventsourcing/examples/contentmanagementsystem/postgres.py +17 -0
- eventsourcing/examples/contentmanagementsystem/sqlite.py +17 -0
- eventsourcing/examples/contentmanagementsystem/system.py +14 -0
- eventsourcing/examples/contentmanagementsystem/test_system.py +180 -0
- eventsourcing/examples/searchablecontent/__init__.py +0 -0
- eventsourcing/examples/searchablecontent/application.py +45 -0
- eventsourcing/examples/searchablecontent/persistence.py +23 -0
- eventsourcing/examples/searchablecontent/postgres.py +118 -0
- eventsourcing/examples/searchablecontent/sqlite.py +136 -0
- eventsourcing/examples/searchablecontent/test_application.py +110 -0
- eventsourcing/examples/searchablecontent/test_recorder.py +68 -0
- eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
- eventsourcing/examples/searchabletimestamps/application.py +32 -0
- eventsourcing/examples/searchabletimestamps/persistence.py +20 -0
- eventsourcing/examples/searchabletimestamps/postgres.py +110 -0
- eventsourcing/examples/searchabletimestamps/sqlite.py +99 -0
- eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +94 -0
- eventsourcing/examples/test_invoice.py +176 -0
- eventsourcing/examples/test_parking_lot.py +206 -0
- eventsourcing/interface.py +4 -2
- eventsourcing/persistence.py +88 -82
- eventsourcing/popo.py +32 -31
- eventsourcing/postgres.py +388 -593
- eventsourcing/sqlite.py +100 -102
- eventsourcing/system.py +66 -71
- eventsourcing/tests/application.py +20 -32
- eventsourcing/tests/application_tests/__init__.py +0 -0
- eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +55 -0
- eventsourcing/tests/application_tests/test_application_with_popo.py +22 -0
- eventsourcing/tests/application_tests/test_application_with_postgres.py +75 -0
- eventsourcing/tests/application_tests/test_application_with_sqlite.py +72 -0
- eventsourcing/tests/application_tests/test_cache.py +134 -0
- eventsourcing/tests/application_tests/test_event_sourced_log.py +162 -0
- eventsourcing/tests/application_tests/test_notificationlog.py +232 -0
- eventsourcing/tests/application_tests/test_notificationlogreader.py +126 -0
- eventsourcing/tests/application_tests/test_processapplication.py +110 -0
- eventsourcing/tests/application_tests/test_processingpolicy.py +109 -0
- eventsourcing/tests/application_tests/test_repository.py +504 -0
- eventsourcing/tests/application_tests/test_snapshotting.py +68 -0
- eventsourcing/tests/application_tests/test_upcasting.py +459 -0
- eventsourcing/tests/docs_tests/__init__.py +0 -0
- eventsourcing/tests/docs_tests/test_docs.py +293 -0
- eventsourcing/tests/domain.py +1 -1
- eventsourcing/tests/domain_tests/__init__.py +0 -0
- eventsourcing/tests/domain_tests/test_aggregate.py +1180 -0
- eventsourcing/tests/domain_tests/test_aggregate_decorators.py +1604 -0
- eventsourcing/tests/domain_tests/test_domainevent.py +80 -0
- eventsourcing/tests/interface_tests/__init__.py +0 -0
- eventsourcing/tests/interface_tests/test_remotenotificationlog.py +258 -0
- eventsourcing/tests/persistence.py +52 -50
- eventsourcing/tests/persistence_tests/__init__.py +0 -0
- eventsourcing/tests/persistence_tests/test_aes.py +93 -0
- eventsourcing/tests/persistence_tests/test_connection_pool.py +722 -0
- eventsourcing/tests/persistence_tests/test_eventstore.py +72 -0
- eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +21 -0
- eventsourcing/tests/persistence_tests/test_mapper.py +113 -0
- eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +69 -0
- eventsourcing/tests/persistence_tests/test_popo.py +124 -0
- eventsourcing/tests/persistence_tests/test_postgres.py +1119 -0
- eventsourcing/tests/persistence_tests/test_sqlite.py +348 -0
- eventsourcing/tests/persistence_tests/test_transcoder.py +44 -0
- eventsourcing/tests/postgres_utils.py +7 -7
- eventsourcing/tests/system_tests/__init__.py +0 -0
- eventsourcing/tests/system_tests/test_runner.py +935 -0
- eventsourcing/tests/system_tests/test_system.py +284 -0
- eventsourcing/tests/utils_tests/__init__.py +0 -0
- eventsourcing/tests/utils_tests/test_utils.py +226 -0
- eventsourcing/utils.py +49 -50
- {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/METADATA +30 -33
- eventsourcing-9.3.0.dist-info/RECORD +145 -0
- {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/WHEEL +1 -2
- eventsourcing-9.2.21.dist-info/RECORD +0 -25
- eventsourcing-9.2.21.dist-info/top_level.txt +0 -1
- {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/AUTHORS +0 -0
- {eventsourcing-9.2.21.dist-info → eventsourcing-9.3.0.dist-info}/LICENSE +0 -0
|
@@ -0,0 +1,1119 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sys
|
|
4
|
+
from threading import Event, Thread
|
|
5
|
+
from time import sleep
|
|
6
|
+
from typing import List
|
|
7
|
+
from unittest import TestCase, skipIf
|
|
8
|
+
from uuid import uuid4
|
|
9
|
+
|
|
10
|
+
import psycopg
|
|
11
|
+
from psycopg import Connection
|
|
12
|
+
from psycopg_pool import ConnectionPool
|
|
13
|
+
|
|
14
|
+
from eventsourcing.persistence import (
|
|
15
|
+
DatabaseError,
|
|
16
|
+
DataError,
|
|
17
|
+
InfrastructureFactory,
|
|
18
|
+
IntegrityError,
|
|
19
|
+
InterfaceError,
|
|
20
|
+
InternalError,
|
|
21
|
+
NotSupportedError,
|
|
22
|
+
OperationalError,
|
|
23
|
+
PersistenceError,
|
|
24
|
+
ProgrammingError,
|
|
25
|
+
StoredEvent,
|
|
26
|
+
Tracking,
|
|
27
|
+
)
|
|
28
|
+
from eventsourcing.postgres import (
|
|
29
|
+
Factory,
|
|
30
|
+
PostgresAggregateRecorder,
|
|
31
|
+
PostgresApplicationRecorder,
|
|
32
|
+
PostgresDatastore,
|
|
33
|
+
PostgresProcessRecorder,
|
|
34
|
+
)
|
|
35
|
+
from eventsourcing.tests.persistence import (
|
|
36
|
+
AggregateRecorderTestCase,
|
|
37
|
+
ApplicationRecorderTestCase,
|
|
38
|
+
InfrastructureFactoryTestCase,
|
|
39
|
+
ProcessRecorderTestCase,
|
|
40
|
+
)
|
|
41
|
+
from eventsourcing.tests.persistence_tests.test_connection_pool import (
|
|
42
|
+
TestConnectionPool,
|
|
43
|
+
)
|
|
44
|
+
from eventsourcing.tests.postgres_utils import (
|
|
45
|
+
drop_postgres_table,
|
|
46
|
+
pg_close_all_connections,
|
|
47
|
+
)
|
|
48
|
+
from eventsourcing.utils import Environment, get_topic
|
|
49
|
+
|
|
50
|
+
|
|
51
|
+
class TestPostgresDatastore(TestCase):
    """Tests for ``PostgresDatastore`` connection management.

    Covers the connection pool, connection acquisition, the ``transaction()``
    convenience method, exception conversion, connect failures, pre-ping,
    idle-in-transaction session timeouts, and the "get password" callback.

    NOTE(review): these tests assume a local PostgreSQL server on
    127.0.0.1:5432 with database, user, and password all "eventsourcing"
    — confirm against the project's CI setup.
    """

    def test_is_pipeline_supported(self):
        # The datastore relies on psycopg pipeline mode; check libpq supports it.
        self.assertTrue(psycopg.Pipeline.is_supported())

    def test_has_connection_pool(self):
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
        ) as datastore:
            self.assertIsInstance(datastore.pool, ConnectionPool)

    def test_get_connection(self):
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
        ) as datastore:
            conn: Connection
            with datastore.get_connection() as conn:
                self.assertIsInstance(conn, Connection)

    def test_context_manager_converts_exceptions_and_conditionally_calls_close(self):
        # Each case: (expected converted exception type, exception raised inside
        # the context manager, whether the connection should be closed after).
        cases = [
            (InterfaceError, psycopg.InterfaceError(), True),
            (DataError, psycopg.DataError(), False),
            (OperationalError, psycopg.OperationalError(), True),
            (IntegrityError, psycopg.IntegrityError(), False),
            (InternalError, psycopg.InternalError(), False),
            (ProgrammingError, psycopg.ProgrammingError(), False),
            (NotSupportedError, psycopg.NotSupportedError(), False),
            (DatabaseError, psycopg.DatabaseError(), False),
            (PersistenceError, psycopg.Error(), True),
            (TypeError, TypeError(), True),
            (TypeError, TypeError, True),
        ]
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
        ) as datastore:
            for expected_exc_type, raised_exc, expect_conn_closed in cases:
                with self.assertRaises(expected_exc_type):
                    conn: Connection
                    with datastore.get_connection() as conn:
                        self.assertFalse(conn.closed)
                        raise raised_exc
                self.assertTrue(conn.closed is expect_conn_closed, raised_exc)

    def test_transaction_from_datastore(self):
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
        ) as datastore, datastore.transaction(commit=False) as curs:
            # As a convenience, we can use the transaction() method.
            curs.execute("SELECT 1")
            self.assertEqual(curs.fetchall(), [{"?column?": 1}])

    def test_connect_failure_raises_operational_error(self):
        datastore = PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="4321",  # wrong port
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
            pool_open_timeout=2,
        )
        with self.assertRaises(OperationalError), datastore.get_connection():
            pass

        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="987654321",  # bad value
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
            pool_open_timeout=2,
        ) as datastore, self.assertRaises(OperationalError), datastore.get_connection():
            pass

    @skipIf(
        sys.version_info[:2] < (3, 8),
        "The 'check' argument and the check_connection() method aren't supported.",
    )
    def test_pre_ping(self):
        # Define method to open and close a connection, and then execute a statement.
        def open_close_execute(*, pre_ping: bool):
            with PostgresDatastore(
                dbname="eventsourcing",
                host="127.0.0.1",
                port="5432",
                user="eventsourcing",
                password="eventsourcing",  # noqa: S106
                pool_size=1,
                pre_ping=pre_ping,
            ) as datastore:

                # Create a connection.
                conn: Connection
                with datastore.get_connection() as conn, conn.cursor() as curs:
                    curs.execute("SELECT 1")
                    self.assertEqual(curs.fetchall(), [{"?column?": 1}])

                # Close all connections via separate connection.
                pg_close_all_connections()

                # Check the connection doesn't think it's closed.
                self.assertTrue(datastore.pool._pool)
                self.assertFalse(datastore.pool._pool[0].closed)

                # Get a closed connection.
                conn: Connection
                with datastore.get_connection() as conn:
                    self.assertFalse(conn.closed)

                    with conn.cursor() as curs:
                        curs.execute("SELECT 1")

        # Check using the closed connection gives an error.
        with self.assertRaises(OperationalError):
            open_close_execute(pre_ping=False)

        # Now try that again with pre-ping enabled.
        open_close_execute(pre_ping=True)

    def test_idle_in_transaction_session_timeout(self):
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="eventsourcing",  # noqa: S106
            idle_in_transaction_session_timeout=1,
        ) as datastore:

            # Error on commit is raised.
            with self.assertRaises(
                OperationalError
            ), datastore.get_connection() as curs:
                curs.execute("BEGIN")
                curs.execute("SELECT 1")
                self.assertFalse(curs.closed)
                sleep(2)

            # Error on commit is raised.
            with self.assertRaises(OperationalError), datastore.transaction(
                commit=True
            ) as curs:
                # curs.execute("BEGIN")
                curs.execute("SELECT 1")
                self.assertFalse(curs.closed)
                sleep(2)

            # Force rollback. Error is ignored.
            with datastore.transaction(commit=False) as curs:
                # curs.execute("BEGIN")
                curs.execute("SELECT 1")
                self.assertFalse(curs.closed)
                sleep(2)

            # Autocommit mode - transaction is committed in time.
            with datastore.get_connection() as curs:
                curs.execute("SELECT 1")
                self.assertFalse(curs.closed)
                sleep(2)

    def test_get_password_func(self):
        # Check correct password is required, wrong password causes operational error.
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="wrong",  # noqa: S106
            pool_size=1,
        ) as datastore:

            conn: Connection
            with self.assertRaises(
                OperationalError
            ), datastore.get_connection() as conn, conn.cursor() as curs:
                curs.execute("SELECT 1")

        # Define a "get password" function, with a generator that returns
        # wrong password a few times first.
        def password_token_generator():
            yield "wrong"
            yield "wrong"
            yield "eventsourcing"

        password_generator = password_token_generator()

        def get_password_func():
            return next(password_generator)

        # Construct datastore with "get password" function.
        with PostgresDatastore(
            dbname="eventsourcing",
            host="127.0.0.1",
            port="5432",
            user="eventsourcing",
            password="",
            pool_size=1,
            get_password_func=get_password_func,
            connect_timeout=10,
        ) as datastore, datastore.get_connection() as conn, conn.cursor() as curs:
            # Create a connection, and check it works (this test depends on psycopg
            # retrying attempt to connect, should call "get password" twice).
            curs.execute("SELECT 1")
            self.assertEqual(curs.fetchall(), [{"?column?": 1}])
|
|
270
|
+
|
|
271
|
+
|
|
272
|
+
# Use maximally long identifier for table name.
|
|
273
|
+
EVENTS_TABLE_NAME = "s" * 50 + "stored_events"
|
|
274
|
+
|
|
275
|
+
MAX_IDENTIFIER_LEN = 63
|
|
276
|
+
|
|
277
|
+
|
|
278
|
+
def _check_identifier_is_max_len(identifier):
|
|
279
|
+
if len(identifier) != MAX_IDENTIFIER_LEN:
|
|
280
|
+
msg = "Expected length of name string to be max identifier length"
|
|
281
|
+
raise ValueError(msg)
|
|
282
|
+
|
|
283
|
+
|
|
284
|
+
_check_identifier_is_max_len(EVENTS_TABLE_NAME)
|
|
285
|
+
|
|
286
|
+
|
|
287
|
+
class SetupPostgresDatastore(TestCase):
    """Base test case that creates a PostgresDatastore and drops test tables.

    Subclasses may set ``schema`` to run the same tests against a named
    PostgreSQL schema. Tables are dropped both before and after each test so
    every test starts from a clean database.
    """

    # Schema name; empty string means the default (public) schema.
    schema = ""

    def setUp(self) -> None:
        super().setUp()
        self.datastore = PostgresDatastore(
            "eventsourcing",
            "127.0.0.1",
            "5432",
            "eventsourcing",
            "eventsourcing",
            schema=self.schema,
        )
        self.drop_tables()

    def tearDown(self) -> None:
        super().tearDown()
        self.drop_tables()
        self.datastore.close()

    def drop_tables(self):
        # Qualify the table name with the schema, if one is configured.
        events_table_name = EVENTS_TABLE_NAME
        if self.datastore.schema:
            events_table_name = f"{self.datastore.schema}.{events_table_name}"
        drop_postgres_table(self.datastore, events_table_name)
|
|
312
|
+
|
|
313
|
+
|
|
314
|
+
class WithSchema(SetupPostgresDatastore):
    """Mixin variant that exercises the datastore with a named schema."""

    schema = "myschema"

    def test_datastore_has_schema(self):
        self.assertEqual(self.datastore.schema, self.schema)
|
|
319
|
+
|
|
320
|
+
|
|
321
|
+
class TestPostgresAggregateRecorder(SetupPostgresDatastore, AggregateRecorderTestCase):
    """Runs the shared AggregateRecorderTestCase suite against PostgreSQL."""

    def create_recorder(
        self, table_name=EVENTS_TABLE_NAME
    ) -> PostgresAggregateRecorder:
        # Qualify the table name with the schema, if one is configured.
        if self.datastore.schema:
            table_name = f"{self.datastore.schema}.{table_name}"
        recorder = PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name=table_name
        )
        recorder.create_table()
        return recorder

    def drop_tables(self):
        # Also drop the short-named table used by test_create_table.
        super().drop_tables()
        drop_postgres_table(self.datastore, "stored_events")

    def test_create_table(self):
        recorder = PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name="stored_events"
        )
        recorder.create_table()

    def test_insert_and_select(self):
        super().test_insert_and_select()

    def test_performance(self):
        super().test_performance()

    def test_retry_insert_events_after_closing_connection(self):
        # This checks connection is recreated after connections are closed.
        self.datastore.pool.pool_size = 1

        # Construct the recorder.
        recorder = self.create_recorder()

        # Check we have open connections.
        self.assertTrue(self.datastore.pool._pool)

        # Close connections.
        pg_close_all_connections()
        self.assertFalse(self.datastore.pool._pool[0].closed)

        # Write a stored event.
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])
|
|
371
|
+
|
|
372
|
+
|
|
373
|
+
class TestPostgresAggregateRecorderWithSchema(
    WithSchema, TestPostgresAggregateRecorder
):
    """Re-runs the aggregate recorder tests against a named schema."""

    pass
|
|
377
|
+
|
|
378
|
+
|
|
379
|
+
class TestPostgresAggregateRecorderErrors(SetupPostgresDatastore, TestCase):
    """Error-path tests for PostgresAggregateRecorder.

    Each test deliberately breaks something (overlong table name, missing
    table, corrupted SQL statement) and asserts ProgrammingError is raised.
    """

    def create_recorder(self, table_name=EVENTS_TABLE_NAME):
        # Note: does not create the table — tests do that (or not) themselves.
        return PostgresAggregateRecorder(
            datastore=self.datastore, events_table_name=table_name
        )

    def test_excessively_long_table_name_raises_error(self):
        # Add one more character to the table name.
        long_table_name = "s" + EVENTS_TABLE_NAME
        self.assertEqual(len(long_table_name), 64)
        with self.assertRaises(ProgrammingError):
            self.create_recorder(long_table_name)

    def test_create_table_raises_programming_error_when_sql_is_broken(self):
        recorder = self.create_recorder()

        # Mess up the statement.
        recorder.create_table_statements = ["BLAH"]
        with self.assertRaises(ProgrammingError):
            recorder.create_table()

    def test_insert_events_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()

        # Write a stored event without creating the table.
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_insert_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()

        # Create the table.
        recorder.create_table()

        # Write a stored event with broken statement.
        recorder.insert_events_statement = "BLAH"
        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        with self.assertRaises(ProgrammingError):
            recorder.insert_events([stored_event1])

    def test_select_events_raises_programming_error_when_table_not_created(self):
        # Construct the recorder.
        recorder = self.create_recorder()

        # Select events without creating the table.
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)

    def test_select_events_raises_programming_error_when_sql_is_broken(self):
        # Construct the recorder.
        recorder = self.create_recorder()

        # Create the table.
        recorder.create_table()

        # Select events with broken statement.
        recorder.select_events_statement = "BLAH"
        originator_id = uuid4()
        with self.assertRaises(ProgrammingError):
            recorder.select_events(originator_id=originator_id)
|
|
453
|
+
|
|
454
|
+
|
|
455
|
+
class TestPostgresApplicationRecorder(
    SetupPostgresDatastore, ApplicationRecorderTestCase
):
    """Runs the shared ApplicationRecorderTestCase suite against PostgreSQL,
    plus connection-retry and table-lock-timeout tests specific to the
    PostgreSQL recorder.
    """

    def create_recorder(
        self, table_name=EVENTS_TABLE_NAME
    ) -> PostgresApplicationRecorder:
        # Qualify the table name with the schema, if one is configured.
        if self.datastore.schema:
            table_name = f"{self.datastore.schema}.{table_name}"
        recorder = PostgresApplicationRecorder(
            self.datastore, events_table_name=table_name
        )
        recorder.create_table()
        return recorder

    def test_insert_select(self) -> None:
        super().test_insert_select()

    def test_concurrent_no_conflicts(self):
        super().test_concurrent_no_conflicts()

    def test_concurrent_throughput(self):
        self.datastore.pool.pool_size = 4
        super().test_concurrent_throughput()

    def test_retry_select_notifications_after_closing_connection(self):
        # This checks connection is recreated after InterfaceError.

        # Construct the recorder.
        recorder = self.create_recorder()
        self.datastore.pool.pool_size = 1

        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])

        # Close connections.
        pg_close_all_connections()
        self.assertFalse(self.datastore.pool._pool[0].closed)

        # Select events.
        recorder.select_notifications(start=1, limit=1)

    def test_retry_max_notification_id_after_closing_connection(self):
        # This checks connection is recreated after InterfaceError.

        # Construct the recorder.
        recorder = self.create_recorder()
        self.datastore.pool.pool_size = 1

        # Write a stored event.
        originator_id = uuid4()
        stored_event1 = StoredEvent(
            originator_id=originator_id,
            originator_version=0,
            topic="topic1",
            state=b"state1",
        )
        recorder.insert_events([stored_event1])

        # Close connections.
        pg_close_all_connections()
        self.assertFalse(self.datastore.pool._pool[0].closed)

        # Get max notification ID.
        recorder.max_notification_id()

    def test_insert_lock_timeout_actually_works(self):
        # Two threads: the first inserts and holds the table lock open; the
        # second attempts an insert and is expected to hit the lock timeout.
        self.datastore.lock_timeout = 1
        recorder: PostgresApplicationRecorder = self.create_recorder()

        stored_event1 = StoredEvent(
            originator_id=uuid4(),
            originator_version=1,
            topic="topic1",
            state=b"state1",
        )
        stored_event2 = StoredEvent(
            originator_id=uuid4(),
            originator_version=1,
            topic="topic1",
            state=b"state1",
        )

        # Events used to sequence the two threads and record the outcome.
        table_lock_acquired = Event()
        test_ended = Event()
        table_lock_timed_out = Event()

        def insert1():
            conn = self.datastore.get_connection()
            with conn as conn, conn.transaction(), conn.cursor() as curs:
                # Lock table.
                recorder._insert_stored_events(curs, [stored_event1])
                table_lock_acquired.set()
                # Wait for other thread to timeout.
                test_ended.wait(timeout=5)  # keep the lock

        def insert2():
            try:
                conn: Connection
                with self.datastore.get_connection() as conn:
                    # Wait for other thread to lock table.
                    table_lock_acquired.wait(timeout=5)
                    # Expect to timeout.
                    with conn.transaction(), conn.cursor() as curs:
                        recorder._insert_stored_events(curs, [stored_event2])
            except OperationalError as e:
                if "lock timeout" in e.args[0]:
                    table_lock_timed_out.set()

        thread1 = Thread(target=insert1, daemon=True)
        thread1.start()
        thread2 = Thread(target=insert2, daemon=True)
        thread2.start()

        table_lock_timed_out.wait(timeout=4)
        test_ended.set()

        thread1.join(timeout=10)
        thread2.join(timeout=10)

        self.assertTrue(table_lock_timed_out.is_set())
|
|
582
|
+
|
|
583
|
+
|
|
584
|
+
class TestPostgresApplicationRecorderWithSchema(
    WithSchema, TestPostgresApplicationRecorder
):
    """Re-runs the application recorder tests against a named schema."""

    pass
|
|
588
|
+
|
|
589
|
+
|
|
590
|
+
class TestPostgresApplicationRecorderErrors(SetupPostgresDatastore, TestCase):
|
|
591
|
+
def create_recorder(self, table_name=EVENTS_TABLE_NAME):
|
|
592
|
+
return PostgresApplicationRecorder(self.datastore, events_table_name=table_name)
|
|
593
|
+
|
|
594
|
+
def test_excessively_long_table_name_raises_error(self):
|
|
595
|
+
# Add one more character to the table name.
|
|
596
|
+
long_table_name = "s" + EVENTS_TABLE_NAME
|
|
597
|
+
self.assertEqual(len(long_table_name), 64)
|
|
598
|
+
with self.assertRaises(ProgrammingError):
|
|
599
|
+
self.create_recorder(long_table_name)
|
|
600
|
+
|
|
601
|
+
def test_select_notification_raises_programming_error_when_table_not_created(self):
|
|
602
|
+
# Construct the recorder.
|
|
603
|
+
recorder = self.create_recorder()
|
|
604
|
+
|
|
605
|
+
# Select notifications without creating table.
|
|
606
|
+
with self.assertRaises(ProgrammingError):
|
|
607
|
+
recorder.select_notifications(start=1, limit=1)
|
|
608
|
+
|
|
609
|
+
def test_max_notification_id_raises_programming_error_when_table_not_created(self):
|
|
610
|
+
# Construct the recorder.
|
|
611
|
+
recorder = PostgresApplicationRecorder(
|
|
612
|
+
datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
|
|
613
|
+
)
|
|
614
|
+
|
|
615
|
+
# Select notifications without creating table.
|
|
616
|
+
with self.assertRaises(ProgrammingError):
|
|
617
|
+
recorder.max_notification_id()
|
|
618
|
+
|
|
619
|
+
def test_fetch_ids_after_insert_events(self):
|
|
620
|
+
def make_events() -> List[StoredEvent]:
|
|
621
|
+
return [
|
|
622
|
+
StoredEvent(
|
|
623
|
+
originator_id=uuid4(),
|
|
624
|
+
originator_version=1,
|
|
625
|
+
state=b"",
|
|
626
|
+
topic="",
|
|
627
|
+
)
|
|
628
|
+
]
|
|
629
|
+
|
|
630
|
+
#
|
|
631
|
+
# Check it actually works.
|
|
632
|
+
recorder = PostgresApplicationRecorder(
|
|
633
|
+
datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
|
|
634
|
+
)
|
|
635
|
+
recorder.create_table()
|
|
636
|
+
max_notification_id = recorder.max_notification_id()
|
|
637
|
+
notification_ids = recorder.insert_events(make_events())
|
|
638
|
+
self.assertEqual(len(notification_ids), 1)
|
|
639
|
+
self.assertEqual(max_notification_id + 1, notification_ids[0])
|
|
640
|
+
|
|
641
|
+
# Events but no lock table statements.
|
|
642
|
+
with self.assertRaises(ProgrammingError):
|
|
643
|
+
recorder = PostgresApplicationRecorder(
|
|
644
|
+
datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
|
|
645
|
+
)
|
|
646
|
+
recorder.create_table()
|
|
647
|
+
recorder.lock_table_statements = []
|
|
648
|
+
recorder.insert_events(make_events())
|
|
649
|
+
|
|
650
|
+
|
|
651
|
+
TRACKING_TABLE_NAME = "n" * 42 + "notification_tracking"
|
|
652
|
+
_check_identifier_is_max_len(TRACKING_TABLE_NAME)
|
|
653
|
+
|
|
654
|
+
|
|
655
|
+
class TestPostgresProcessRecorder(SetupPostgresDatastore, ProcessRecorderTestCase):
|
|
656
|
+
def drop_tables(self):
|
|
657
|
+
super().drop_tables()
|
|
658
|
+
tracking_table_name = TRACKING_TABLE_NAME
|
|
659
|
+
if self.datastore.schema:
|
|
660
|
+
tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
|
|
661
|
+
drop_postgres_table(self.datastore, tracking_table_name)
|
|
662
|
+
|
|
663
|
+
def create_recorder(self):
|
|
664
|
+
events_table_name = EVENTS_TABLE_NAME
|
|
665
|
+
tracking_table_name = TRACKING_TABLE_NAME
|
|
666
|
+
if self.datastore.schema:
|
|
667
|
+
events_table_name = f"{self.datastore.schema}.{events_table_name}"
|
|
668
|
+
if self.datastore.schema:
|
|
669
|
+
tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
|
|
670
|
+
recorder = PostgresProcessRecorder(
|
|
671
|
+
datastore=self.datastore,
|
|
672
|
+
events_table_name=events_table_name,
|
|
673
|
+
tracking_table_name=tracking_table_name,
|
|
674
|
+
)
|
|
675
|
+
recorder.create_table()
|
|
676
|
+
return recorder
|
|
677
|
+
|
|
678
|
+
def test_insert_select(self):
|
|
679
|
+
super().test_insert_select()
|
|
680
|
+
|
|
681
|
+
def test_performance(self):
|
|
682
|
+
super().test_performance()
|
|
683
|
+
|
|
684
|
+
def test_excessively_long_table_names_raise_error(self):
|
|
685
|
+
with self.assertRaises(ProgrammingError):
|
|
686
|
+
PostgresProcessRecorder(
|
|
687
|
+
datastore=self.datastore,
|
|
688
|
+
events_table_name="e" + EVENTS_TABLE_NAME,
|
|
689
|
+
tracking_table_name=TRACKING_TABLE_NAME,
|
|
690
|
+
)
|
|
691
|
+
|
|
692
|
+
with self.assertRaises(ProgrammingError):
|
|
693
|
+
PostgresProcessRecorder(
|
|
694
|
+
datastore=self.datastore,
|
|
695
|
+
events_table_name=EVENTS_TABLE_NAME,
|
|
696
|
+
tracking_table_name="n" + TRACKING_TABLE_NAME,
|
|
697
|
+
)
|
|
698
|
+
|
|
699
|
+
def test_retry_max_tracking_id_after_closing_connection(self):
|
|
700
|
+
# This checks connection is recreated after InterfaceError.
|
|
701
|
+
|
|
702
|
+
# Construct the recorder.
|
|
703
|
+
recorder = self.create_recorder()
|
|
704
|
+
self.datastore.pool.pool_size = 1
|
|
705
|
+
|
|
706
|
+
# Write a tracking record.
|
|
707
|
+
originator_id = uuid4()
|
|
708
|
+
stored_event1 = StoredEvent(
|
|
709
|
+
originator_id=originator_id,
|
|
710
|
+
originator_version=0,
|
|
711
|
+
topic="topic1",
|
|
712
|
+
state=b"state1",
|
|
713
|
+
)
|
|
714
|
+
recorder.insert_events([stored_event1], tracking=Tracking("upstream", 1))
|
|
715
|
+
|
|
716
|
+
# Close connections.
|
|
717
|
+
pg_close_all_connections()
|
|
718
|
+
self.assertFalse(self.datastore.pool._pool[0].closed)
|
|
719
|
+
|
|
720
|
+
# Get max tracking ID.
|
|
721
|
+
notification_id = recorder.max_tracking_id("upstream")
|
|
722
|
+
self.assertEqual(notification_id, 1)
|
|
723
|
+
|
|
724
|
+
|
|
725
|
+
class TestPostgresProcessRecorderWithSchema(WithSchema, TestPostgresProcessRecorder):
|
|
726
|
+
pass
|
|
727
|
+
|
|
728
|
+
|
|
729
|
+
class TestPostgresProcessRecorderErrors(SetupPostgresDatastore, TestCase):
|
|
730
|
+
def drop_tables(self):
|
|
731
|
+
super().drop_tables()
|
|
732
|
+
drop_postgres_table(self.datastore, TRACKING_TABLE_NAME)
|
|
733
|
+
|
|
734
|
+
def create_recorder(self):
|
|
735
|
+
return PostgresProcessRecorder(
|
|
736
|
+
datastore=self.datastore,
|
|
737
|
+
events_table_name=EVENTS_TABLE_NAME,
|
|
738
|
+
tracking_table_name=TRACKING_TABLE_NAME,
|
|
739
|
+
)
|
|
740
|
+
|
|
741
|
+
def test_max_tracking_id_raises_programming_error_when_table_not_created(self):
|
|
742
|
+
# Construct the recorder.
|
|
743
|
+
recorder = self.create_recorder()
|
|
744
|
+
|
|
745
|
+
# Get max tracking ID without creating table.
|
|
746
|
+
with self.assertRaises(ProgrammingError):
|
|
747
|
+
recorder.max_tracking_id("upstream")
|
|
748
|
+
|
|
749
|
+
|
|
750
|
+
class TestPostgresInfrastructureFactory(InfrastructureFactoryTestCase):
|
|
751
|
+
def test_create_application_recorder(self):
|
|
752
|
+
super().test_create_application_recorder()
|
|
753
|
+
|
|
754
|
+
def expected_factory_class(self):
|
|
755
|
+
return Factory
|
|
756
|
+
|
|
757
|
+
def expected_aggregate_recorder_class(self):
|
|
758
|
+
return PostgresAggregateRecorder
|
|
759
|
+
|
|
760
|
+
def expected_application_recorder_class(self):
|
|
761
|
+
return PostgresApplicationRecorder
|
|
762
|
+
|
|
763
|
+
def expected_process_recorder_class(self):
|
|
764
|
+
return PostgresProcessRecorder
|
|
765
|
+
|
|
766
|
+
def setUp(self) -> None:
|
|
767
|
+
self.env = Environment("TestCase")
|
|
768
|
+
self.env[InfrastructureFactory.PERSISTENCE_MODULE] = Factory.__module__
|
|
769
|
+
self.env[Factory.POSTGRES_DBNAME] = "eventsourcing"
|
|
770
|
+
self.env[Factory.POSTGRES_HOST] = "127.0.0.1"
|
|
771
|
+
self.env[Factory.POSTGRES_PORT] = "5432"
|
|
772
|
+
self.env[Factory.POSTGRES_USER] = "eventsourcing"
|
|
773
|
+
self.env[Factory.POSTGRES_PASSWORD] = "eventsourcing"
|
|
774
|
+
self.drop_tables()
|
|
775
|
+
super().setUp()
|
|
776
|
+
|
|
777
|
+
def tearDown(self) -> None:
|
|
778
|
+
self.drop_tables()
|
|
779
|
+
super().tearDown()
|
|
780
|
+
|
|
781
|
+
def drop_tables(self):
|
|
782
|
+
with PostgresDatastore(
|
|
783
|
+
"eventsourcing",
|
|
784
|
+
"127.0.0.1",
|
|
785
|
+
"5432",
|
|
786
|
+
"eventsourcing",
|
|
787
|
+
"eventsourcing",
|
|
788
|
+
) as datastore:
|
|
789
|
+
drop_postgres_table(datastore, "testcase_events")
|
|
790
|
+
drop_postgres_table(datastore, "testcase_tracking")
|
|
791
|
+
|
|
792
|
+
def test_close(self):
|
|
793
|
+
factory = Factory(self.env)
|
|
794
|
+
conn: Connection
|
|
795
|
+
with factory.datastore.get_connection() as conn:
|
|
796
|
+
conn.execute("SELECT 1")
|
|
797
|
+
self.assertFalse(factory.datastore.pool.closed)
|
|
798
|
+
factory.close()
|
|
799
|
+
self.assertTrue(factory.datastore.pool.closed)
|
|
800
|
+
|
|
801
|
+
def test_conn_max_age_is_set_to_float(self):
|
|
802
|
+
self.env[Factory.POSTGRES_CONN_MAX_AGE] = ""
|
|
803
|
+
self.factory = Factory(self.env)
|
|
804
|
+
self.assertEqual(self.factory.datastore.pool.max_lifetime, 60 * 60.0)
|
|
805
|
+
|
|
806
|
+
def test_conn_max_age_is_set_to_number(self):
|
|
807
|
+
self.env[Factory.POSTGRES_CONN_MAX_AGE] = "0"
|
|
808
|
+
self.factory = Factory(self.env)
|
|
809
|
+
self.assertEqual(self.factory.datastore.pool.max_lifetime, 0)
|
|
810
|
+
|
|
811
|
+
def test_pool_size_is_five_by_default(self):
|
|
812
|
+
self.assertTrue(Factory.POSTGRES_POOL_SIZE not in self.env)
|
|
813
|
+
self.factory = Factory(self.env)
|
|
814
|
+
self.assertEqual(self.factory.datastore.pool.min_size, 5)
|
|
815
|
+
|
|
816
|
+
self.env[Factory.POSTGRES_POOL_SIZE] = ""
|
|
817
|
+
self.factory = Factory(self.env)
|
|
818
|
+
self.assertEqual(self.factory.datastore.pool.min_size, 5)
|
|
819
|
+
|
|
820
|
+
def test_max_overflow_is_ten_by_default(self):
|
|
821
|
+
self.assertTrue(Factory.POSTGRES_MAX_OVERFLOW not in self.env)
|
|
822
|
+
self.factory = Factory(self.env)
|
|
823
|
+
self.assertEqual(self.factory.datastore.pool.max_size, 15)
|
|
824
|
+
|
|
825
|
+
self.env[Factory.POSTGRES_MAX_OVERFLOW] = ""
|
|
826
|
+
self.factory = Factory(self.env)
|
|
827
|
+
self.assertEqual(self.factory.datastore.pool.max_size, 15)
|
|
828
|
+
|
|
829
|
+
def test_max_overflow_is_set(self):
|
|
830
|
+
self.env[Factory.POSTGRES_MAX_OVERFLOW] = "7"
|
|
831
|
+
self.factory = Factory(self.env)
|
|
832
|
+
self.assertEqual(self.factory.datastore.pool.max_size, 12)
|
|
833
|
+
|
|
834
|
+
def test_pool_size_is_set(self):
|
|
835
|
+
self.env[Factory.POSTGRES_POOL_SIZE] = "6"
|
|
836
|
+
self.factory = Factory(self.env)
|
|
837
|
+
self.assertEqual(self.factory.datastore.pool.min_size, 6)
|
|
838
|
+
|
|
839
|
+
def test_connect_timeout_is_thirty_by_default(self):
|
|
840
|
+
self.assertTrue(Factory.POSTGRES_CONNECT_TIMEOUT not in self.env)
|
|
841
|
+
self.factory = Factory(self.env)
|
|
842
|
+
self.assertEqual(self.factory.datastore.pool.timeout, 30)
|
|
843
|
+
|
|
844
|
+
self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = ""
|
|
845
|
+
self.factory = Factory(self.env)
|
|
846
|
+
self.assertEqual(self.factory.datastore.pool.timeout, 30)
|
|
847
|
+
|
|
848
|
+
def test_connect_timeout_is_set(self):
|
|
849
|
+
self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = "8"
|
|
850
|
+
self.factory = Factory(self.env)
|
|
851
|
+
self.assertEqual(self.factory.datastore.pool.timeout, 8)
|
|
852
|
+
|
|
853
|
+
def test_max_waiting_is_0_by_default(self):
|
|
854
|
+
self.assertTrue(Factory.POSTGRES_MAX_WAITING not in self.env)
|
|
855
|
+
self.factory = Factory(self.env)
|
|
856
|
+
self.assertEqual(self.factory.datastore.pool.max_waiting, 0)
|
|
857
|
+
|
|
858
|
+
self.env[Factory.POSTGRES_MAX_WAITING] = ""
|
|
859
|
+
self.factory = Factory(self.env)
|
|
860
|
+
self.assertEqual(self.factory.datastore.pool.max_waiting, 0)
|
|
861
|
+
|
|
862
|
+
def test_max_waiting_is_set(self):
|
|
863
|
+
self.env[Factory.POSTGRES_MAX_WAITING] = "8"
|
|
864
|
+
self.factory = Factory(self.env)
|
|
865
|
+
self.assertEqual(self.factory.datastore.pool.max_waiting, 8)
|
|
866
|
+
|
|
867
|
+
def test_lock_timeout_is_zero_by_default(self):
|
|
868
|
+
self.assertTrue(Factory.POSTGRES_LOCK_TIMEOUT not in self.env)
|
|
869
|
+
self.factory = Factory(self.env)
|
|
870
|
+
self.assertEqual(self.factory.datastore.lock_timeout, 0)
|
|
871
|
+
|
|
872
|
+
self.env[Factory.POSTGRES_LOCK_TIMEOUT] = ""
|
|
873
|
+
self.factory = Factory(self.env)
|
|
874
|
+
self.assertEqual(self.factory.datastore.lock_timeout, 0)
|
|
875
|
+
|
|
876
|
+
def test_lock_timeout_is_set(self):
|
|
877
|
+
self.env[Factory.POSTGRES_LOCK_TIMEOUT] = "1"
|
|
878
|
+
self.factory = Factory(self.env)
|
|
879
|
+
self.assertEqual(self.factory.datastore.lock_timeout, 1)
|
|
880
|
+
|
|
881
|
+
def test_idle_in_transaction_session_timeout_is_5_by_default(self):
|
|
882
|
+
self.assertTrue(
|
|
883
|
+
Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT not in self.env
|
|
884
|
+
)
|
|
885
|
+
self.factory = Factory(self.env)
|
|
886
|
+
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 5)
|
|
887
|
+
self.factory.close()
|
|
888
|
+
|
|
889
|
+
self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = ""
|
|
890
|
+
self.factory = Factory(self.env)
|
|
891
|
+
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 5)
|
|
892
|
+
|
|
893
|
+
def test_idle_in_transaction_session_timeout_is_set(self):
|
|
894
|
+
self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "10"
|
|
895
|
+
self.factory = Factory(self.env)
|
|
896
|
+
self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 10)
|
|
897
|
+
|
|
898
|
+
def test_pre_ping_off_by_default(self):
|
|
899
|
+
self.factory = Factory(self.env)
|
|
900
|
+
self.assertEqual(self.factory.datastore.pre_ping, False)
|
|
901
|
+
|
|
902
|
+
def test_pre_ping_off(self):
|
|
903
|
+
self.env[Factory.POSTGRES_PRE_PING] = "off"
|
|
904
|
+
self.factory = Factory(self.env)
|
|
905
|
+
self.assertEqual(self.factory.datastore.pre_ping, False)
|
|
906
|
+
|
|
907
|
+
def test_pre_ping_on(self):
|
|
908
|
+
self.env[Factory.POSTGRES_PRE_PING] = "on"
|
|
909
|
+
self.factory = Factory(self.env)
|
|
910
|
+
self.assertEqual(self.factory.datastore.pre_ping, True)
|
|
911
|
+
|
|
912
|
+
def test_get_password_topic_not_set(self):
|
|
913
|
+
self.factory = Factory(self.env)
|
|
914
|
+
self.assertIsNone(self.factory.datastore.pool.get_password_func, None)
|
|
915
|
+
|
|
916
|
+
def test_get_password_topic_set(self):
|
|
917
|
+
def get_password_func():
|
|
918
|
+
return "eventsourcing"
|
|
919
|
+
|
|
920
|
+
self.env[Factory.POSTGRES_GET_PASSWORD_TOPIC] = get_topic(get_password_func)
|
|
921
|
+
self.factory = Factory(self.env)
|
|
922
|
+
self.assertEqual(
|
|
923
|
+
self.factory.datastore.pool.get_password_func, get_password_func
|
|
924
|
+
)
|
|
925
|
+
|
|
926
|
+
def test_environment_error_raised_when_conn_max_age_not_a_float(self):
|
|
927
|
+
self.env[Factory.POSTGRES_CONN_MAX_AGE] = "abc"
|
|
928
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
929
|
+
Factory(self.env)
|
|
930
|
+
self.assertEqual(
|
|
931
|
+
cm.exception.args[0],
|
|
932
|
+
"Postgres environment value for key 'POSTGRES_CONN_MAX_AGE' "
|
|
933
|
+
"is invalid. If set, a float or empty string is expected: 'abc'",
|
|
934
|
+
)
|
|
935
|
+
|
|
936
|
+
def test_environment_error_raised_when_connect_timeout_not_an_integer(self):
|
|
937
|
+
self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = "abc"
|
|
938
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
939
|
+
Factory(self.env)
|
|
940
|
+
self.assertEqual(
|
|
941
|
+
cm.exception.args[0],
|
|
942
|
+
"Postgres environment value for key 'POSTGRES_CONNECT_TIMEOUT' "
|
|
943
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
944
|
+
)
|
|
945
|
+
|
|
946
|
+
def test_environment_error_raised_when_max_waiting_not_an_integer(self):
|
|
947
|
+
self.env[Factory.POSTGRES_MAX_WAITING] = "abc"
|
|
948
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
949
|
+
Factory(self.env)
|
|
950
|
+
self.assertEqual(
|
|
951
|
+
cm.exception.args[0],
|
|
952
|
+
"Postgres environment value for key 'POSTGRES_MAX_WAITING' "
|
|
953
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
954
|
+
)
|
|
955
|
+
|
|
956
|
+
def test_environment_error_raised_when_lock_timeout_not_an_integer(self):
|
|
957
|
+
self.env[Factory.POSTGRES_LOCK_TIMEOUT] = "abc"
|
|
958
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
959
|
+
Factory(self.env)
|
|
960
|
+
self.assertEqual(
|
|
961
|
+
cm.exception.args[0],
|
|
962
|
+
"Postgres environment value for key 'POSTGRES_LOCK_TIMEOUT' "
|
|
963
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
964
|
+
)
|
|
965
|
+
|
|
966
|
+
def test_environment_error_raised_when_min_conn_not_an_integer(self):
|
|
967
|
+
self.env[Factory.POSTGRES_POOL_SIZE] = "abc"
|
|
968
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
969
|
+
Factory(self.env)
|
|
970
|
+
self.assertEqual(
|
|
971
|
+
cm.exception.args[0],
|
|
972
|
+
"Postgres environment value for key 'POSTGRES_POOL_SIZE' "
|
|
973
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
974
|
+
)
|
|
975
|
+
|
|
976
|
+
def test_environment_error_raised_when_max_conn_not_an_integer(self):
|
|
977
|
+
self.env[Factory.POSTGRES_MAX_OVERFLOW] = "abc"
|
|
978
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
979
|
+
Factory(self.env)
|
|
980
|
+
self.assertEqual(
|
|
981
|
+
cm.exception.args[0],
|
|
982
|
+
"Postgres environment value for key 'POSTGRES_MAX_OVERFLOW' "
|
|
983
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
984
|
+
)
|
|
985
|
+
|
|
986
|
+
def test_environment_error_raised_when_idle_in_transaction_session_timeout_not_int(
|
|
987
|
+
self,
|
|
988
|
+
):
|
|
989
|
+
self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "abc"
|
|
990
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
991
|
+
Factory(self.env)
|
|
992
|
+
self.assertEqual(
|
|
993
|
+
cm.exception.args[0],
|
|
994
|
+
"Postgres environment value for key "
|
|
995
|
+
"'POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT' "
|
|
996
|
+
"is invalid. If set, an integer or empty string is expected: 'abc'",
|
|
997
|
+
)
|
|
998
|
+
|
|
999
|
+
def test_environment_error_raised_when_dbname_missing(self):
|
|
1000
|
+
del self.env[Factory.POSTGRES_DBNAME]
|
|
1001
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
1002
|
+
InfrastructureFactory.construct(self.env)
|
|
1003
|
+
self.assertEqual(
|
|
1004
|
+
cm.exception.args[0],
|
|
1005
|
+
"Postgres database name not found in environment "
|
|
1006
|
+
"with key 'POSTGRES_DBNAME'",
|
|
1007
|
+
)
|
|
1008
|
+
|
|
1009
|
+
def test_environment_error_raised_when_dbhost_missing(self):
|
|
1010
|
+
del self.env[Factory.POSTGRES_HOST]
|
|
1011
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
1012
|
+
InfrastructureFactory.construct(self.env)
|
|
1013
|
+
self.assertEqual(
|
|
1014
|
+
cm.exception.args[0],
|
|
1015
|
+
"Postgres host not found in environment with key 'POSTGRES_HOST'",
|
|
1016
|
+
)
|
|
1017
|
+
|
|
1018
|
+
def test_environment_error_raised_when_user_missing(self):
|
|
1019
|
+
del self.env[Factory.POSTGRES_USER]
|
|
1020
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
1021
|
+
InfrastructureFactory.construct(self.env)
|
|
1022
|
+
self.assertEqual(
|
|
1023
|
+
cm.exception.args[0],
|
|
1024
|
+
"Postgres user not found in environment with key 'POSTGRES_USER'",
|
|
1025
|
+
)
|
|
1026
|
+
|
|
1027
|
+
def test_environment_error_raised_when_password_missing(self):
|
|
1028
|
+
del self.env[Factory.POSTGRES_PASSWORD]
|
|
1029
|
+
with self.assertRaises(EnvironmentError) as cm:
|
|
1030
|
+
InfrastructureFactory.construct(self.env)
|
|
1031
|
+
self.assertEqual(
|
|
1032
|
+
cm.exception.args[0],
|
|
1033
|
+
"Postgres password not found in environment with key 'POSTGRES_PASSWORD'",
|
|
1034
|
+
)
|
|
1035
|
+
|
|
1036
|
+
def test_schema_set_to_empty_string(self):
|
|
1037
|
+
self.env[Factory.POSTGRES_SCHEMA] = ""
|
|
1038
|
+
self.factory = Factory(self.env)
|
|
1039
|
+
self.assertEqual(self.factory.datastore.schema, "")
|
|
1040
|
+
|
|
1041
|
+
def test_schema_set_to_whitespace(self):
|
|
1042
|
+
self.env[Factory.POSTGRES_SCHEMA] = " "
|
|
1043
|
+
self.factory = Factory(self.env)
|
|
1044
|
+
self.assertEqual(self.factory.datastore.schema, "")
|
|
1045
|
+
|
|
1046
|
+
def test_scheme_adjusts_table_names_on_aggregate_recorder(self):
|
|
1047
|
+
self.factory = Factory(self.env)
|
|
1048
|
+
|
|
1049
|
+
# Check by default the table name is not qualified.
|
|
1050
|
+
recorder = self.factory.aggregate_recorder("events")
|
|
1051
|
+
assert isinstance(recorder, PostgresAggregateRecorder)
|
|
1052
|
+
self.assertEqual(recorder.events_table_name, "testcase_events")
|
|
1053
|
+
|
|
1054
|
+
# Check by default the table name is not qualified.
|
|
1055
|
+
recorder = self.factory.aggregate_recorder("snapshots")
|
|
1056
|
+
assert isinstance(recorder, PostgresAggregateRecorder)
|
|
1057
|
+
self.assertEqual(recorder.events_table_name, "testcase_snapshots")
|
|
1058
|
+
|
|
1059
|
+
# Set schema in environment.
|
|
1060
|
+
self.env[Factory.POSTGRES_SCHEMA] = "public"
|
|
1061
|
+
self.factory = Factory(self.env)
|
|
1062
|
+
self.assertEqual(self.factory.datastore.schema, "public")
|
|
1063
|
+
|
|
1064
|
+
# Check by default the table name is qualified.
|
|
1065
|
+
recorder = self.factory.aggregate_recorder("events")
|
|
1066
|
+
assert isinstance(recorder, PostgresAggregateRecorder)
|
|
1067
|
+
self.assertEqual(recorder.events_table_name, "public.testcase_events")
|
|
1068
|
+
|
|
1069
|
+
# Check by default the table name is qualified.
|
|
1070
|
+
recorder = self.factory.aggregate_recorder("snapshots")
|
|
1071
|
+
assert isinstance(recorder, PostgresAggregateRecorder)
|
|
1072
|
+
self.assertEqual(recorder.events_table_name, "public.testcase_snapshots")
|
|
1073
|
+
|
|
1074
|
+
def test_scheme_adjusts_table_name_on_application_recorder(self):
|
|
1075
|
+
self.factory = Factory(self.env)
|
|
1076
|
+
|
|
1077
|
+
# Check by default the table name is not qualified.
|
|
1078
|
+
recorder = self.factory.application_recorder()
|
|
1079
|
+
assert isinstance(recorder, PostgresApplicationRecorder)
|
|
1080
|
+
self.assertEqual(recorder.events_table_name, "testcase_events")
|
|
1081
|
+
|
|
1082
|
+
# Set schema in environment.
|
|
1083
|
+
self.env[Factory.POSTGRES_SCHEMA] = "public"
|
|
1084
|
+
self.factory = Factory(self.env)
|
|
1085
|
+
self.assertEqual(self.factory.datastore.schema, "public")
|
|
1086
|
+
|
|
1087
|
+
# Check by default the table name is qualified.
|
|
1088
|
+
recorder = self.factory.application_recorder()
|
|
1089
|
+
assert isinstance(recorder, PostgresApplicationRecorder)
|
|
1090
|
+
self.assertEqual(recorder.events_table_name, "public.testcase_events")
|
|
1091
|
+
|
|
1092
|
+
def test_scheme_adjusts_table_names_on_process_recorder(self):
|
|
1093
|
+
self.factory = Factory(self.env)
|
|
1094
|
+
|
|
1095
|
+
# Check by default the table name is not qualified.
|
|
1096
|
+
recorder = self.factory.process_recorder()
|
|
1097
|
+
assert isinstance(recorder, PostgresProcessRecorder)
|
|
1098
|
+
self.assertEqual(recorder.events_table_name, "testcase_events")
|
|
1099
|
+
self.assertEqual(recorder.tracking_table_name, "testcase_tracking")
|
|
1100
|
+
|
|
1101
|
+
# Set schema in environment.
|
|
1102
|
+
self.env[Factory.POSTGRES_SCHEMA] = "public"
|
|
1103
|
+
self.factory = Factory(self.env)
|
|
1104
|
+
self.assertEqual(self.factory.datastore.schema, "public")
|
|
1105
|
+
|
|
1106
|
+
# Check by default the table name is qualified.
|
|
1107
|
+
recorder = self.factory.process_recorder()
|
|
1108
|
+
assert isinstance(recorder, PostgresProcessRecorder)
|
|
1109
|
+
self.assertEqual(recorder.events_table_name, "public.testcase_events")
|
|
1110
|
+
self.assertEqual(recorder.tracking_table_name, "public.testcase_tracking")
|
|
1111
|
+
|
|
1112
|
+
|
|
1113
|
+
del AggregateRecorderTestCase
|
|
1114
|
+
del ApplicationRecorderTestCase
|
|
1115
|
+
del ProcessRecorderTestCase
|
|
1116
|
+
del InfrastructureFactoryTestCase
|
|
1117
|
+
del SetupPostgresDatastore
|
|
1118
|
+
del WithSchema
|
|
1119
|
+
del TestConnectionPool
|