eventsourcing-9.3.3-py3-none-any.whl → eventsourcing-9.3.4-py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
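For readers who want to reproduce a file-level comparison like the one below, here is a minimal sketch (not part of either release) that diffs the file listings of two wheels using only the Python standard library. The local wheel filenames are illustrative and assume both files have already been downloaded, e.g. with pip download eventsourcing==9.3.3 --no-deps.

    # Minimal sketch: compare the file listings of two wheel archives.
    # A wheel is a zip archive, so zipfile can list its contents.
    from zipfile import ZipFile

    def wheel_files(path):
        with ZipFile(path) as wheel:
            return set(wheel.namelist())

    # Illustrative local paths; download the wheels first.
    old = wheel_files("eventsourcing-9.3.3-py3-none-any.whl")
    new = wheel_files("eventsourcing-9.3.4-py3-none-any.whl")

    for name in sorted(old - new):
        print("removed:", name)
    for name in sorted(new - old):
        print("added:", name)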

Potentially problematic release: this version of eventsourcing might be problematic.

Files changed (127)
  1. {eventsourcing-9.3.3.dist-info → eventsourcing-9.3.4.dist-info}/METADATA +1 -1
  2. eventsourcing-9.3.4.dist-info/RECORD +24 -0
  3. eventsourcing/examples/__init__.py +0 -0
  4. eventsourcing/examples/aggregate1/__init__.py +0 -0
  5. eventsourcing/examples/aggregate1/application.py +0 -27
  6. eventsourcing/examples/aggregate1/domainmodel.py +0 -16
  7. eventsourcing/examples/aggregate1/test_application.py +0 -37
  8. eventsourcing/examples/aggregate2/__init__.py +0 -0
  9. eventsourcing/examples/aggregate2/application.py +0 -27
  10. eventsourcing/examples/aggregate2/domainmodel.py +0 -22
  11. eventsourcing/examples/aggregate2/test_application.py +0 -37
  12. eventsourcing/examples/aggregate3/__init__.py +0 -0
  13. eventsourcing/examples/aggregate3/application.py +0 -27
  14. eventsourcing/examples/aggregate3/domainmodel.py +0 -38
  15. eventsourcing/examples/aggregate3/test_application.py +0 -37
  16. eventsourcing/examples/aggregate4/__init__.py +0 -0
  17. eventsourcing/examples/aggregate4/application.py +0 -27
  18. eventsourcing/examples/aggregate4/domainmodel.py +0 -114
  19. eventsourcing/examples/aggregate4/test_application.py +0 -38
  20. eventsourcing/examples/aggregate5/__init__.py +0 -0
  21. eventsourcing/examples/aggregate5/application.py +0 -27
  22. eventsourcing/examples/aggregate5/domainmodel.py +0 -131
  23. eventsourcing/examples/aggregate5/test_application.py +0 -38
  24. eventsourcing/examples/aggregate6/__init__.py +0 -0
  25. eventsourcing/examples/aggregate6/application.py +0 -30
  26. eventsourcing/examples/aggregate6/domainmodel.py +0 -123
  27. eventsourcing/examples/aggregate6/test_application.py +0 -38
  28. eventsourcing/examples/aggregate6a/__init__.py +0 -0
  29. eventsourcing/examples/aggregate6a/application.py +0 -40
  30. eventsourcing/examples/aggregate6a/domainmodel.py +0 -149
  31. eventsourcing/examples/aggregate6a/test_application.py +0 -45
  32. eventsourcing/examples/aggregate7/__init__.py +0 -0
  33. eventsourcing/examples/aggregate7/application.py +0 -53
  34. eventsourcing/examples/aggregate7/domainmodel.py +0 -142
  35. eventsourcing/examples/aggregate7/persistence.py +0 -57
  36. eventsourcing/examples/aggregate7/test_application.py +0 -45
  37. eventsourcing/examples/aggregate7/test_compression_and_encryption.py +0 -45
  38. eventsourcing/examples/aggregate7/test_snapshotting_intervals.py +0 -67
  39. eventsourcing/examples/aggregate7a/__init__.py +0 -0
  40. eventsourcing/examples/aggregate7a/application.py +0 -56
  41. eventsourcing/examples/aggregate7a/domainmodel.py +0 -168
  42. eventsourcing/examples/aggregate7a/test_application.py +0 -46
  43. eventsourcing/examples/aggregate7a/test_compression_and_encryption.py +0 -45
  44. eventsourcing/examples/aggregate8/__init__.py +0 -0
  45. eventsourcing/examples/aggregate8/application.py +0 -47
  46. eventsourcing/examples/aggregate8/domainmodel.py +0 -71
  47. eventsourcing/examples/aggregate8/persistence.py +0 -57
  48. eventsourcing/examples/aggregate8/test_application.py +0 -44
  49. eventsourcing/examples/aggregate8/test_compression_and_encryption.py +0 -44
  50. eventsourcing/examples/aggregate8/test_snapshotting_intervals.py +0 -38
  51. eventsourcing/examples/bankaccounts/__init__.py +0 -0
  52. eventsourcing/examples/bankaccounts/application.py +0 -70
  53. eventsourcing/examples/bankaccounts/domainmodel.py +0 -56
  54. eventsourcing/examples/bankaccounts/test.py +0 -173
  55. eventsourcing/examples/cargoshipping/__init__.py +0 -0
  56. eventsourcing/examples/cargoshipping/application.py +0 -126
  57. eventsourcing/examples/cargoshipping/domainmodel.py +0 -330
  58. eventsourcing/examples/cargoshipping/interface.py +0 -143
  59. eventsourcing/examples/cargoshipping/test.py +0 -231
  60. eventsourcing/examples/contentmanagement/__init__.py +0 -0
  61. eventsourcing/examples/contentmanagement/application.py +0 -118
  62. eventsourcing/examples/contentmanagement/domainmodel.py +0 -69
  63. eventsourcing/examples/contentmanagement/test.py +0 -180
  64. eventsourcing/examples/contentmanagement/utils.py +0 -26
  65. eventsourcing/examples/contentmanagementsystem/__init__.py +0 -0
  66. eventsourcing/examples/contentmanagementsystem/application.py +0 -54
  67. eventsourcing/examples/contentmanagementsystem/postgres.py +0 -17
  68. eventsourcing/examples/contentmanagementsystem/sqlite.py +0 -17
  69. eventsourcing/examples/contentmanagementsystem/system.py +0 -14
  70. eventsourcing/examples/contentmanagementsystem/test_system.py +0 -180
  71. eventsourcing/examples/searchablecontent/__init__.py +0 -0
  72. eventsourcing/examples/searchablecontent/application.py +0 -45
  73. eventsourcing/examples/searchablecontent/persistence.py +0 -23
  74. eventsourcing/examples/searchablecontent/postgres.py +0 -118
  75. eventsourcing/examples/searchablecontent/sqlite.py +0 -136
  76. eventsourcing/examples/searchablecontent/test_application.py +0 -110
  77. eventsourcing/examples/searchablecontent/test_recorder.py +0 -68
  78. eventsourcing/examples/searchabletimestamps/__init__.py +0 -0
  79. eventsourcing/examples/searchabletimestamps/application.py +0 -32
  80. eventsourcing/examples/searchabletimestamps/persistence.py +0 -20
  81. eventsourcing/examples/searchabletimestamps/postgres.py +0 -110
  82. eventsourcing/examples/searchabletimestamps/sqlite.py +0 -99
  83. eventsourcing/examples/searchabletimestamps/test_searchabletimestamps.py +0 -94
  84. eventsourcing/examples/test_invoice.py +0 -176
  85. eventsourcing/examples/test_parking_lot.py +0 -206
  86. eventsourcing/tests/application_tests/__init__.py +0 -0
  87. eventsourcing/tests/application_tests/test_application_with_automatic_snapshotting.py +0 -55
  88. eventsourcing/tests/application_tests/test_application_with_popo.py +0 -22
  89. eventsourcing/tests/application_tests/test_application_with_postgres.py +0 -75
  90. eventsourcing/tests/application_tests/test_application_with_sqlite.py +0 -72
  91. eventsourcing/tests/application_tests/test_cache.py +0 -134
  92. eventsourcing/tests/application_tests/test_event_sourced_log.py +0 -162
  93. eventsourcing/tests/application_tests/test_notificationlog.py +0 -232
  94. eventsourcing/tests/application_tests/test_notificationlogreader.py +0 -126
  95. eventsourcing/tests/application_tests/test_processapplication.py +0 -110
  96. eventsourcing/tests/application_tests/test_processingpolicy.py +0 -109
  97. eventsourcing/tests/application_tests/test_repository.py +0 -504
  98. eventsourcing/tests/application_tests/test_snapshotting.py +0 -68
  99. eventsourcing/tests/application_tests/test_upcasting.py +0 -459
  100. eventsourcing/tests/docs_tests/__init__.py +0 -0
  101. eventsourcing/tests/docs_tests/test_docs.py +0 -293
  102. eventsourcing/tests/domain_tests/__init__.py +0 -0
  103. eventsourcing/tests/domain_tests/test_aggregate.py +0 -1200
  104. eventsourcing/tests/domain_tests/test_aggregate_decorators.py +0 -1604
  105. eventsourcing/tests/domain_tests/test_domainevent.py +0 -80
  106. eventsourcing/tests/interface_tests/__init__.py +0 -0
  107. eventsourcing/tests/interface_tests/test_remotenotificationlog.py +0 -258
  108. eventsourcing/tests/persistence_tests/__init__.py +0 -0
  109. eventsourcing/tests/persistence_tests/test_aes.py +0 -93
  110. eventsourcing/tests/persistence_tests/test_connection_pool.py +0 -722
  111. eventsourcing/tests/persistence_tests/test_eventstore.py +0 -72
  112. eventsourcing/tests/persistence_tests/test_infrastructure_factory.py +0 -21
  113. eventsourcing/tests/persistence_tests/test_mapper.py +0 -113
  114. eventsourcing/tests/persistence_tests/test_noninterleaving_notification_ids.py +0 -69
  115. eventsourcing/tests/persistence_tests/test_popo.py +0 -124
  116. eventsourcing/tests/persistence_tests/test_postgres.py +0 -1120
  117. eventsourcing/tests/persistence_tests/test_sqlite.py +0 -348
  118. eventsourcing/tests/persistence_tests/test_transcoder.py +0 -44
  119. eventsourcing/tests/system_tests/__init__.py +0 -0
  120. eventsourcing/tests/system_tests/test_runner.py +0 -935
  121. eventsourcing/tests/system_tests/test_system.py +0 -284
  122. eventsourcing/tests/utils_tests/__init__.py +0 -0
  123. eventsourcing/tests/utils_tests/test_utils.py +0 -226
  124. eventsourcing-9.3.3.dist-info/RECORD +0 -145
  125. {eventsourcing-9.3.3.dist-info → eventsourcing-9.3.4.dist-info}/AUTHORS +0 -0
  126. {eventsourcing-9.3.3.dist-info → eventsourcing-9.3.4.dist-info}/LICENSE +0 -0
  127. {eventsourcing-9.3.3.dist-info → eventsourcing-9.3.4.dist-info}/WHEEL +0 -0
@@ -1,1120 +0,0 @@
- from __future__ import annotations
-
- import sys
- from threading import Event, Thread
- from time import sleep
- from typing import List
- from unittest import TestCase, skipIf
- from uuid import uuid4
-
- import psycopg
- from psycopg import Connection
- from psycopg_pool import ConnectionPool
-
- from eventsourcing.persistence import (
-     DatabaseError,
-     DataError,
-     InfrastructureFactory,
-     IntegrityError,
-     InterfaceError,
-     InternalError,
-     NotSupportedError,
-     OperationalError,
-     PersistenceError,
-     ProgrammingError,
-     StoredEvent,
-     Tracking,
- )
- from eventsourcing.postgres import (
-     Factory,
-     PostgresAggregateRecorder,
-     PostgresApplicationRecorder,
-     PostgresDatastore,
-     PostgresProcessRecorder,
- )
- from eventsourcing.tests.persistence import (
-     AggregateRecorderTestCase,
-     ApplicationRecorderTestCase,
-     InfrastructureFactoryTestCase,
-     ProcessRecorderTestCase,
- )
- from eventsourcing.tests.persistence_tests.test_connection_pool import (
-     TestConnectionPool,
- )
- from eventsourcing.tests.postgres_utils import (
-     drop_postgres_table,
-     pg_close_all_connections,
- )
- from eventsourcing.utils import Environment, get_topic
-
-
- class TestPostgresDatastore(TestCase):
-     def test_is_pipeline_supported(self):
-         self.assertTrue(psycopg.Pipeline.is_supported())
-
-     def test_has_connection_pool(self):
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-         ) as datastore:
-             self.assertIsInstance(datastore.pool, ConnectionPool)
-
-     def test_get_connection(self):
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-         ) as datastore:
-             conn: Connection
-             with datastore.get_connection() as conn:
-                 self.assertIsInstance(conn, Connection)
-
-     def test_context_manager_converts_exceptions_and_conditionally_calls_close(self):
-         cases = [
-             (InterfaceError, psycopg.InterfaceError(), True),
-             (DataError, psycopg.DataError(), False),
-             (OperationalError, psycopg.OperationalError(), True),
-             (IntegrityError, psycopg.IntegrityError(), False),
-             (InternalError, psycopg.InternalError(), False),
-             (ProgrammingError, psycopg.ProgrammingError(), False),
-             (NotSupportedError, psycopg.NotSupportedError(), False),
-             (DatabaseError, psycopg.DatabaseError(), False),
-             (PersistenceError, psycopg.Error(), True),
-             (TypeError, TypeError(), True),
-             (TypeError, TypeError, True),
-         ]
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-         ) as datastore:
-             for expected_exc_type, raised_exc, expect_conn_closed in cases:
-                 with self.assertRaises(expected_exc_type):
-                     conn: Connection
-                     with datastore.get_connection() as conn:
-                         self.assertFalse(conn.closed)
-                         raise raised_exc
-                 self.assertTrue(conn.closed is expect_conn_closed, raised_exc)
-
-     def test_transaction_from_datastore(self):
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-         ) as datastore, datastore.transaction(commit=False) as curs:
-             # As a convenience, we can use the transaction() method.
-             curs.execute("SELECT 1")
-             self.assertEqual(curs.fetchall(), [{"?column?": 1}])
-
-     def test_connect_failure_raises_operational_error(self):
-         datastore = PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="4321",  # wrong port
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-             pool_open_timeout=2,
-         )
-         with self.assertRaises(OperationalError), datastore.get_connection():
-             pass
-
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="987654321",  # bad value
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-             pool_open_timeout=2,
-         ) as datastore, self.assertRaises(OperationalError), datastore.get_connection():
-             pass
-
-     @skipIf(
-         sys.version_info[:2] < (3, 8),
-         "The 'check' argument and the check_connection() method aren't supported.",
-     )
-     def test_pre_ping(self):
-         # Define method to open and close a connection, and then execute a statement.
-         def open_close_execute(*, pre_ping: bool):
-             with PostgresDatastore(
-                 dbname="eventsourcing",
-                 host="127.0.0.1",
-                 port="5432",
-                 user="eventsourcing",
-                 password="eventsourcing",  # noqa: S106
-                 pool_size=1,
-                 pre_ping=pre_ping,
-             ) as datastore:
-
-                 # Create a connection.
-                 conn: Connection
-                 with datastore.get_connection() as conn, conn.cursor() as curs:
-                     curs.execute("SELECT 1")
-                     self.assertEqual(curs.fetchall(), [{"?column?": 1}])
-
-                 # Close all connections via separate connection.
-                 pg_close_all_connections()
-
-                 # Check the connection doesn't think it's closed.
-                 self.assertTrue(datastore.pool._pool)
-                 self.assertFalse(datastore.pool._pool[0].closed)
-
-                 # Get a closed connection.
-                 conn: Connection
-                 with datastore.get_connection() as conn:
-                     self.assertFalse(conn.closed)
-
-                     with conn.cursor() as curs:
-                         curs.execute("SELECT 1")
-
-         # Check using the closed connection gives an error.
-         with self.assertRaises(OperationalError):
-             open_close_execute(pre_ping=False)
-
-         # Now try that again with pre-ping enabled.
-         open_close_execute(pre_ping=True)
-
-     def test_idle_in_transaction_session_timeout(self):
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="eventsourcing",  # noqa: S106
-             idle_in_transaction_session_timeout=1,
-         ) as datastore:
-
-             # Error on commit is raised.
-             with self.assertRaises(
-                 OperationalError
-             ), datastore.get_connection() as curs:
-                 curs.execute("BEGIN")
-                 curs.execute("SELECT 1")
-                 self.assertFalse(curs.closed)
-                 sleep(2)
-
-             # Error on commit is raised.
-             with self.assertRaises(OperationalError), datastore.transaction(
-                 commit=True
-             ) as curs:
-                 # curs.execute("BEGIN")
-                 curs.execute("SELECT 1")
-                 self.assertFalse(curs.closed)
-                 sleep(2)
-
-             # Force rollback. Error is ignored.
-             with datastore.transaction(commit=False) as curs:
-                 # curs.execute("BEGIN")
-                 curs.execute("SELECT 1")
-                 self.assertFalse(curs.closed)
-                 sleep(2)
-
-             # Autocommit mode - transaction is committed in time.
-             with datastore.get_connection() as curs:
-                 curs.execute("SELECT 1")
-                 self.assertFalse(curs.closed)
-                 sleep(2)
-
-     def test_get_password_func(self):
-         # Check correct password is required, wrong password causes operational error.
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="wrong",  # noqa: S106
-             pool_size=1,
-             connect_timeout=3,
-         ) as datastore:
-
-             conn: Connection
-             with self.assertRaises(
-                 OperationalError
-             ), datastore.get_connection() as conn, conn.cursor() as curs:
-                 curs.execute("SELECT 1")
-
-         # Define a "get password" function, with a generator that returns
-         # wrong password a few times first.
-         def password_token_generator():
-             yield "wrong"
-             yield "wrong"
-             yield "eventsourcing"
-
-         password_generator = password_token_generator()
-
-         def get_password_func():
-             return next(password_generator)
-
-         # Construct datastore with "get password" function.
-         with PostgresDatastore(
-             dbname="eventsourcing",
-             host="127.0.0.1",
-             port="5432",
-             user="eventsourcing",
-             password="",
-             pool_size=1,
-             get_password_func=get_password_func,
-             connect_timeout=3,
-         ) as datastore, datastore.get_connection() as conn, conn.cursor() as curs:
-             # Create a connection, and check it works (this test depends on psycopg
-             # retrying attempt to connect, should call "get password" twice).
-             curs.execute("SELECT 1")
-             self.assertEqual(curs.fetchall(), [{"?column?": 1}])
-
-
- # Use maximally long identifier for table name.
- EVENTS_TABLE_NAME = "s" * 50 + "stored_events"
-
- MAX_IDENTIFIER_LEN = 63
-
-
- def _check_identifier_is_max_len(identifier):
-     if len(identifier) != MAX_IDENTIFIER_LEN:
-         msg = "Expected length of name string to be max identifier length"
-         raise ValueError(msg)
-
-
- _check_identifier_is_max_len(EVENTS_TABLE_NAME)
-
-
- class SetupPostgresDatastore(TestCase):
-     schema = ""
-
-     def setUp(self) -> None:
-         super().setUp()
-         self.datastore = PostgresDatastore(
-             "eventsourcing",
-             "127.0.0.1",
-             "5432",
-             "eventsourcing",
-             "eventsourcing",
-             schema=self.schema,
-         )
-         self.drop_tables()
-
-     def tearDown(self) -> None:
-         super().tearDown()
-         self.drop_tables()
-         self.datastore.close()
-
-     def drop_tables(self):
-         events_table_name = EVENTS_TABLE_NAME
-         if self.datastore.schema:
-             events_table_name = f"{self.datastore.schema}.{events_table_name}"
-         drop_postgres_table(self.datastore, events_table_name)
-
-
- class WithSchema(SetupPostgresDatastore):
-     schema = "myschema"
-
-     def test_datastore_has_schema(self):
-         self.assertEqual(self.datastore.schema, self.schema)
-
-
- class TestPostgresAggregateRecorder(SetupPostgresDatastore, AggregateRecorderTestCase):
-     def create_recorder(
-         self, table_name=EVENTS_TABLE_NAME
-     ) -> PostgresAggregateRecorder:
-         if self.datastore.schema:
-             table_name = f"{self.datastore.schema}.{table_name}"
-         recorder = PostgresAggregateRecorder(
-             datastore=self.datastore, events_table_name=table_name
-         )
-         recorder.create_table()
-         return recorder
-
-     def drop_tables(self):
-         super().drop_tables()
-         drop_postgres_table(self.datastore, "stored_events")
-
-     def test_create_table(self):
-         recorder = PostgresAggregateRecorder(
-             datastore=self.datastore, events_table_name="stored_events"
-         )
-         recorder.create_table()
-
-     def test_insert_and_select(self):
-         super().test_insert_and_select()
-
-     def test_performance(self):
-         super().test_performance()
-
-     def test_retry_insert_events_after_closing_connection(self):
-         # This checks connection is recreated after connections are closed.
-         self.datastore.pool.pool_size = 1
-
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Check we have open connections.
-         self.assertTrue(self.datastore.pool._pool)
-
-         # Close connections.
-         pg_close_all_connections()
-         self.assertFalse(self.datastore.pool._pool[0].closed)
-
-         # Write a stored event.
-         stored_event1 = StoredEvent(
-             originator_id=uuid4(),
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         recorder.insert_events([stored_event1])
-
-
- class TestPostgresAggregateRecorderWithSchema(
-     WithSchema, TestPostgresAggregateRecorder
- ):
-     pass
-
-
- class TestPostgresAggregateRecorderErrors(SetupPostgresDatastore, TestCase):
-     def create_recorder(self, table_name=EVENTS_TABLE_NAME):
-         return PostgresAggregateRecorder(
-             datastore=self.datastore, events_table_name=table_name
-         )
-
-     def test_excessively_long_table_name_raises_error(self):
-         # Add one more character to the table name.
-         long_table_name = "s" + EVENTS_TABLE_NAME
-         self.assertEqual(len(long_table_name), 64)
-         with self.assertRaises(ProgrammingError):
-             self.create_recorder(long_table_name)
-
-     def test_create_table_raises_programming_error_when_sql_is_broken(self):
-         recorder = self.create_recorder()
-
-         # Mess up the statement.
-         recorder.create_table_statements = ["BLAH"]
-         with self.assertRaises(ProgrammingError):
-             recorder.create_table()
-
-     def test_insert_events_raises_programming_error_when_table_not_created(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Write a stored event without creating the table.
-         stored_event1 = StoredEvent(
-             originator_id=uuid4(),
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         with self.assertRaises(ProgrammingError):
-             recorder.insert_events([stored_event1])
-
-     def test_insert_events_raises_programming_error_when_sql_is_broken(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Create the table.
-         recorder.create_table()
-
-         # Write a stored event with broken statement.
-         recorder.insert_events_statement = "BLAH"
-         stored_event1 = StoredEvent(
-             originator_id=uuid4(),
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         with self.assertRaises(ProgrammingError):
-             recorder.insert_events([stored_event1])
-
-     def test_select_events_raises_programming_error_when_table_not_created(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Select events without creating the table.
-         originator_id = uuid4()
-         with self.assertRaises(ProgrammingError):
-             recorder.select_events(originator_id=originator_id)
-
-     def test_select_events_raises_programming_error_when_sql_is_broken(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Create the table.
-         recorder.create_table()
-
-         # Select events with broken statement.
-         recorder.select_events_statement = "BLAH"
-         originator_id = uuid4()
-         with self.assertRaises(ProgrammingError):
-             recorder.select_events(originator_id=originator_id)
-
-
- class TestPostgresApplicationRecorder(
-     SetupPostgresDatastore, ApplicationRecorderTestCase
- ):
-     def create_recorder(
-         self, table_name=EVENTS_TABLE_NAME
-     ) -> PostgresApplicationRecorder:
-         if self.datastore.schema:
-             table_name = f"{self.datastore.schema}.{table_name}"
-         recorder = PostgresApplicationRecorder(
-             self.datastore, events_table_name=table_name
-         )
-         recorder.create_table()
-         return recorder
-
-     def test_insert_select(self) -> None:
-         super().test_insert_select()
-
-     def test_concurrent_no_conflicts(self):
-         super().test_concurrent_no_conflicts()
-
-     def test_concurrent_throughput(self):
-         self.datastore.pool.pool_size = 4
-         super().test_concurrent_throughput()
-
-     def test_retry_select_notifications_after_closing_connection(self):
-         # This checks connection is recreated after InterfaceError.
-
-         # Construct the recorder.
-         recorder = self.create_recorder()
-         self.datastore.pool.pool_size = 1
-
-         # Write a stored event.
-         originator_id = uuid4()
-         stored_event1 = StoredEvent(
-             originator_id=originator_id,
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         recorder.insert_events([stored_event1])
-
-         # Close connections.
-         pg_close_all_connections()
-         self.assertFalse(self.datastore.pool._pool[0].closed)
-
-         # Select events.
-         recorder.select_notifications(start=1, limit=1)
-
-     def test_retry_max_notification_id_after_closing_connection(self):
-         # This checks connection is recreated after InterfaceError.
-
-         # Construct the recorder.
-         recorder = self.create_recorder()
-         self.datastore.pool.pool_size = 1
-
-         # Write a stored event.
-         originator_id = uuid4()
-         stored_event1 = StoredEvent(
-             originator_id=originator_id,
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         recorder.insert_events([stored_event1])
-
-         # Close connections.
-         pg_close_all_connections()
-         self.assertFalse(self.datastore.pool._pool[0].closed)
-
-         # Get max notification ID.
-         recorder.max_notification_id()
-
-     def test_insert_lock_timeout_actually_works(self):
-         self.datastore.lock_timeout = 1
-         recorder: PostgresApplicationRecorder = self.create_recorder()
-
-         stored_event1 = StoredEvent(
-             originator_id=uuid4(),
-             originator_version=1,
-             topic="topic1",
-             state=b"state1",
-         )
-         stored_event2 = StoredEvent(
-             originator_id=uuid4(),
-             originator_version=1,
-             topic="topic1",
-             state=b"state1",
-         )
-
-         table_lock_acquired = Event()
-         test_ended = Event()
-         table_lock_timed_out = Event()
-
-         def insert1():
-             conn = self.datastore.get_connection()
-             with conn as conn, conn.transaction(), conn.cursor() as curs:
-                 # Lock table.
-                 recorder._insert_stored_events(curs, [stored_event1])
-                 table_lock_acquired.set()
-                 # Wait for other thread to timeout.
-                 test_ended.wait(timeout=5)  # keep the lock
-
-         def insert2():
-             try:
-                 conn: Connection
-                 with self.datastore.get_connection() as conn:
-                     # Wait for other thread to lock table.
-                     table_lock_acquired.wait(timeout=5)
-                     # Expect to timeout.
-                     with conn.transaction(), conn.cursor() as curs:
-                         recorder._insert_stored_events(curs, [stored_event2])
-             except OperationalError as e:
-                 if "lock timeout" in e.args[0]:
-                     table_lock_timed_out.set()
-
-         thread1 = Thread(target=insert1, daemon=True)
-         thread1.start()
-         thread2 = Thread(target=insert2, daemon=True)
-         thread2.start()
-
-         table_lock_timed_out.wait(timeout=4)
-         test_ended.set()
-
-         thread1.join(timeout=10)
-         thread2.join(timeout=10)
-
-         self.assertTrue(table_lock_timed_out.is_set())
-
-
- class TestPostgresApplicationRecorderWithSchema(
-     WithSchema, TestPostgresApplicationRecorder
- ):
-     pass
-
-
- class TestPostgresApplicationRecorderErrors(SetupPostgresDatastore, TestCase):
-     def create_recorder(self, table_name=EVENTS_TABLE_NAME):
-         return PostgresApplicationRecorder(self.datastore, events_table_name=table_name)
-
-     def test_excessively_long_table_name_raises_error(self):
-         # Add one more character to the table name.
-         long_table_name = "s" + EVENTS_TABLE_NAME
-         self.assertEqual(len(long_table_name), 64)
-         with self.assertRaises(ProgrammingError):
-             self.create_recorder(long_table_name)
-
-     def test_select_notification_raises_programming_error_when_table_not_created(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Select notifications without creating table.
-         with self.assertRaises(ProgrammingError):
-             recorder.select_notifications(start=1, limit=1)
-
-     def test_max_notification_id_raises_programming_error_when_table_not_created(self):
-         # Construct the recorder.
-         recorder = PostgresApplicationRecorder(
-             datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
-         )
-
-         # Select notifications without creating table.
-         with self.assertRaises(ProgrammingError):
-             recorder.max_notification_id()
-
-     def test_fetch_ids_after_insert_events(self):
-         def make_events() -> List[StoredEvent]:
-             return [
-                 StoredEvent(
-                     originator_id=uuid4(),
-                     originator_version=1,
-                     state=b"",
-                     topic="",
-                 )
-             ]
-
-         #
-         # Check it actually works.
-         recorder = PostgresApplicationRecorder(
-             datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
-         )
-         recorder.create_table()
-         max_notification_id = recorder.max_notification_id()
-         notification_ids = recorder.insert_events(make_events())
-         self.assertEqual(len(notification_ids), 1)
-         self.assertEqual(max_notification_id + 1, notification_ids[0])
-
-         # Events but no lock table statements.
-         with self.assertRaises(ProgrammingError):
-             recorder = PostgresApplicationRecorder(
-                 datastore=self.datastore, events_table_name=EVENTS_TABLE_NAME
-             )
-             recorder.create_table()
-             recorder.lock_table_statements = []
-             recorder.insert_events(make_events())
-
-
- TRACKING_TABLE_NAME = "n" * 42 + "notification_tracking"
- _check_identifier_is_max_len(TRACKING_TABLE_NAME)
-
-
- class TestPostgresProcessRecorder(SetupPostgresDatastore, ProcessRecorderTestCase):
-     def drop_tables(self):
-         super().drop_tables()
-         tracking_table_name = TRACKING_TABLE_NAME
-         if self.datastore.schema:
-             tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-         drop_postgres_table(self.datastore, tracking_table_name)
-
-     def create_recorder(self):
-         events_table_name = EVENTS_TABLE_NAME
-         tracking_table_name = TRACKING_TABLE_NAME
-         if self.datastore.schema:
-             events_table_name = f"{self.datastore.schema}.{events_table_name}"
-         if self.datastore.schema:
-             tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-         recorder = PostgresProcessRecorder(
-             datastore=self.datastore,
-             events_table_name=events_table_name,
-             tracking_table_name=tracking_table_name,
-         )
-         recorder.create_table()
-         return recorder
-
-     def test_insert_select(self):
-         super().test_insert_select()
-
-     def test_performance(self):
-         super().test_performance()
-
-     def test_excessively_long_table_names_raise_error(self):
-         with self.assertRaises(ProgrammingError):
-             PostgresProcessRecorder(
-                 datastore=self.datastore,
-                 events_table_name="e" + EVENTS_TABLE_NAME,
-                 tracking_table_name=TRACKING_TABLE_NAME,
-             )
-
-         with self.assertRaises(ProgrammingError):
-             PostgresProcessRecorder(
-                 datastore=self.datastore,
-                 events_table_name=EVENTS_TABLE_NAME,
-                 tracking_table_name="n" + TRACKING_TABLE_NAME,
-             )
-
-     def test_retry_max_tracking_id_after_closing_connection(self):
-         # This checks connection is recreated after InterfaceError.
-
-         # Construct the recorder.
-         recorder = self.create_recorder()
-         self.datastore.pool.pool_size = 1
-
-         # Write a tracking record.
-         originator_id = uuid4()
-         stored_event1 = StoredEvent(
-             originator_id=originator_id,
-             originator_version=0,
-             topic="topic1",
-             state=b"state1",
-         )
-         recorder.insert_events([stored_event1], tracking=Tracking("upstream", 1))
-
-         # Close connections.
-         pg_close_all_connections()
-         self.assertFalse(self.datastore.pool._pool[0].closed)
-
-         # Get max tracking ID.
-         notification_id = recorder.max_tracking_id("upstream")
-         self.assertEqual(notification_id, 1)
-
-
- class TestPostgresProcessRecorderWithSchema(WithSchema, TestPostgresProcessRecorder):
-     pass
-
-
- class TestPostgresProcessRecorderErrors(SetupPostgresDatastore, TestCase):
-     def drop_tables(self):
-         super().drop_tables()
-         drop_postgres_table(self.datastore, TRACKING_TABLE_NAME)
-
-     def create_recorder(self):
-         return PostgresProcessRecorder(
-             datastore=self.datastore,
-             events_table_name=EVENTS_TABLE_NAME,
-             tracking_table_name=TRACKING_TABLE_NAME,
-         )
-
-     def test_max_tracking_id_raises_programming_error_when_table_not_created(self):
-         # Construct the recorder.
-         recorder = self.create_recorder()
-
-         # Get max tracking ID without creating table.
-         with self.assertRaises(ProgrammingError):
-             recorder.max_tracking_id("upstream")
-
-
- class TestPostgresInfrastructureFactory(InfrastructureFactoryTestCase):
-     def test_create_application_recorder(self):
-         super().test_create_application_recorder()
-
-     def expected_factory_class(self):
-         return Factory
-
-     def expected_aggregate_recorder_class(self):
-         return PostgresAggregateRecorder
-
-     def expected_application_recorder_class(self):
-         return PostgresApplicationRecorder
-
-     def expected_process_recorder_class(self):
-         return PostgresProcessRecorder
-
-     def setUp(self) -> None:
-         self.env = Environment("TestCase")
-         self.env[InfrastructureFactory.PERSISTENCE_MODULE] = Factory.__module__
-         self.env[Factory.POSTGRES_DBNAME] = "eventsourcing"
-         self.env[Factory.POSTGRES_HOST] = "127.0.0.1"
-         self.env[Factory.POSTGRES_PORT] = "5432"
-         self.env[Factory.POSTGRES_USER] = "eventsourcing"
-         self.env[Factory.POSTGRES_PASSWORD] = "eventsourcing"
-         self.drop_tables()
-         super().setUp()
-
-     def tearDown(self) -> None:
-         self.drop_tables()
-         super().tearDown()
-
-     def drop_tables(self):
-         with PostgresDatastore(
-             "eventsourcing",
-             "127.0.0.1",
-             "5432",
-             "eventsourcing",
-             "eventsourcing",
-         ) as datastore:
-             drop_postgres_table(datastore, "testcase_events")
-             drop_postgres_table(datastore, "testcase_tracking")
-
-     def test_close(self):
-         factory = Factory(self.env)
-         conn: Connection
-         with factory.datastore.get_connection() as conn:
-             conn.execute("SELECT 1")
-         self.assertFalse(factory.datastore.pool.closed)
-         factory.close()
-         self.assertTrue(factory.datastore.pool.closed)
-
-     def test_conn_max_age_is_set_to_float(self):
-         self.env[Factory.POSTGRES_CONN_MAX_AGE] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_lifetime, 60 * 60.0)
-
-     def test_conn_max_age_is_set_to_number(self):
-         self.env[Factory.POSTGRES_CONN_MAX_AGE] = "0"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_lifetime, 0)
-
-     def test_pool_size_is_five_by_default(self):
-         self.assertTrue(Factory.POSTGRES_POOL_SIZE not in self.env)
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.min_size, 5)
-
-         self.env[Factory.POSTGRES_POOL_SIZE] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.min_size, 5)
-
-     def test_max_overflow_is_ten_by_default(self):
-         self.assertTrue(Factory.POSTGRES_MAX_OVERFLOW not in self.env)
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_size, 15)
-
-         self.env[Factory.POSTGRES_MAX_OVERFLOW] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_size, 15)
-
-     def test_max_overflow_is_set(self):
-         self.env[Factory.POSTGRES_MAX_OVERFLOW] = "7"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_size, 12)
-
-     def test_pool_size_is_set(self):
-         self.env[Factory.POSTGRES_POOL_SIZE] = "6"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.min_size, 6)
-
-     def test_connect_timeout_is_thirty_by_default(self):
-         self.assertTrue(Factory.POSTGRES_CONNECT_TIMEOUT not in self.env)
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.timeout, 30)
-
-         self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.timeout, 30)
-
-     def test_connect_timeout_is_set(self):
-         self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = "8"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.timeout, 8)
-
-     def test_max_waiting_is_0_by_default(self):
-         self.assertTrue(Factory.POSTGRES_MAX_WAITING not in self.env)
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_waiting, 0)
-
-         self.env[Factory.POSTGRES_MAX_WAITING] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_waiting, 0)
-
-     def test_max_waiting_is_set(self):
-         self.env[Factory.POSTGRES_MAX_WAITING] = "8"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pool.max_waiting, 8)
-
-     def test_lock_timeout_is_zero_by_default(self):
-         self.assertTrue(Factory.POSTGRES_LOCK_TIMEOUT not in self.env)
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.lock_timeout, 0)
-
-         self.env[Factory.POSTGRES_LOCK_TIMEOUT] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.lock_timeout, 0)
-
-     def test_lock_timeout_is_set(self):
-         self.env[Factory.POSTGRES_LOCK_TIMEOUT] = "1"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.lock_timeout, 1)
-
-     def test_idle_in_transaction_session_timeout_is_5_by_default(self):
-         self.assertTrue(
-             Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT not in self.env
-         )
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 5)
-         self.factory.close()
-
-         self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 5)
-
-     def test_idle_in_transaction_session_timeout_is_set(self):
-         self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "10"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.idle_in_transaction_session_timeout, 10)
-
-     def test_pre_ping_off_by_default(self):
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pre_ping, False)
-
-     def test_pre_ping_off(self):
-         self.env[Factory.POSTGRES_PRE_PING] = "off"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pre_ping, False)
-
-     def test_pre_ping_on(self):
-         self.env[Factory.POSTGRES_PRE_PING] = "on"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.pre_ping, True)
-
-     def test_get_password_topic_not_set(self):
-         self.factory = Factory(self.env)
-         self.assertIsNone(self.factory.datastore.pool.get_password_func, None)
-
-     def test_get_password_topic_set(self):
-         def get_password_func():
-             return "eventsourcing"
-
-         self.env[Factory.POSTGRES_GET_PASSWORD_TOPIC] = get_topic(get_password_func)
-         self.factory = Factory(self.env)
-         self.assertEqual(
-             self.factory.datastore.pool.get_password_func, get_password_func
-         )
-
-     def test_environment_error_raised_when_conn_max_age_not_a_float(self):
-         self.env[Factory.POSTGRES_CONN_MAX_AGE] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_CONN_MAX_AGE' "
-             "is invalid. If set, a float or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_connect_timeout_not_an_integer(self):
-         self.env[Factory.POSTGRES_CONNECT_TIMEOUT] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_CONNECT_TIMEOUT' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_max_waiting_not_an_integer(self):
-         self.env[Factory.POSTGRES_MAX_WAITING] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_MAX_WAITING' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_lock_timeout_not_an_integer(self):
-         self.env[Factory.POSTGRES_LOCK_TIMEOUT] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_LOCK_TIMEOUT' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_min_conn_not_an_integer(self):
-         self.env[Factory.POSTGRES_POOL_SIZE] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_POOL_SIZE' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_max_conn_not_an_integer(self):
-         self.env[Factory.POSTGRES_MAX_OVERFLOW] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key 'POSTGRES_MAX_OVERFLOW' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_idle_in_transaction_session_timeout_not_int(
-         self,
-     ):
-         self.env[Factory.POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT] = "abc"
-         with self.assertRaises(EnvironmentError) as cm:
-             Factory(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres environment value for key "
-             "'POSTGRES_IDLE_IN_TRANSACTION_SESSION_TIMEOUT' "
-             "is invalid. If set, an integer or empty string is expected: 'abc'",
-         )
-
-     def test_environment_error_raised_when_dbname_missing(self):
-         del self.env[Factory.POSTGRES_DBNAME]
-         with self.assertRaises(EnvironmentError) as cm:
-             InfrastructureFactory.construct(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres database name not found in environment "
-             "with key 'POSTGRES_DBNAME'",
-         )
-
-     def test_environment_error_raised_when_dbhost_missing(self):
-         del self.env[Factory.POSTGRES_HOST]
-         with self.assertRaises(EnvironmentError) as cm:
-             InfrastructureFactory.construct(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres host not found in environment with key 'POSTGRES_HOST'",
-         )
-
-     def test_environment_error_raised_when_user_missing(self):
-         del self.env[Factory.POSTGRES_USER]
-         with self.assertRaises(EnvironmentError) as cm:
-             InfrastructureFactory.construct(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres user not found in environment with key 'POSTGRES_USER'",
-         )
-
-     def test_environment_error_raised_when_password_missing(self):
-         del self.env[Factory.POSTGRES_PASSWORD]
-         with self.assertRaises(EnvironmentError) as cm:
-             InfrastructureFactory.construct(self.env)
-         self.assertEqual(
-             cm.exception.args[0],
-             "Postgres password not found in environment with key 'POSTGRES_PASSWORD'",
-         )
-
-     def test_schema_set_to_empty_string(self):
-         self.env[Factory.POSTGRES_SCHEMA] = ""
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.schema, "")
-
-     def test_schema_set_to_whitespace(self):
-         self.env[Factory.POSTGRES_SCHEMA] = " "
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.schema, "")
-
-     def test_scheme_adjusts_table_names_on_aggregate_recorder(self):
-         self.factory = Factory(self.env)
-
-         # Check by default the table name is not qualified.
-         recorder = self.factory.aggregate_recorder("events")
-         assert isinstance(recorder, PostgresAggregateRecorder)
-         self.assertEqual(recorder.events_table_name, "testcase_events")
-
-         # Check by default the table name is not qualified.
-         recorder = self.factory.aggregate_recorder("snapshots")
-         assert isinstance(recorder, PostgresAggregateRecorder)
-         self.assertEqual(recorder.events_table_name, "testcase_snapshots")
-
-         # Set schema in environment.
-         self.env[Factory.POSTGRES_SCHEMA] = "public"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.schema, "public")
-
-         # Check by default the table name is qualified.
-         recorder = self.factory.aggregate_recorder("events")
-         assert isinstance(recorder, PostgresAggregateRecorder)
-         self.assertEqual(recorder.events_table_name, "public.testcase_events")
-
-         # Check by default the table name is qualified.
-         recorder = self.factory.aggregate_recorder("snapshots")
-         assert isinstance(recorder, PostgresAggregateRecorder)
-         self.assertEqual(recorder.events_table_name, "public.testcase_snapshots")
-
-     def test_scheme_adjusts_table_name_on_application_recorder(self):
-         self.factory = Factory(self.env)
-
-         # Check by default the table name is not qualified.
-         recorder = self.factory.application_recorder()
-         assert isinstance(recorder, PostgresApplicationRecorder)
-         self.assertEqual(recorder.events_table_name, "testcase_events")
-
-         # Set schema in environment.
-         self.env[Factory.POSTGRES_SCHEMA] = "public"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.schema, "public")
-
-         # Check by default the table name is qualified.
-         recorder = self.factory.application_recorder()
-         assert isinstance(recorder, PostgresApplicationRecorder)
-         self.assertEqual(recorder.events_table_name, "public.testcase_events")
-
-     def test_scheme_adjusts_table_names_on_process_recorder(self):
-         self.factory = Factory(self.env)
-
-         # Check by default the table name is not qualified.
-         recorder = self.factory.process_recorder()
-         assert isinstance(recorder, PostgresProcessRecorder)
-         self.assertEqual(recorder.events_table_name, "testcase_events")
-         self.assertEqual(recorder.tracking_table_name, "testcase_tracking")
-
-         # Set schema in environment.
-         self.env[Factory.POSTGRES_SCHEMA] = "public"
-         self.factory = Factory(self.env)
-         self.assertEqual(self.factory.datastore.schema, "public")
-
-         # Check by default the table name is qualified.
-         recorder = self.factory.process_recorder()
-         assert isinstance(recorder, PostgresProcessRecorder)
-         self.assertEqual(recorder.events_table_name, "public.testcase_events")
-         self.assertEqual(recorder.tracking_table_name, "public.testcase_tracking")
-
-
- del AggregateRecorderTestCase
- del ApplicationRecorderTestCase
- del ProcessRecorderTestCase
- del InfrastructureFactoryTestCase
- del SetupPostgresDatastore
- del WithSchema
- del TestConnectionPool
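
The removed module above exercises PostgresDatastore directly. As a point of orientation, this is the core connection pattern the tests rely on, shown here as a minimal sketch that assumes a local Postgres server configured with the same database name, user, and password used throughout the removed file:

    from eventsourcing.postgres import PostgresDatastore

    # Assumes a local Postgres with the credentials used by the removed tests.
    with PostgresDatastore(
        dbname="eventsourcing",
        host="127.0.0.1",
        port="5432",
        user="eventsourcing",
        password="eventsourcing",
    ) as datastore:
        # get_connection() yields a pooled psycopg Connection.
        with datastore.get_connection() as conn, conn.cursor() as curs:
            curs.execute("SELECT 1")
            print(curs.fetchall())  # the tests expect [{"?column?": 1}]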