eventsourcing 9.4.5__py3-none-any.whl → 9.5.0a0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

eventsourcing/postgres.py CHANGED
@@ -5,15 +5,17 @@ import logging
  from asyncio import CancelledError
  from contextlib import contextmanager
  from threading import Thread
- from typing import TYPE_CHECKING, Any, Callable, cast
+ from typing import TYPE_CHECKING, Any, Callable, Literal, NamedTuple, cast

  import psycopg
  import psycopg.errors
  import psycopg_pool
  from psycopg import Connection, Cursor, Error
+ from psycopg.errors import DuplicateObject
  from psycopg.generators import notifies
  from psycopg.rows import DictRow, dict_row
  from psycopg.sql import SQL, Composed, Identifier
+ from psycopg.types.composite import CompositeInfo, register_composite
  from typing_extensions import TypeVar

  from eventsourcing.persistence import (
@@ -46,6 +48,7 @@ if TYPE_CHECKING:
  from psycopg.abc import Query
  from typing_extensions import Self

+
  logging.getLogger("psycopg.pool").setLevel(logging.ERROR)
  logging.getLogger("psycopg").setLevel(logging.ERROR)

@@ -58,6 +61,13 @@ logging.getLogger("psycopg").setLevel(logging.ERROR)
  NO_TRACEBACK = (Error, KeyboardInterrupt, CancelledError)


+ class PgStoredEvent(NamedTuple):
+ originator_id: UUID | str
+ originator_version: int
+ topic: str
+ state: bytes
+
+
  class ConnectionPool(psycopg_pool.ConnectionPool[Any]):
  def __init__(
  self,
@@ -95,13 +105,29 @@ class PostgresDatastore:
  pool_open_timeout: float | None = None,
  get_password_func: Callable[[], str] | None = None,
  single_row_tracking: bool = True,
+ originator_id_type: Literal["uuid", "text"] = "uuid",
+ enable_db_functions: bool = False,
  ):
  self.idle_in_transaction_session_timeout = idle_in_transaction_session_timeout
  self.pre_ping = pre_ping
  self.pool_open_timeout = pool_open_timeout
  self.single_row_tracking = single_row_tracking
+ self.lock_timeout = lock_timeout
+ self.schema = schema.strip() or "public"
+ if originator_id_type.lower() not in ("uuid", "text"):
+ msg = (
+ f"Invalid originator_id_type '{originator_id_type}', "
+ f"must be 'uuid' or 'text'"
+ )
+ raise ValueError(msg)
+ self.originator_id_type = originator_id_type.lower()
+
+ self.enable_db_functions = enable_db_functions

  check = ConnectionPool.check_connection if pre_ping else None
+ self.db_type_names = set[str]()
+ self.psycopg_type_adapters: dict[str, CompositeInfo] = {}
+ self.psycopg_python_types: dict[str, Any] = {}
  self.pool = ConnectionPool(
  get_password_func=get_password_func,
  connection_class=Connection[DictRow],
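
The two keyword arguments added above let the originator ID column be declared as text instead of uuid, and switch inserts over to a server-side function. A minimal sketch of how they might be passed, where the connection settings (dbname, host, port, user, password) are placeholder values:

    from eventsourcing.postgres import PostgresDatastore

    datastore = PostgresDatastore(
        dbname="eventsourcing",      # placeholder connection settings
        host="127.0.0.1",
        port="5432",
        user="eventsourcing",
        password="eventsourcing",
        originator_id_type="text",   # new: "uuid" (default) or "text"
        enable_db_functions=True,    # new: insert via a PL/pgSQL function (default False)
    )
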
@@ -122,20 +148,45 @@ class PostgresDatastore:
  max_lifetime=conn_max_age,
  check=check,
  )
- self.lock_timeout = lock_timeout
- self.schema = schema.strip() or "public"

  def after_connect_func(self) -> Callable[[Connection[Any]], None]:
- statement = SQL("SET idle_in_transaction_session_timeout = '{0}ms'").format(
- int(self.idle_in_transaction_session_timeout * 1000)
- )
+ set_idle_in_transaction_session_timeout_statement = SQL(
+ "SET idle_in_transaction_session_timeout = '{0}ms'"
+ ).format(int(self.idle_in_transaction_session_timeout * 1000))

+ # Avoid passing a bound method to the pool,
+ # to avoid creating a circular ref to self.
  def after_connect(conn: Connection[DictRow]) -> None:
+ # Put connection in auto-commit mode.
  conn.autocommit = True
- conn.cursor().execute(statement)
+
+ # Set idle in transaction session timeout.
+ conn.cursor().execute(set_idle_in_transaction_session_timeout_statement)

  return after_connect

+ def register_type_adapters(self) -> None:
+ # Construct and/or register composite type adapters.
+ unregistered_names = [
+ name
+ for name in self.db_type_names
+ if name not in self.psycopg_type_adapters
+ ]
+ if not unregistered_names:
+ return
+ with self.get_connection() as conn:
+ for name in unregistered_names:
+ # Construct type adapter from database info.
+ info = CompositeInfo.fetch(conn, f"{self.schema}.{name}")
+ if info is None:
+ continue
+ # Register the type adapter centrally.
+ register_composite(info, conn)
+ # Cache the python type for our own use.
+ self.psycopg_type_adapters[name] = info
+ assert info.python_type is not None, info
+ self.psycopg_python_types[name] = info.python_type
+
  @contextmanager
  def get_connection(self) -> Iterator[Connection[DictRow]]:
  try:
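
The new register_type_adapters() method relies on psycopg's composite-type support. A standalone sketch of the same pattern, assuming a stored_event_uuid composite type already exists in the public schema and using a placeholder connection string:

    import psycopg
    from psycopg.types.composite import CompositeInfo, register_composite

    with psycopg.connect("dbname=eventsourcing") as conn:  # placeholder DSN
        # Fetch the composite type's metadata from the database catalog.
        info = CompositeInfo.fetch(conn, "public.stored_event_uuid")
        if info is not None:
            # Adapt the composite type on this connection.
            register_composite(info, conn)
            # psycopg generates a named-tuple-like Python type for rows of this type.
            StoredEventRow = info.python_type
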
@@ -144,6 +195,11 @@ class PostgresDatastore:
  self.pool.open(wait, timeout)

  with self.pool.connection() as conn:
+ # Make sure the connection has the type adapters.
+ for info in self.psycopg_type_adapters.values():
+ if not conn.adapters.types.get(info.oid):
+ register_composite(info, conn)
+ # Yield connection.
  yield conn
  except psycopg.InterfaceError as e:
  # conn.close()
@@ -170,13 +226,19 @@ class PostgresDatastore:
  # conn.close()
  raise

+ @contextmanager
+ def cursor(self) -> Iterator[Cursor[DictRow]]:
+ with self.get_connection() as conn:
+ yield conn.cursor()
+
  @contextmanager
  def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
  with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
  yield conn.cursor()

  def close(self) -> None:
- self.pool.close()
+ with contextlib.suppress(AttributeError):
+ self.pool.close()

  def __enter__(self) -> Self:
  return self
@@ -202,24 +264,46 @@ class PostgresRecorder:
  datastore: PostgresDatastore,
  ):
  self.datastore = datastore
- self.create_table_statements = self.construct_create_table_statements()
+ self.sql_create_statements: list[Composed] = []

  @staticmethod
- def check_table_name_length(table_name: str) -> None:
+ def check_identifier_length(table_name: str) -> None:
  if len(table_name) > PostgresRecorder.MAX_IDENTIFIER_LEN:
- msg = f"Table name too long: {table_name}"
+ msg = f"Identifier too long: {table_name}"
  raise ProgrammingError(msg)

- def construct_create_table_statements(self) -> list[Composed]:
- return []
-
  def create_table(self) -> None:
+ # Create composite types.
+ for statement in self.sql_create_statements:
+ if "CREATE TYPE" in statement.as_string():
+ # Do in own transaction, because there is no 'IF NOT EXISTS' option
+ # when creating types, and if exists, then a DuplicateObject error
+ # is raised, terminating the transaction and causing an opaque error.
+ with (
+ self.datastore.transaction(commit=True) as curs,
+ contextlib.suppress(DuplicateObject),
+ ):
+ curs.execute(statement, prepare=False)
+ # try:
+ # except psycopg.errors.SyntaxError as e:
+ # msg = f"Syntax error: '{e}' in: {statement.as_string()}"
+ # raise ProgrammingError(msg) from e
+
+ # Create tables, indexes, types, functions, and procedures.
  with self.datastore.transaction(commit=True) as curs:
  self._create_table(curs)

+ # Register type adapters.
+ self.datastore.register_type_adapters()
+
  def _create_table(self, curs: Cursor[DictRow]) -> None:
- for statement in self.create_table_statements:
- curs.execute(statement, prepare=False)
+ for statement in self.sql_create_statements:
+ if "CREATE TYPE" not in statement.as_string():
+ try:
+ curs.execute(statement, prepare=False)
+ except psycopg.errors.SyntaxError as e:
+ msg = f"Syntax error: '{e}' in: {statement.as_string()}"
+ raise ProgrammingError(msg) from e


  class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
@@ -230,82 +314,102 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
  events_table_name: str = "stored_events",
  ):
  super().__init__(datastore)
- self.check_table_name_length(events_table_name)
+ self.check_identifier_length(events_table_name)
  self.events_table_name = events_table_name
  # Index names can't be qualified names, but
  # are created in the same schema as the table.
  self.notification_id_index_name = (
  f"{self.events_table_name}_notification_id_idx"
  )
- self.create_table_statements.append(
+
+ self.stored_event_type_name = (
+ f"stored_event_{self.datastore.originator_id_type}"
+ )
+ self.datastore.db_type_names.add(self.stored_event_type_name)
+ self.datastore.register_type_adapters()
+ self.create_table_statement_index = len(self.sql_create_statements)
+ self.sql_create_statements.append(
  SQL(
- "CREATE TABLE IF NOT EXISTS {0}.{1} ("
- "originator_id uuid NOT NULL, "
+ "CREATE TABLE IF NOT EXISTS {schema}.{table} ("
+ "originator_id {originator_id_type} NOT NULL, "
  "originator_version bigint NOT NULL, "
  "topic text, "
  "state bytea, "
  "PRIMARY KEY "
  "(originator_id, originator_version)) "
- "WITH (autovacuum_enabled=false)"
+ "WITH ("
+ " autovacuum_enabled = true,"
+ " autovacuum_vacuum_threshold = 100000000,"
+ " autovacuum_vacuum_scale_factor = 0.5,"
+ " autovacuum_analyze_threshold = 1000,"
+ " autovacuum_analyze_scale_factor = 0.01"
+ ")"
  ).format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
+ originator_id_type=Identifier(self.datastore.originator_id_type),
  )
  )

  self.insert_events_statement = SQL(
- "INSERT INTO {0}.{1} VALUES (%s, %s, %s, %s)"
+ " INSERT INTO {schema}.{table} AS t ("
+ " originator_id, originator_version, topic, state)"
+ " SELECT originator_id, originator_version, topic, state"
+ " FROM unnest(%s::{schema}.{stored_event_type}[])"
  ).format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
+ stored_event_type=Identifier(self.stored_event_type_name),
  )

  self.select_events_statement = SQL(
- "SELECT * FROM {0}.{1} WHERE originator_id = %s"
+ "SELECT * FROM {schema}.{table} WHERE originator_id = %s"
  ).format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
  )

  self.lock_table_statements: list[Query] = []

+ self.sql_create_statements.append(
+ SQL(
+ "CREATE TYPE {schema}.{name} "
+ "AS (originator_id {originator_id_type}, "
+ "originator_version bigint, "
+ "topic text, "
+ "state bytea)"
+ ).format(
+ schema=Identifier(self.datastore.schema),
+ name=Identifier(self.stored_event_type_name),
+ originator_id_type=Identifier(self.datastore.originator_id_type),
+ )
+ )
+
+ def construct_pg_stored_event(
+ self,
+ originator_id: UUID | str,
+ originator_version: int,
+ topic: str,
+ state: bytes,
+ ) -> PgStoredEvent:
+ try:
+ return self.datastore.psycopg_python_types[self.stored_event_type_name](
+ originator_id, originator_version, topic, state
+ )
+ except KeyError:
+ msg = f"Composite type '{self.stored_event_type_name}' not found"
+ raise ProgrammingError(msg) from None
+
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
  def insert_events(
  self, stored_events: Sequence[StoredEvent], **kwargs: Any
  ) -> Sequence[int] | None:
- exc: Exception | None = None
- notification_ids: Sequence[int] | None = None
- with self.datastore.get_connection() as conn:
- with conn.pipeline() as pipeline, conn.transaction():
- # Do other things first, so they can be pipelined too.
- with conn.cursor() as curs:
- self._insert_events(curs, stored_events, **kwargs)
- # Then use a different cursor for the executemany() call.
- with conn.cursor() as curs:
- try:
- self._insert_stored_events(curs, stored_events, **kwargs)
- # Sync now, so any uniqueness constraint violation causes an
- # IntegrityError to be raised here, rather an InternalError
- # being raised sometime later e.g. when commit() is called.
- pipeline.sync()
- notification_ids = self._fetch_ids_after_insert_events(
- curs, stored_events, **kwargs
- )
- except Exception as e:
- # Avoid psycopg emitting a pipeline warning.
- exc = e
- if exc:
- # Reraise exception after pipeline context manager has exited.
- raise exc
- return notification_ids
-
- def _insert_events(
- self,
- curs: Cursor[DictRow],
- stored_events: Sequence[StoredEvent],
- **_: Any,
- ) -> None:
- pass
+ # Only do something if there is something to do.
+ if len(stored_events) > 0:
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
+ assert conn.autocommit
+ self._insert_stored_events(curs, stored_events, **kwargs)
+ return None

  def _insert_stored_events(
  self,
@@ -313,40 +417,21 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
  stored_events: Sequence[StoredEvent],
  **_: Any,
  ) -> None:
- # Only do something if there is something to do.
- if len(stored_events) > 0:
- self._lock_table(curs)
-
- self._notify_channel(curs)
-
- # Insert events.
- curs.executemany(
- query=self.insert_events_statement,
- params_seq=[
- (
- stored_event.originator_id,
- stored_event.originator_version,
- stored_event.topic,
- stored_event.state,
- )
- for stored_event in stored_events
- ],
- returning="RETURNING" in self.insert_events_statement.as_string(),
+ # Construct composite type.
+ pg_stored_events = [
+ self.construct_pg_stored_event(
+ stored_event.originator_id,
+ stored_event.originator_version,
+ stored_event.topic,
+ stored_event.state,
  )
-
- def _lock_table(self, curs: Cursor[DictRow]) -> None:
- pass
-
- def _notify_channel(self, curs: Cursor[DictRow]) -> None:
- pass
-
- def _fetch_ids_after_insert_events(
- self,
- curs: Cursor[DictRow],
- stored_events: Sequence[StoredEvent],
- **kwargs: Any,
- ) -> Sequence[int] | None:
- return None
+ for stored_event in stored_events
+ ]
+ # Insert events.
+ curs.execute(
+ query=self.insert_events_statement,
+ params=(pg_stored_events,),
+ )

  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
  def select_events(
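
With this change, a batch of stored events is sent in a single execute() call as an array of the registered composite type, replacing the previous executemany() call. A rough sketch of the equivalent statement for the default configuration (schema "public", originator_id_type "uuid"); curs and StoredEventRow continue the earlier sketch and the values are placeholders:

    from uuid import uuid4

    originator_id = uuid4()
    rows = [
        StoredEventRow(originator_id, 1, "example:Created", b"{}"),
        StoredEventRow(originator_id, 2, "example:Updated", b"{}"),
    ]
    curs.execute(
        'INSERT INTO "public"."stored_events" AS t '
        "(originator_id, originator_version, topic, state) "
        "SELECT originator_id, originator_version, topic, state "
        'FROM unnest(%s::"public"."stored_event_uuid"[])',
        (rows,),
    )
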
@@ -396,42 +481,47 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  events_table_name: str = "stored_events",
  ):
  super().__init__(datastore, events_table_name=events_table_name)
- self.create_table_statements[-1] = SQL(
- "CREATE TABLE IF NOT EXISTS {0}.{1} ("
- "originator_id uuid NOT NULL, "
+ self.sql_create_statements[self.create_table_statement_index] = SQL(
+ "CREATE TABLE IF NOT EXISTS {schema}.{table} ("
+ "originator_id {originator_id_type} NOT NULL, "
  "originator_version bigint NOT NULL, "
  "topic text, "
  "state bytea, "
  "notification_id bigserial, "
  "PRIMARY KEY "
  "(originator_id, originator_version)) "
- "WITH (autovacuum_enabled=false)"
+ "WITH ("
+ " autovacuum_enabled = true,"
+ " autovacuum_vacuum_threshold = 100000000,"
+ " autovacuum_vacuum_scale_factor = 0.5,"
+ " autovacuum_analyze_threshold = 1000,"
+ " autovacuum_analyze_scale_factor = 0.01"
+ ")"
  ).format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
+ originator_id_type=Identifier(self.datastore.originator_id_type),
  )

- self.create_table_statements.append(
+ self.sql_create_statements.append(
  SQL(
- "CREATE UNIQUE INDEX IF NOT EXISTS {0} "
- "ON {1}.{2} (notification_id ASC);"
+ "CREATE UNIQUE INDEX IF NOT EXISTS {index} "
+ "ON {schema}.{table} (notification_id ASC);"
  ).format(
- Identifier(self.notification_id_index_name),
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ index=Identifier(self.notification_id_index_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
  )
  )

  self.channel_name = self.events_table_name.replace(".", "_")
- self.insert_events_statement = self.insert_events_statement + SQL(
- " RETURNING notification_id"
- )
+ self.insert_events_statement += SQL(" RETURNING notification_id")

  self.max_notification_id_statement = SQL(
- "SELECT MAX(notification_id) FROM {0}.{1}"
+ "SELECT MAX(notification_id) FROM {schema}.{table}"
  ).format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
  )

  self.lock_table_statements = [
@@ -442,6 +532,108 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  ),
  ]

+ self.pg_function_name_insert_events = (
+ f"es_insert_events_{self.datastore.originator_id_type}"
+ )
+ self.sql_invoke_pg_function_insert_events = SQL(
+ "SELECT * FROM {insert_events}((%s))"
+ ).format(insert_events=Identifier(self.pg_function_name_insert_events))
+
+ self.sql_create_pg_function_insert_events = SQL(
+ "CREATE OR REPLACE FUNCTION {insert_events}(events {schema}.{event}[]) "
+ "RETURNS SETOF bigint "
+ "LANGUAGE plpgsql "
+ "AS "
+ "$BODY$"
+ "BEGIN"
+ " SET LOCAL lock_timeout = '{lock_timeout}s';"
+ " NOTIFY {channel};"
+ " RETURN QUERY"
+ " INSERT INTO {schema}.{table} AS t ("
+ " originator_id, originator_version, topic, state)"
+ " SELECT originator_id, originator_version, topic, state"
+ " FROM unnest(events)"
+ " RETURNING notification_id;"
+ "END;"
+ "$BODY$"
+ ).format(
+ insert_events=Identifier(self.pg_function_name_insert_events),
+ lock_timeout=self.datastore.lock_timeout,
+ channel=Identifier(self.channel_name),
+ event=Identifier(self.stored_event_type_name),
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
+ )
+ self.create_insert_function_statement_index = len(self.sql_create_statements)
+ self.sql_create_statements.append(self.sql_create_pg_function_insert_events)
+
+ @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
+ def insert_events(
+ self, stored_events: Sequence[StoredEvent], **kwargs: Any
+ ) -> Sequence[int] | None:
+ if self.datastore.enable_db_functions:
+ pg_stored_events = [
+ self.construct_pg_stored_event(
+ originator_id=e.originator_id,
+ originator_version=e.originator_version,
+ topic=e.topic,
+ state=e.state,
+ )
+ for e in stored_events
+ ]
+ with self.datastore.get_connection() as conn, conn.cursor() as curs:
+ curs.execute(
+ self.sql_invoke_pg_function_insert_events,
+ (pg_stored_events,),
+ prepare=True,
+ )
+ return [r[self.pg_function_name_insert_events] for r in curs.fetchall()]
+
+ exc: Exception | None = None
+ notification_ids: Sequence[int] | None = None
+ with self.datastore.get_connection() as conn:
+ with conn.pipeline() as pipeline, conn.transaction():
+ # Do other things first, so they can be pipelined too.
+ with conn.cursor() as curs:
+ self._insert_events(curs, stored_events, **kwargs)
+ # Then use a different cursor for the executemany() call.
+ if len(stored_events) > 0:
+ with conn.cursor() as curs:
+ try:
+ self._insert_stored_events(curs, stored_events, **kwargs)
+ # Sync now, so any uniqueness constraint violation causes an
+ # IntegrityError to be raised here, rather an InternalError
+ # being raised sometime later e.g. when commit() is called.
+ pipeline.sync()
+ notification_ids = self._fetch_ids_after_insert_events(
+ curs, stored_events, **kwargs
+ )
+ except Exception as e:
+ # Avoid psycopg emitting a pipeline warning.
+ exc = e
+ if exc:
+ # Reraise exception after pipeline context manager has exited.
+ raise exc
+ return notification_ids
+
+ def _insert_events(
+ self,
+ curs: Cursor[DictRow],
+ stored_events: Sequence[StoredEvent],
+ **_: Any,
+ ) -> None:
+ pass
+
+ def _insert_stored_events(
+ self,
+ curs: Cursor[DictRow],
+ stored_events: Sequence[StoredEvent],
+ **kwargs: Any,
+ ) -> None:
+ self._lock_table(curs)
+ self._notify_channel(curs)
+ super()._insert_stored_events(curs, stored_events, **kwargs)
+
  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
  def select_notifications(
  self,
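
When enable_db_functions is set, the fast path above reduces an insert to one prepared statement that calls the generated function, which sets the lock timeout, issues NOTIFY, and returns the new notification IDs. A sketch of reading the returned SETOF bigint back through the datastore's dictionary-row cursors, assuming the default "uuid" configuration and reusing the pg_stored_events list shown in the diff:

    curs.execute(
        "SELECT * FROM es_insert_events_uuid((%s))",
        (pg_stored_events,),
        prepare=True,
    )
    # With dict_row, each returned row is keyed by the function name.
    notification_ids = [row["es_insert_events_uuid"] for row in curs.fetchall()]
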
@@ -456,9 +648,9 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  from 'start', limited by 'limit'.
  """
  params: list[int | str | Sequence[str]] = []
- statement = SQL("SELECT * FROM {0}.{1}").format(
- Identifier(self.datastore.schema),
- Identifier(self.events_table_name),
+ statement = SQL("SELECT * FROM {schema}.{table}").format(
+ schema=Identifier(self.datastore.schema),
+ table=Identifier(self.events_table_name),
  )
  has_where = False
  if start is not None:
@@ -521,7 +713,7 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  def _lock_table(self, curs: Cursor[DictRow]) -> None:
  # Acquire "EXCLUSIVE" table lock, to serialize transactions that insert
  # stored events, so that readers don't pass over gaps that are filled in
- # later. We want each transaction that will be issued with notifications
+ # later. We want each transaction that will be issued with notification
  # IDs by the notification ID sequence to receive all its notification IDs
  # and then commit, before another transaction is issued with any notification
  # IDs. In other words, we want the insert order to be the same as the commit
@@ -549,20 +741,18 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  self,
  curs: Cursor[DictRow],
  stored_events: Sequence[StoredEvent],
- **kwargs: Any,
+ **_: Any,
  ) -> Sequence[int] | None:
  notification_ids: list[int] = []
- len_events = len(stored_events)
- if len_events:
- while curs.nextset() and len(notification_ids) != len_events:
- if curs.statusmessage and curs.statusmessage.startswith("INSERT"):
- row = curs.fetchone()
- assert row is not None
- notification_ids.append(row["notification_id"])
- if len(notification_ids) != len(stored_events):
- msg = "Couldn't get all notification IDs "
- msg += f"(got {len(notification_ids)}, expected {len(stored_events)})"
- raise ProgrammingError(msg)
+ assert curs.statusmessage and curs.statusmessage.startswith(
+ "INSERT"
+ ), curs.statusmessage
+ try:
+ notification_ids = [row["notification_id"] for row in curs.fetchall()]
+ except psycopg.ProgrammingError as e:
+ msg = "Couldn't get all notification IDs "
+ msg += f"(got {len(notification_ids)}, expected {len(stored_events)})"
+ raise ProgrammingError(msg) from e
  return notification_ids

  def subscribe(
@@ -620,7 +810,7 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  **kwargs: Any,
  ):
  super().__init__(datastore, **kwargs)
- self.check_table_name_length(tracking_table_name)
+ self.check_identifier_length(tracking_table_name)
  self.tracking_table_name = tracking_table_name
  self.tracking_table_exists: bool = False
  self.tracking_migration_previous: int | None = None
@@ -629,13 +819,20 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  self.has_checked_for_multi_row_tracking_table: bool = False
  if self.datastore.single_row_tracking:
  # For single-row tracking.
- self.create_table_statements.append(
+ self.sql_create_statements.append(
  SQL(
  "CREATE TABLE IF NOT EXISTS {0}.{1} ("
  "application_name text, "
  "notification_id bigint, "
  "PRIMARY KEY "
  "(application_name))"
+ "WITH ("
+ " autovacuum_enabled = true,"
+ " autovacuum_vacuum_threshold = 100000000,"
+ " autovacuum_vacuum_scale_factor = 0.5,"
+ " autovacuum_analyze_threshold = 1000,"
+ " autovacuum_analyze_scale_factor = 0.01"
+ ")"
  ).format(
  Identifier(self.datastore.schema),
  Identifier(self.tracking_table_name),
@@ -654,13 +851,20 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  )
  )
  else:
  # For legacy multi-row tracking.
- self.create_table_statements.append(
+ self.sql_create_statements.append(
  SQL(
  "CREATE TABLE IF NOT EXISTS {0}.{1} ("
  "application_name text, "
  "notification_id bigint, "
  "PRIMARY KEY "
  "(application_name, notification_id))"
+ "WITH ("
+ " autovacuum_enabled = true,"
+ " autovacuum_vacuum_threshold = 100000000,"
+ " autovacuum_vacuum_scale_factor = 0.5,"
+ " autovacuum_analyze_threshold = 1000,"
+ " autovacuum_analyze_scale_factor = 0.01"
+ ")"
  ).format(
  Identifier(self.datastore.schema),
@@ -873,6 +1077,8 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  )
  POSTGRES_SCHEMA = "POSTGRES_SCHEMA"
  POSTGRES_SINGLE_ROW_TRACKING = "SINGLE_ROW_TRACKING"
+ ORIGINATOR_ID_TYPE = "ORIGINATOR_ID_TYPE"
+ POSTGRES_ENABLE_DB_FUNCTIONS = "POSTGRES_ENABLE_DB_FUNCTIONS"
  CREATE_TABLE = "CREATE_TABLE"

  aggregate_recorder_class = PostgresAggregateRecorder
@@ -1038,6 +1244,21 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  self.env.get(self.POSTGRES_SINGLE_ROW_TRACKING, "t")
  )

+ originator_id_type = cast(
+ Literal["uuid", "text"],
+ self.env.get(self.ORIGINATOR_ID_TYPE, "uuid"),
+ )
+ if originator_id_type.lower() not in ("uuid", "text"):
+ msg = (
+ f"Invalid {self.ORIGINATOR_ID_TYPE} '{originator_id_type}', "
+ f"must be 'uuid' or 'text'"
+ )
+ raise OSError(msg)
+
+ enable_db_functions = strtobool(
+ self.env.get(self.POSTGRES_ENABLE_DB_FUNCTIONS) or "no"
+ )
+
  self.datastore = PostgresDatastore(
  dbname=dbname,
  host=host,
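
The factory reads the new settings from the environment using the constant names added above. A configuration sketch; the other keys shown (PERSISTENCE_MODULE, POSTGRES_DBNAME, and so on) and their values are illustrative placeholders:

    import os

    os.environ["PERSISTENCE_MODULE"] = "eventsourcing.postgres"
    os.environ["POSTGRES_DBNAME"] = "eventsourcing"
    os.environ["POSTGRES_HOST"] = "127.0.0.1"
    os.environ["POSTGRES_PORT"] = "5432"
    os.environ["POSTGRES_USER"] = "eventsourcing"
    os.environ["POSTGRES_PASSWORD"] = "eventsourcing"
    os.environ["ORIGINATOR_ID_TYPE"] = "text"          # "uuid" (default) or "text"
    os.environ["POSTGRES_ENABLE_DB_FUNCTIONS"] = "y"   # parsed with strtobool(); default "no"
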
@@ -1055,6 +1276,8 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  schema=schema,
  get_password_func=get_password_func,
  single_row_tracking=single_row_tracking,
+ originator_id_type=originator_id_type,
+ enable_db_functions=enable_db_functions,
  )

  def env_create_table(self) -> bool: