eventsourcing 9.4.0a8__py3-none-any.whl → 9.4.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

Potentially problematic release: this version of eventsourcing might be problematic.

eventsourcing/postgres.py CHANGED
@@ -5,7 +5,7 @@ import logging
  from asyncio import CancelledError
  from contextlib import contextmanager
  from threading import Thread
- from typing import TYPE_CHECKING, Any, Callable
+ from typing import TYPE_CHECKING, Any, Callable, cast

  import psycopg
  import psycopg.errors
@@ -13,6 +13,8 @@ import psycopg_pool
  from psycopg import Connection, Cursor, Error
  from psycopg.generators import notifies
  from psycopg.rows import DictRow, dict_row
+ from psycopg.sql import SQL, Composed, Identifier
+ from typing_extensions import TypeVar

  from eventsourcing.persistence import (
  AggregateRecorder,
@@ -41,6 +43,7 @@ if TYPE_CHECKING:
  from collections.abc import Iterator, Sequence
  from uuid import UUID

+ from psycopg.abc import Query
  from typing_extensions import Self

  logging.getLogger("psycopg.pool").setLevel(logging.CRITICAL)
@@ -118,12 +121,11 @@ class PostgresDatastore:
  check=check,
  )
  self.lock_timeout = lock_timeout
- self.schema = schema.strip()
+ self.schema = schema.strip() or "public"

  def after_connect_func(self) -> Callable[[Connection[Any]], None]:
- statement = (
- "SET idle_in_transaction_session_timeout = "
- f"'{self.idle_in_transaction_session_timeout}s'"
+ statement = SQL("SET idle_in_transaction_session_timeout = '{0}s'").format(
+ self.idle_in_transaction_session_timeout
  )

  def after_connect(conn: Connection[DictRow]) -> None:
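
The change above swaps f-string interpolation for psycopg's sql composition objects. As a rough, standalone sketch of that API (not code from the package; the schema, table, and timeout values below are invented), SQL fragments are combined with format(), identifiers are quoted by Identifier, and values by Literal:

# Illustrative sketch only, assuming psycopg 3 is installed; the names and the
# timeout value are invented for this example.
from psycopg.sql import SQL, Identifier, Literal

set_timeout = SQL("SET idle_in_transaction_session_timeout = {0}").format(Literal("5s"))
create_table = SQL("CREATE TABLE IF NOT EXISTS {0}.{1} (id bigint)").format(
    Identifier("my_schema"),
    Identifier("my_table"),
)

# as_string(None) renders with default quoting; passing a connection instead
# would use server-aware quoting. Expected output, roughly:
#   SET idle_in_transaction_session_timeout = '5s'
#   CREATE TABLE IF NOT EXISTS "my_schema"."my_table" (id bigint)
print(set_timeout.as_string(None))
print(create_table.as_string(None))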
@@ -168,7 +170,6 @@ class PostgresDatastore:

  @contextmanager
  def transaction(self, *, commit: bool = False) -> Iterator[Cursor[DictRow]]:
- conn: Connection[DictRow]
  with self.get_connection() as conn, conn.transaction(force_rollback=not commit):
  yield conn.cursor()

@@ -195,17 +196,12 @@ class PostgresRecorder:
  self.datastore = datastore
  self.create_table_statements = self.construct_create_table_statements()

- def construct_create_table_statements(self) -> list[str]:
+ def construct_create_table_statements(self) -> list[Composed]:
  return []

  def check_table_name_length(self, table_name: str) -> None:
- schema_prefix = self.datastore.schema + "."
- if table_name.startswith(schema_prefix):
- unqualified_table_name = table_name[len(schema_prefix) :]
- else:
- unqualified_table_name = table_name
- if len(unqualified_table_name) > 63:
- msg = f"Table name too long: {unqualified_table_name}"
+ if len(table_name) > 63:
+ msg = f"Table name too long: {table_name}"
  raise ProgrammingError(msg)

  def create_table(self) -> None:
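
The simplified check above validates the now-unqualified table name against PostgreSQL's identifier limit: names longer than 63 bytes (NAMEDATALEN - 1) are silently truncated by the server. A tiny standalone sketch of the same rule, using ValueError in place of the package's ProgrammingError:

# Illustrative sketch only: PostgreSQL truncates identifiers longer than 63 bytes
# (NAMEDATALEN - 1), so over-long table names would not round-trip safely.
MAX_IDENTIFIER_LENGTH = 63

def check_table_name_length(table_name: str) -> None:
    # Mirrors the simplified check above, but raises ValueError instead of the
    # package's ProgrammingError.
    if len(table_name) > MAX_IDENTIFIER_LENGTH:
        msg = f"Table name too long: {table_name}"
        raise ValueError(msg)

check_table_name_length("stored_events")  # passes
# check_table_name_length("x" * 64)       # would raise ValueError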
@@ -226,38 +222,45 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
  self.events_table_name = events_table_name
  # Index names can't be qualified names, but
  # are created in the same schema as the table.
- if "." in self.events_table_name:
- unqualified_table_name = self.events_table_name.split(".")[-1]
- else:
- unqualified_table_name = self.events_table_name
  self.notification_id_index_name = (
- f"{unqualified_table_name}_notification_id_idx "
+ f"{self.events_table_name}_notification_id_idx"
  )
  self.create_table_statements.append(
- "CREATE TABLE IF NOT EXISTS "
- f"{self.events_table_name} ("
- "originator_id uuid NOT NULL, "
- "originator_version bigint NOT NULL, "
- "topic text, "
- "state bytea, "
- "PRIMARY KEY "
- "(originator_id, originator_version)) "
- "WITH (autovacuum_enabled=false)"
+ SQL(
+ "CREATE TABLE IF NOT EXISTS {0}.{1} ("
+ "originator_id uuid NOT NULL, "
+ "originator_version bigint NOT NULL, "
+ "topic text, "
+ "state bytea, "
+ "PRIMARY KEY "
+ "(originator_id, originator_version)) "
+ "WITH (autovacuum_enabled=false)"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
+ )
  )

- self.insert_events_statement = (
- f"INSERT INTO {self.events_table_name} VALUES (%s, %s, %s, %s)"
+ self.insert_events_statement = SQL(
+ "INSERT INTO {0}.{1} VALUES (%s, %s, %s, %s)"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
  )
- self.select_events_statement = (
- f"SELECT * FROM {self.events_table_name} WHERE originator_id = %s"
+
+ self.select_events_statement = SQL(
+ "SELECT * FROM {0}.{1} WHERE originator_id = %s"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
  )
- self.lock_table_statements: list[str] = []
+
+ self.lock_table_statements: list[Query] = []

  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
  def insert_events(
  self, stored_events: list[StoredEvent], **kwargs: Any
  ) -> Sequence[int] | None:
- conn: Connection[DictRow]
  exc: Exception | None = None
  notification_ids: Sequence[int] | None = None
  with self.datastore.get_connection() as conn:
@@ -316,7 +319,7 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
  )
  for stored_event in stored_events
  ],
- returning="RETURNING" in self.insert_events_statement,
+ returning="RETURNING" in self.insert_events_statement.as_string(),
  )

  def _lock_table(self, curs: Cursor[DictRow]) -> None:
@@ -347,18 +350,18 @@ class PostgresAggregateRecorder(PostgresRecorder, AggregateRecorder):
  params: list[Any] = [originator_id]
  if gt is not None:
  params.append(gt)
- statement += " AND originator_version > %s"
+ statement += SQL(" AND originator_version > %s")
  if lte is not None:
  params.append(lte)
- statement += " AND originator_version <= %s"
- statement += " ORDER BY originator_version"
+ statement += SQL(" AND originator_version <= %s")
+ statement += SQL(" ORDER BY originator_version")
  if desc is False:
- statement += " ASC"
+ statement += SQL(" ASC")
  else:
- statement += " DESC"
+ statement += SQL(" DESC")
  if limit is not None:
  params.append(limit)
- statement += " LIMIT %s"
+ statement += SQL(" LIMIT %s")

  with self.datastore.get_connection() as conn, conn.cursor() as curs:
  curs.execute(statement, params, prepare=True)
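
As in the hunk above, composed statements can keep being extended piece by piece: SQL and Composed objects support +, while %s placeholders are still bound by execute(). A minimal sketch under those assumptions, with invented schema, table, and parameter values:

# Illustrative sketch only, with an invented schema, table, and parameters.
from psycopg.sql import SQL, Identifier

statement = SQL("SELECT * FROM {0}.{1} WHERE originator_id = %s").format(
    Identifier("public"),
    Identifier("stored_events"),
)
params = ["2f88a1c6-0000-0000-0000-000000000000"]

limit = 10
if limit is not None:
    # Appending an SQL fragment keeps the statement a Composed object.
    statement += SQL(" ORDER BY originator_version LIMIT %s")
    params.append(limit)

# With a real connection the composed statement and params would be executed as:
#     curs.execute(statement, params, prepare=True)
print(statement.as_string(None))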
@@ -381,9 +384,8 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  events_table_name: str = "stored_events",
  ):
  super().__init__(datastore, events_table_name=events_table_name)
- self.create_table_statements[-1] = (
- "CREATE TABLE IF NOT EXISTS "
- f"{self.events_table_name} ("
+ self.create_table_statements[-1] = SQL(
+ "CREATE TABLE IF NOT EXISTS {0}.{1} ("
  "originator_id uuid NOT NULL, "
  "originator_version bigint NOT NULL, "
  "topic text, "
@@ -392,20 +394,40 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  "PRIMARY KEY "
  "(originator_id, originator_version)) "
  "WITH (autovacuum_enabled=false)"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
  )
+
  self.create_table_statements.append(
- "CREATE UNIQUE INDEX IF NOT EXISTS "
- f"{self.notification_id_index_name}"
- f"ON {self.events_table_name} (notification_id ASC);"
+ SQL(
+ "CREATE UNIQUE INDEX IF NOT EXISTS {0} "
+ "ON {1}.{2} (notification_id ASC);"
+ ).format(
+ Identifier(self.notification_id_index_name),
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
+ )
  )
+
  self.channel_name = self.events_table_name.replace(".", "_")
- self.insert_events_statement += " RETURNING notification_id"
- self.max_notification_id_statement = (
- f"SELECT MAX(notification_id) FROM {self.events_table_name}"
+ self.insert_events_statement = self.insert_events_statement + SQL(
+ " RETURNING notification_id"
+ )
+
+ self.max_notification_id_statement = SQL(
+ "SELECT MAX(notification_id) FROM {0}.{1}"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
  )
+
  self.lock_table_statements = [
- f"SET LOCAL lock_timeout = '{self.datastore.lock_timeout}s'",
- f"LOCK TABLE {self.events_table_name} IN EXCLUSIVE MODE",
+ SQL("SET LOCAL lock_timeout = '{0}s'").format(self.datastore.lock_timeout),
+ SQL("LOCK TABLE {0}.{1} IN EXCLUSIVE MODE").format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
+ ),
  ]

  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
@@ -424,37 +446,44 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  """

  params: list[int | str | Sequence[str]] = []
- statement = f"SELECT * FROM {self.events_table_name}"
+ statement = SQL("SELECT * FROM {0}.{1}").format(
+ Identifier(self.datastore.schema),
+ Identifier(self.events_table_name),
+ )
  has_where = False
  if start is not None:
- statement += " WHERE"
+ statement += SQL(" WHERE")
  has_where = True
  params.append(start)
  if inclusive_of_start:
- statement += " notification_id>=%s"
+ statement += SQL(" notification_id>=%s")
  else:
- statement += " notification_id>%s"
+ statement += SQL(" notification_id>%s")

  if stop is not None:
  if not has_where:
  has_where = True
- statement += " WHERE"
+ statement += SQL(" WHERE")
  else:
- statement += " AND"
+ statement += SQL(" AND")

  params.append(stop)
- statement += " notification_id <= %s"
+ statement += SQL(" notification_id <= %s")

  if topics:
+ # Check sequence and ensure list of strings.
+ assert isinstance(topics, (tuple, list)), topics
+ topics = list(topics) if isinstance(topics, tuple) else topics
+ assert all(isinstance(t, str) for t in topics), topics
  if not has_where:
- statement += " WHERE"
+ statement += SQL(" WHERE")
  else:
- statement += " AND"
+ statement += SQL(" AND")
  params.append(topics)
- statement += " topic = ANY(%s)"
+ statement += SQL(" topic = ANY(%s)")

  params.append(limit)
- statement += " ORDER BY notification_id LIMIT %s"
+ statement += SQL(" ORDER BY notification_id LIMIT %s")

  connection = self.datastore.get_connection()
  with connection as conn, conn.cursor() as curs:
@@ -475,7 +504,6 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  """
  Returns the maximum notification ID.
  """
- conn: Connection[DictRow]
  with self.datastore.get_connection() as conn, conn.cursor() as curs:
  curs.execute(self.max_notification_id_statement)
  fetchone = curs.fetchone()
@@ -507,7 +535,7 @@ class PostgresApplicationRecorder(PostgresAggregateRecorder, ApplicationRecorder
  curs.execute(lock_statement, prepare=True)

  def _notify_channel(self, curs: Cursor[DictRow]) -> None:
- curs.execute("NOTIFY " + self.channel_name)
+ curs.execute(SQL("NOTIFY {0}").format(Identifier(self.channel_name)))

  def _fetch_ids_after_insert_events(
  self,
@@ -554,7 +582,9 @@ class PostgresSubscription(ListenNotifySubscription[PostgresApplicationRecorder]
  def _listen(self) -> None:
  try:
  with self._recorder.datastore.get_connection() as conn:
- conn.execute("LISTEN " + self._recorder.channel_name)
+ conn.execute(
+ SQL("LISTEN {0}").format(Identifier(self._recorder.channel_name))
+ )
  while not self._has_been_stopped and not self._thread_error:
  # This block simplifies psycopg's conn.notifies(), because
  # we aren't interested in the actual notify messages, and
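
These two hunks route the channel name for NOTIFY and LISTEN through Identifier instead of string concatenation, so the name is quoted as an SQL identifier. A small illustrative sketch with an invented channel name:

# Illustrative sketch only; "stored_events" is an invented channel name.
from psycopg.sql import SQL, Identifier

channel_name = "stored_events"
listen = SQL("LISTEN {0}").format(Identifier(channel_name))
notify = SQL("NOTIFY {0}").format(Identifier(channel_name))

# Expected rendering, roughly: LISTEN "stored_events" / NOTIFY "stored_events".
# With a real connection these would be passed to conn.execute(...).
print(listen.as_string(None))
print(notify.as_string(None))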
@@ -585,30 +615,42 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  self.check_table_name_length(tracking_table_name)
  self.tracking_table_name = tracking_table_name
  self.create_table_statements.append(
- "CREATE TABLE IF NOT EXISTS "
- f"{self.tracking_table_name} ("
- "application_name text, "
- "notification_id bigint, "
- "PRIMARY KEY "
- "(application_name, notification_id))"
+ SQL(
+ "CREATE TABLE IF NOT EXISTS {0}.{1} ("
+ "application_name text, "
+ "notification_id bigint, "
+ "PRIMARY KEY "
+ "(application_name, notification_id))"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.tracking_table_name),
+ )
  )
- self.insert_tracking_statement = (
- f"INSERT INTO {self.tracking_table_name} VALUES (%s, %s)"
+
+ self.insert_tracking_statement = SQL(
+ "INSERT INTO {0}.{1} VALUES (%s, %s)"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.tracking_table_name),
  )
- self.max_tracking_id_statement = (
- "SELECT MAX(notification_id) "
- f"FROM {self.tracking_table_name} "
- "WHERE application_name=%s"
+
+ self.max_tracking_id_statement = SQL(
+ "SELECT MAX(notification_id) FROM {0}.{1} WHERE application_name=%s"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.tracking_table_name),
  )
- self.count_tracking_id_statement = (
- "SELECT COUNT(*) "
- f"FROM {self.tracking_table_name} "
+
+ self.count_tracking_id_statement = SQL(
+ "SELECT COUNT(*) FROM {0}.{1} "
  "WHERE application_name=%s AND notification_id=%s"
+ ).format(
+ Identifier(self.datastore.schema),
+ Identifier(self.tracking_table_name),
  )

  @retry((InterfaceError, OperationalError), max_attempts=10, wait=0.2)
  def insert_tracking(self, tracking: Tracking) -> None:
- conn: Connection[DictRow]
  with (
  self.datastore.get_connection() as conn,
  conn.transaction(),
@@ -648,7 +690,6 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  ) -> bool:
  if notification_id is None:
  return True
- conn: Connection[DictRow]
  with self.datastore.get_connection() as conn, conn.cursor() as curs:
  curs.execute(
  query=self.count_tracking_id_statement,
@@ -660,6 +701,13 @@ class PostgresTrackingRecorder(PostgresRecorder, TrackingRecorder):
  return bool(fetchone["count"])


+ TPostgresTrackingRecorder = TypeVar(
+ "TPostgresTrackingRecorder",
+ bound=PostgresTrackingRecorder,
+ default=PostgresTrackingRecorder,
+ )
+
+
  class PostgresProcessRecorder(
  PostgresTrackingRecorder, PostgresApplicationRecorder, ProcessRecorder
  ):
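
The TypeVar introduced above comes from typing_extensions and carries a default (PEP 696), which is what lets the generic tracking_recorder() signature further down return the caller's subclass while still defaulting to PostgresTrackingRecorder. A rough sketch of the pattern with invented classes:

# Illustrative sketch only -- Recorder and SpecialRecorder are invented classes,
# not part of the eventsourcing package.
from __future__ import annotations

from typing import cast

from typing_extensions import TypeVar  # accepts default= (PEP 696)

class Recorder: ...
class SpecialRecorder(Recorder): ...

TRecorder = TypeVar("TRecorder", bound=Recorder, default=Recorder)

def make_recorder(cls: type[TRecorder] | None = None) -> TRecorder:
    if cls is None:
        # The default binds TRecorder to Recorder when no class is passed,
        # mirroring the cast used in the tracking_recorder() hunk below.
        cls = cast("type[TRecorder]", Recorder)
    return cls()

r1 = make_recorder()                 # type checkers see Recorder
r2 = make_recorder(SpecialRecorder)  # type checkers see SpecialRecorder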
@@ -887,8 +935,6 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
  prefix = self.env.name.lower() or "stored"
  events_table_name = prefix + "_" + purpose
- if self.datastore.schema:
- events_table_name = f"{self.datastore.schema}.{events_table_name}"
  recorder = type(self).aggregate_recorder_class(
  datastore=self.datastore,
  events_table_name=events_table_name,
@@ -900,9 +946,6 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  def application_recorder(self) -> ApplicationRecorder:
  prefix = self.env.name.lower() or "stored"
  events_table_name = prefix + "_events"
- if self.datastore.schema:
- events_table_name = f"{self.datastore.schema}.{events_table_name}"
-
  application_recorder_topic = self.env.get(self.APPLICATION_RECORDER_TOPIC)
  if application_recorder_topic:
  application_recorder_class: type[PostgresApplicationRecorder] = (
@@ -921,18 +964,18 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  return recorder

  def tracking_recorder(
- self, tracking_recorder_class: type[PostgresTrackingRecorder] | None = None
- ) -> PostgresTrackingRecorder:
+ self, tracking_recorder_class: type[TPostgresTrackingRecorder] | None = None
+ ) -> TPostgresTrackingRecorder:
  prefix = self.env.name.lower() or "notification"
  tracking_table_name = prefix + "_tracking"
- if self.datastore.schema:
- tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
  if tracking_recorder_class is None:
  tracking_recorder_topic = self.env.get(self.TRACKING_RECORDER_TOPIC)
  if tracking_recorder_topic:
  tracking_recorder_class = resolve_topic(tracking_recorder_topic)
  else:
- tracking_recorder_class = type(self).tracking_recorder_class
+ tracking_recorder_class = cast(
+ type[TPostgresTrackingRecorder], type(self).tracking_recorder_class
+ )
  assert tracking_recorder_class is not None
  assert issubclass(tracking_recorder_class, PostgresTrackingRecorder)
  recorder = tracking_recorder_class(
@@ -948,10 +991,6 @@ class PostgresFactory(InfrastructureFactory[PostgresTrackingRecorder]):
  events_table_name = prefix + "_events"
  prefix = self.env.name.lower() or "notification"
  tracking_table_name = prefix + "_tracking"
- if self.datastore.schema:
- events_table_name = f"{self.datastore.schema}.{events_table_name}"
- tracking_table_name = f"{self.datastore.schema}.{tracking_table_name}"
-
  process_recorder_topic = self.env.get(self.PROCESS_RECORDER_TOPIC)
  if process_recorder_topic:
  process_recorder_class: type[PostgresTrackingRecorder] = resolve_topic(