eventsourcing 9.5.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eventsourcing/__init__.py +0 -0
- eventsourcing/application.py +998 -0
- eventsourcing/cipher.py +107 -0
- eventsourcing/compressor.py +15 -0
- eventsourcing/cryptography.py +91 -0
- eventsourcing/dcb/__init__.py +0 -0
- eventsourcing/dcb/api.py +144 -0
- eventsourcing/dcb/application.py +159 -0
- eventsourcing/dcb/domain.py +369 -0
- eventsourcing/dcb/msgpack.py +38 -0
- eventsourcing/dcb/persistence.py +193 -0
- eventsourcing/dcb/popo.py +178 -0
- eventsourcing/dcb/postgres_tt.py +704 -0
- eventsourcing/dcb/tests.py +608 -0
- eventsourcing/dispatch.py +80 -0
- eventsourcing/domain.py +1964 -0
- eventsourcing/interface.py +164 -0
- eventsourcing/persistence.py +1429 -0
- eventsourcing/popo.py +267 -0
- eventsourcing/postgres.py +1441 -0
- eventsourcing/projection.py +502 -0
- eventsourcing/py.typed +0 -0
- eventsourcing/sqlite.py +816 -0
- eventsourcing/system.py +1203 -0
- eventsourcing/tests/__init__.py +3 -0
- eventsourcing/tests/application.py +483 -0
- eventsourcing/tests/domain.py +105 -0
- eventsourcing/tests/persistence.py +1744 -0
- eventsourcing/tests/postgres_utils.py +131 -0
- eventsourcing/utils.py +257 -0
- eventsourcing-9.5.0b3.dist-info/METADATA +253 -0
- eventsourcing-9.5.0b3.dist-info/RECORD +35 -0
- eventsourcing-9.5.0b3.dist-info/WHEEL +4 -0
- eventsourcing-9.5.0b3.dist-info/licenses/AUTHORS +10 -0
- eventsourcing-9.5.0b3.dist-info/licenses/LICENSE +29 -0
eventsourcing/sqlite.py
ADDED
|
@@ -0,0 +1,816 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import sqlite3
|
|
4
|
+
from contextlib import contextmanager
|
|
5
|
+
from typing import TYPE_CHECKING, Any, Literal, cast
|
|
6
|
+
from uuid import UUID
|
|
7
|
+
|
|
8
|
+
from eventsourcing.persistence import (
|
|
9
|
+
AggregateRecorder,
|
|
10
|
+
ApplicationRecorder,
|
|
11
|
+
Connection,
|
|
12
|
+
ConnectionPool,
|
|
13
|
+
Cursor,
|
|
14
|
+
DatabaseError,
|
|
15
|
+
DataError,
|
|
16
|
+
InfrastructureFactory,
|
|
17
|
+
IntegrityError,
|
|
18
|
+
InterfaceError,
|
|
19
|
+
InternalError,
|
|
20
|
+
Notification,
|
|
21
|
+
NotSupportedError,
|
|
22
|
+
OperationalError,
|
|
23
|
+
PersistenceError,
|
|
24
|
+
ProcessRecorder,
|
|
25
|
+
ProgrammingError,
|
|
26
|
+
Recorder,
|
|
27
|
+
StoredEvent,
|
|
28
|
+
Subscription,
|
|
29
|
+
Tracking,
|
|
30
|
+
TrackingRecorder,
|
|
31
|
+
)
|
|
32
|
+
from eventsourcing.utils import Environment, EnvType, resolve_topic, strtobool
|
|
33
|
+
|
|
34
|
+
if TYPE_CHECKING:
|
|
35
|
+
from collections.abc import Iterator, Sequence
|
|
36
|
+
from types import TracebackType
|
|
37
|
+
|
|
38
|
+
# Default lock timeout (in seconds), passed to sqlite3.connect() as 'timeout'
# when no explicit lock_timeout is configured on the pool.
SQLITE3_DEFAULT_LOCK_TIMEOUT = 5
|
|
39
|
+
|
|
40
|
+
|
|
41
|
+
class SQLiteCursor(Cursor):
    """Thin adapter presenting an :class:`sqlite3.Cursor` through the Cursor API."""

    def __init__(self, sqlite_cursor: sqlite3.Cursor):
        self.sqlite_cursor = sqlite_cursor

    def __enter__(self) -> sqlite3.Cursor:
        # Hand back the wrapped cursor for direct use in a 'with' block.
        return self.sqlite_cursor

    def __exit__(self, *args: object, **kwargs: Any) -> None:
        # Leaving the 'with' block closes the wrapped cursor.
        self.sqlite_cursor.close()

    def execute(self, *args: Any, **kwargs: Any) -> None:
        """Execute a single SQL statement on the wrapped cursor."""
        self.sqlite_cursor.execute(*args, **kwargs)

    def executemany(self, *args: Any, **kwargs: Any) -> None:
        """Execute an SQL statement once for each parameter set."""
        self.sqlite_cursor.executemany(*args, **kwargs)

    def fetchall(self) -> Any:
        """Return all remaining rows of the current result set."""
        return self.sqlite_cursor.fetchall()

    def fetchone(self) -> Any:
        """Return the next row of the current result set, or None."""
        return self.sqlite_cursor.fetchone()

    @property
    def lastrowid(self) -> Any:
        """Row ID of the most recently inserted row."""
        return self.sqlite_cursor.lastrowid
|
|
66
|
+
|
|
67
|
+
|
|
68
|
+
class SQLiteConnection(Connection[SQLiteCursor]):
    """Wraps an sqlite3 connection for use with the connection pool."""

    def __init__(self, sqlite_conn: sqlite3.Connection, max_age: float | None):
        super().__init__(max_age=max_age)
        self._sqlite_conn = sqlite_conn

    @contextmanager
    def transaction(self, *, commit: bool) -> Iterator[SQLiteCursor]:
        """Yield a cursor bound to a context-managed transaction.

        Both the transaction and the cursor are context-managed: the
        transaction commits or rolls back on exit, then the cursor is closed.
        """
        with SQLiteTransaction(self, commit=commit) as curs:
            with curs:
                yield curs

    def cursor(self) -> SQLiteCursor:
        """Return a new wrapped cursor on this connection."""
        return SQLiteCursor(self._sqlite_conn.cursor())

    def rollback(self) -> None:
        """Discard any uncommitted changes."""
        self._sqlite_conn.rollback()

    def commit(self) -> None:
        """Commit the current transaction."""
        self._sqlite_conn.commit()

    def _close(self) -> None:
        # Close the underlying sqlite3 connection before base-class cleanup.
        self._sqlite_conn.close()
        super()._close()
|
|
91
|
+
|
|
92
|
+
|
|
93
|
+
class SQLiteTransaction:
    """Context manager for one unit of work on an SQLiteConnection.

    On entry it issues an explicit "BEGIN" and returns a cursor. On exit it
    commits or rolls back, and maps sqlite3 exceptions onto the persistence
    module's exception hierarchy (most specific classes first).
    """

    def __init__(self, connection: SQLiteConnection, *, commit: bool = False):
        self.connection = connection
        self.commit = commit

    def __enter__(self) -> SQLiteCursor:
        # The connection runs in auto-commit mode, so the transaction
        # must be opened with an explicit "BEGIN".
        curs = self.connection.cursor()
        curs.execute("BEGIN")
        return curs

    def __exit__(
        self,
        exc_type: type[BaseException] | None,
        exc_val: BaseException | None,
        exc_tb: TracebackType | None,
    ) -> None:
        try:
            if exc_val:
                # An exception escaped the managed block: undo all
                # changes, then let the exception propagate.
                self.connection.rollback()
                raise exc_val
            if self.commit:
                self.connection.commit()
            else:
                self.connection.rollback()
        except sqlite3.InterfaceError as e:
            raise InterfaceError(e) from e
        except sqlite3.DataError as e:
            raise DataError(e) from e
        except sqlite3.OperationalError as e:
            raise OperationalError(e) from e
        except sqlite3.IntegrityError as e:
            raise IntegrityError(e) from e
        except sqlite3.InternalError as e:
            raise InternalError(e) from e
        except sqlite3.ProgrammingError as e:
            raise ProgrammingError(e) from e
        except sqlite3.NotSupportedError as e:
            raise NotSupportedError(e) from e
        except sqlite3.DatabaseError as e:
            raise DatabaseError(e) from e
        except sqlite3.Error as e:
            raise PersistenceError(e) from e
|
|
139
|
+
|
|
140
|
+
|
|
141
|
+
class SQLiteConnectionPool(ConnectionPool[SQLiteConnection]):
    """Pool of SQLiteConnection objects for one database.

    For in-memory databases, readers and writers are mutually excluded
    (required for correctness); the pool is configured accordingly.
    """

    def __init__(
        self,
        *,
        db_name: str,
        lock_timeout: int | None = None,
        pool_size: int = 5,
        max_overflow: int = 10,
        pool_timeout: float = 5.0,
        max_age: float | None = None,
        pre_ping: bool = False,
    ):
        self.db_name = db_name
        self.lock_timeout = lock_timeout
        self.is_sqlite_memory_mode = self.detect_memory_mode(db_name)
        # Tracks whether a file-based database is known to be in WAL mode,
        # so the journal-mode pragma only runs for the first connection.
        self.is_journal_mode_wal = False
        self.journal_mode_was_changed_to_wal = False
        super().__init__(
            pool_size=pool_size,
            max_overflow=max_overflow,
            pool_timeout=pool_timeout,
            max_age=max_age,
            pre_ping=pre_ping,
            mutually_exclusive_read_write=self.is_sqlite_memory_mode,
        )

    @staticmethod
    def detect_memory_mode(db_name: str) -> bool:
        """Return True if 'db_name' denotes an in-memory SQLite database."""
        return bool(db_name) and (":memory:" in db_name or "mode=memory" in db_name)

    def _create_connection(self) -> SQLiteConnection:
        """Create a new connection to the SQLite database.

        Raises InterfaceError if the connection cannot be established.
        """
        # Make a connection to an SQLite database.
        try:
            c = sqlite3.connect(
                database=self.db_name,
                uri=True,
                check_same_thread=False,
                isolation_level=None,  # Auto-commit mode.
                cached_statements=True,
                timeout=self.lock_timeout or SQLITE3_DEFAULT_LOCK_TIMEOUT,
            )
        except (sqlite3.Error, TypeError) as e:
            raise InterfaceError(e) from e

        # Use WAL (write-ahead log) mode if file-based database.
        if not self.is_sqlite_memory_mode and not self.is_journal_mode_wal:
            cursor = c.cursor()
            try:
                cursor.execute("PRAGMA journal_mode;")
                mode = cursor.fetchone()[0]
                if mode.lower() != "wal":
                    cursor.execute("PRAGMA journal_mode=WAL;")
                    self.journal_mode_was_changed_to_wal = True
                self.is_journal_mode_wal = True
            finally:
                # Fix: close the pragma cursor explicitly (it was
                # previously left open until the connection was closed).
                cursor.close()

        # Set the row factory, so rows can be accessed by column name.
        c.row_factory = sqlite3.Row

        # Return the connection.
        return SQLiteConnection(sqlite_conn=c, max_age=self.max_age)
|
|
202
|
+
|
|
203
|
+
|
|
204
|
+
class SQLiteDatastore:
    """Holds a pool of SQLite connections plus settings shared by recorders."""

    def __init__(
        self,
        db_name: str,
        *,
        lock_timeout: int | None = None,
        pool_size: int = 5,
        max_overflow: int = 10,
        pool_timeout: float = 5.0,
        max_age: float | None = None,
        pre_ping: bool = False,
        single_row_tracking: bool = True,
        originator_id_type: Literal["uuid", "text"] = "uuid",
    ):
        self.pool = SQLiteConnectionPool(
            db_name=db_name,
            lock_timeout=lock_timeout,
            pool_size=pool_size,
            max_overflow=max_overflow,
            pool_timeout=pool_timeout,
            max_age=max_age,
            pre_ping=pre_ping,
        )
        # Settings read by recorders constructed with this datastore.
        self.single_row_tracking = single_row_tracking
        self.originator_id_type = originator_id_type

    @contextmanager
    def transaction(self, *, commit: bool) -> Iterator[SQLiteCursor]:
        """Yield a cursor within a pooled, context-managed transaction."""
        with self.get_connection(commit=commit) as conn:
            with conn.transaction(commit=commit) as curs:
                yield curs

    @contextmanager
    def get_connection(self, *, commit: bool) -> Iterator[SQLiteConnection]:
        """Check a connection out of the pool, returning it on exit.

        Using reader-writer interlocking is necessary for in-memory databases,
        but also speeds up (and provides "fairness") to file-based databases.
        """
        conn = self.pool.get_connection(is_writer=commit)
        try:
            yield conn
        finally:
            self.pool.put_connection(conn)

    def close(self) -> None:
        """Close the connection pool (and hence all pooled connections)."""
        self.pool.close()

    def __del__(self) -> None:
        # Fix: only close if __init__ got far enough to assign 'pool';
        # otherwise a failed __init__ caused a spurious AttributeError
        # during garbage collection / interpreter teardown.
        if hasattr(self, "pool"):
            self.close()
|
|
251
|
+
|
|
252
|
+
|
|
253
|
+
class SQLiteRecorder(Recorder):
    """Base class for SQLite recorders: holds a datastore and table DDL."""

    def __init__(
        self,
        datastore: SQLiteDatastore,
    ):
        assert isinstance(datastore, SQLiteDatastore)
        self.datastore = datastore
        self.create_table_statements = self.construct_create_table_statements()

    def construct_create_table_statements(self) -> list[str]:
        """Return CREATE TABLE statements; subclasses extend this list."""
        return []

    def create_table(self) -> None:
        """Create this recorder's tables within a committed transaction."""
        with self.datastore.transaction(commit=True) as curs:
            self._create_table(curs)

    def _create_table(self, c: SQLiteCursor) -> None:
        # Execute each DDL statement in turn on the given cursor.
        for ddl in self.create_table_statements:
            c.execute(ddl)

    def convert_originator_id(self, originator_id: str) -> UUID | str:
        """Convert a stored originator ID back to UUID, unless IDs are text."""
        if self.datastore.originator_id_type == "uuid":
            return UUID(originator_id)
        return originator_id
|
|
279
|
+
|
|
280
|
+
|
|
281
|
+
class SQLiteAggregateRecorder(SQLiteRecorder, AggregateRecorder):
    """Records and retrieves stored aggregate events in an SQLite table."""

    def __init__(
        self,
        datastore: SQLiteDatastore,
        events_table_name: str = "stored_events",
    ):
        self.events_table_name = events_table_name
        super().__init__(datastore)
        # Prepared SQL fragments; the select keeps a trailing space so
        # further clauses can be appended directly.
        self.insert_events_statement = (
            f"INSERT INTO {self.events_table_name} VALUES (?,?,?,?)"
        )
        self.select_events_statement = (
            f"SELECT * FROM {self.events_table_name} WHERE originator_id=? "
        )

    def construct_create_table_statements(self) -> list[str]:
        """Append DDL for the events table (clustered, WITHOUT ROWID)."""
        statements = super().construct_create_table_statements()
        statements.append(
            "CREATE TABLE IF NOT EXISTS "
            f"{self.events_table_name} ("
            "originator_id TEXT, "
            "originator_version INTEGER, "
            "topic TEXT, "
            "state BLOB, "
            "PRIMARY KEY "
            "(originator_id, originator_version)) "
            "WITHOUT ROWID"
        )
        return statements

    def insert_events(
        self, stored_events: Sequence[StoredEvent], **kwargs: Any
    ) -> Sequence[int] | None:
        """Insert stored events within a single committed transaction."""
        with self.datastore.transaction(commit=True) as curs:
            return self._insert_events(curs, stored_events, **kwargs)

    def _insert_events(
        self,
        c: SQLiteCursor,
        stored_events: Sequence[StoredEvent],
        **_: Any,
    ) -> Sequence[int] | None:
        # UUID originator IDs are stored as 32-character hex strings.
        rows = []
        for event in stored_events:
            oid = event.originator_id
            rows.append(
                (
                    oid.hex if isinstance(oid, UUID) else oid,
                    event.originator_version,
                    event.topic,
                    event.state,
                )
            )
        c.executemany(self.insert_events_statement, rows)
        return None

    def select_events(
        self,
        originator_id: UUID | str,
        *,
        gt: int | None = None,
        lte: int | None = None,
        desc: bool = False,
        limit: int | None = None,
    ) -> Sequence[StoredEvent]:
        """Select stored events for one originator, optionally bounded.

        'gt' and 'lte' bound the originator version, 'desc' reverses the
        order, and 'limit' caps the number of rows returned.
        """
        oid = originator_id.hex if isinstance(originator_id, UUID) else originator_id
        params: list[Any] = [oid]
        parts = [self.select_events_statement]
        if gt is not None:
            parts.append("AND originator_version>? ")
            params.append(gt)
        if lte is not None:
            parts.append("AND originator_version<=? ")
            params.append(lte)
        parts.append("ORDER BY originator_version ")
        parts.append("ASC " if desc is False else "DESC ")
        if limit is not None:
            parts.append("LIMIT ? ")
            params.append(limit)
        statement = "".join(parts)
        with self.datastore.transaction(commit=False) as curs:
            curs.execute(statement, params)
            events = []
            for row in curs.fetchall():
                events.append(
                    StoredEvent(
                        originator_id=self.convert_originator_id(row["originator_id"]),
                        originator_version=row["originator_version"],
                        topic=row["topic"],
                        state=row["state"],
                    )
                )
            return events
|
|
377
|
+
|
|
378
|
+
|
|
379
|
+
class SQLiteApplicationRecorder(
    SQLiteAggregateRecorder,
    ApplicationRecorder,
):
    """Application recorder that exposes stored events as notifications.

    Unlike the aggregate recorder, the events table here is created WITH an
    implicit rowid (no "WITHOUT ROWID"), so SQLite's rowid serves as the
    monotonically assigned notification ID.
    """

    def __init__(
        self,
        datastore: SQLiteDatastore,
        events_table_name: str = "stored_events",
    ):
        super().__init__(datastore, events_table_name)
        # Used to discover the highest notification ID (max rowid).
        self.select_max_notification_id_statement = (
            f"SELECT MAX(rowid) FROM {self.events_table_name}"
        )

    def construct_create_table_statements(self) -> list[str]:
        """Return DDL for the events table (rowid table, not WITHOUT ROWID)."""
        statement = (
            "CREATE TABLE IF NOT EXISTS "
            f"{self.events_table_name} ("
            "originator_id TEXT, "
            "originator_version INTEGER, "
            "topic TEXT, "
            "state BLOB, "
            "PRIMARY KEY "
            "(originator_id, originator_version))"
        )
        return [statement]

    def _insert_events(
        self,
        c: SQLiteCursor,
        stored_events: Sequence[StoredEvent],
        **_: Any,
    ) -> Sequence[int] | None:
        # Insert one row at a time (rather than executemany) so that each
        # inserted row's rowid can be collected as its notification ID.
        returning = []
        for s in stored_events:
            c.execute(
                self.insert_events_statement,
                (
                    (
                        # UUID originator IDs are stored as 32-char hex strings.
                        s.originator_id.hex
                        if isinstance(s.originator_id, UUID)
                        else s.originator_id
                    ),
                    s.originator_version,
                    s.topic,
                    s.state,
                ),
            )
            returning.append(c.lastrowid)
        return returning

    def select_notifications(
        self,
        start: int | None,
        limit: int,
        stop: int | None = None,
        topics: Sequence[str] = (),
        *,
        inclusive_of_start: bool = True,
    ) -> Sequence[Notification]:
        """Returns a list of event notifications
        from 'start', limited by 'limit'.

        'stop' gives an upper bound (inclusive) on notification IDs, 'topics'
        restricts rows to the given topics, and 'inclusive_of_start' selects
        between 'rowid>=?' and 'rowid>?' for the lower bound.
        """
        # NOTE: clause text and parameter appends are interleaved below —
        # the order of params must match the order of '?' placeholders.
        params: list[int | str] = []
        statement = f"SELECT rowid, * FROM {self.events_table_name} "
        has_where = False
        if start is not None:
            has_where = True
            statement += "WHERE "
            params.append(start)
            if inclusive_of_start:
                statement += "rowid>=? "
            else:
                statement += "rowid>? "

        if stop is not None:
            # Start a WHERE clause if one wasn't started above.
            if not has_where:
                has_where = True
                statement += "WHERE "
            else:
                statement += "AND "
            params.append(stop)
            statement += "rowid<=? "

        if topics:
            if not has_where:
                statement += "WHERE "
            else:
                statement += "AND "
            # One '?' placeholder per topic.
            params += list(topics)
            statement += f"topic IN ({','.join('?' * len(topics))}) "

        params.append(limit)
        statement += "ORDER BY rowid LIMIT ?"

        with self.datastore.transaction(commit=False) as c:
            c.execute(statement, params)
            return [
                Notification(
                    id=row["rowid"],
                    originator_id=self.convert_originator_id(row["originator_id"]),
                    originator_version=row["originator_version"],
                    topic=row["topic"],
                    state=row["state"],
                )
                for row in c.fetchall()
            ]

    def max_notification_id(self) -> int:
        """Returns the maximum notification ID."""
        with self.datastore.transaction(commit=False) as c:
            return self._max_notification_id(c)

    def _max_notification_id(self, c: SQLiteCursor) -> int:
        # MAX(rowid) over the events table; NOTE(review): on an empty table
        # SQLite's MAX() yields NULL, so this can return None — confirm
        # callers tolerate that despite the 'int' annotation.
        c.execute(self.select_max_notification_id_statement)
        return c.fetchone()[0]

    def subscribe(
        self, gt: int | None = None, topics: Sequence[str] = ()
    ) -> Subscription[ApplicationRecorder]:
        """This method is not implemented on this class."""
        msg = f"The {type(self).__qualname__} recorder does not support subscriptions"
        raise NotImplementedError(msg)
|
|
502
|
+
|
|
503
|
+
|
|
504
|
+
class SQLiteTrackingRecorder(SQLiteRecorder, TrackingRecorder):
    """Records which upstream notifications have been processed.

    Supports two table layouts, selected by datastore.single_row_tracking:
    single-row (one row per application name, updated in place) and
    multi-row (one row per processed notification). A sentinel row keyed
    by '__migration__' marks a table that has been migrated to the
    single-row layout.
    """

    def __init__(
        self,
        datastore: SQLiteDatastore,
        **kwargs: Any,
    ):
        super().__init__(datastore, **kwargs)
        # State discovered/maintained by create_table() about the existing
        # tracking table and its migration sentinel.
        self.tracking_table_exists: bool = False
        self.tracking_migration_previous: int | None = None
        self.tracking_migration_current: int | None = None
        # Reserved application name for the migration sentinel row.
        self.table_migration_identifier = "__migration__"
        self.has_checked_for_multi_row_tracking_table: bool = False
        if self.datastore.single_row_tracking:
            # Upsert keeps one row per application; the WHERE guard makes the
            # update a no-op (no row returned) if the ID does not increase.
            self.insert_tracking_statement = (
                "INSERT INTO tracking "
                "VALUES (:application_name, :notification_id) "
                "ON CONFLICT (application_name) DO UPDATE "
                "SET notification_id = :notification_id "
                "WHERE tracking.notification_id < :notification_id "
                "RETURNING notification_id"
            )
        else:
            self.insert_tracking_statement = (
                "INSERT INTO tracking VALUES (:application_name, :notification_id)"
            )
        self.select_max_tracking_id_statement = (
            "SELECT MAX(notification_id) FROM tracking WHERE application_name=?"
        )

    def construct_create_table_statements(self) -> list[str]:
        """Append DDL for the tracking table in the configured layout."""
        statements = super().construct_create_table_statements()
        if self.datastore.single_row_tracking:
            # Single-row layout: application_name alone is the primary key.
            statements.append(
                "CREATE TABLE IF NOT EXISTS tracking ("
                "application_name TEXT, "
                "notification_id INTEGER, "
                "PRIMARY KEY "
                "(application_name)) "
                "WITHOUT ROWID"
            )
        else:
            # Multi-row layout: one row per (application, notification).
            statements.append(
                "CREATE TABLE IF NOT EXISTS tracking ("
                "application_name TEXT, "
                "notification_id INTEGER, "
                "PRIMARY KEY "
                "(application_name, notification_id)) "
                "WITHOUT ROWID"
            )
        return statements

    def create_table(self) -> None:
        """Create (and if needed migrate) the tracking table.

        Raises OperationalError if multi-row tracking is configured but the
        existing table was already migrated to the single-row layout.
        """
        # Get the migration version.
        # Probing for the sentinel also reveals whether the table exists:
        # a missing table raises OperationalError.
        try:
            self.tracking_migration_current = self.tracking_migration_previous = (
                self.max_tracking_id(self.table_migration_identifier)
            )
        except OperationalError:
            pass
        else:
            self.tracking_table_exists = True
        super().create_table()
        if (
            not self.datastore.single_row_tracking
            and self.tracking_migration_current is not None
        ):
            msg = "Can't do multi-row tracking with single-row tracking table"
            raise OperationalError(msg)

    def _create_table(self, c: SQLiteCursor) -> None:
        # Runs inside one transaction, so the migration below is atomic.
        max_tracking_ids: dict[str, int] = {}
        if (
            self.datastore.single_row_tracking
            and self.tracking_table_exists
            and not self.tracking_migration_previous
        ):
            # Migrate tracking to use single-row per application name.
            # - Get all application names.
            c.execute("SELECT DISTINCT application_name FROM tracking")
            application_names: list[str] = [
                select_row["application_name"] for select_row in c.fetchall()
            ]

            # - Get max tracking ID for each application name.
            for application_name in application_names:
                c.execute(self.select_max_tracking_id_statement, (application_name,))
                max_tracking_id_row = c.fetchone()
                assert max_tracking_id_row is not None
                max_tracking_ids[application_name] = max_tracking_id_row[0]
            # - Rename the table.
            drop_table_statement = "ALTER TABLE tracking RENAME TO old1_tracking"
            c.execute(drop_table_statement)
        # Create the table.
        super()._create_table(c)
        # - Maybe insert migration tracking record and application tracking records.
        if self.datastore.single_row_tracking and (
            not self.tracking_table_exists
            or (self.tracking_table_exists and not self.tracking_migration_previous)
        ):
            # - Assume we just created a table for single-row tracking.
            self._insert_tracking(c, Tracking(self.table_migration_identifier, 1))
            self.tracking_migration_current = 1
            for application_name, max_tracking_id in max_tracking_ids.items():
                self._insert_tracking(c, Tracking(application_name, max_tracking_id))

    def insert_tracking(self, tracking: Tracking) -> None:
        """Record a processed notification in a committed transaction."""
        with self.datastore.transaction(commit=True) as c:
            self._insert_tracking(c, tracking)

    def _insert_tracking(
        self,
        c: SQLiteCursor,
        tracking: Tracking,
    ) -> None:
        # Raises IntegrityError if the tracking record could not be recorded
        # (single-row layout: the RETURNING clause yields no row when the
        # upsert's WHERE guard rejected a non-increasing notification ID).
        self._check_has_multi_row_tracking_table(c)

        c.execute(
            self.insert_tracking_statement,
            {
                "application_name": tracking.application_name,
                "notification_id": tracking.notification_id,
            },
        )
        if self.datastore.single_row_tracking:
            fetchone = c.fetchone()
            if fetchone is None:
                msg = (
                    "Failed to record tracking for "
                    f"{tracking.application_name} {tracking.notification_id}"
                )
                raise IntegrityError(msg)

    def _check_has_multi_row_tracking_table(self, c: SQLiteCursor) -> None:
        # Guard (checked once): refuse multi-row inserts into a table that
        # carries the single-row migration sentinel.
        if (
            not self.datastore.single_row_tracking
            and not self.has_checked_for_multi_row_tracking_table
            and self._max_tracking_id(self.table_migration_identifier, c)
        ):
            msg = "Can't do multi-row tracking with single-row tracking table"
            raise OperationalError(msg)
        self.has_checked_for_multi_row_tracking_table = True

    def max_tracking_id(self, application_name: str) -> int | None:
        """Return the highest recorded notification ID, or None if none."""
        with self.datastore.transaction(commit=False) as c:
            return self._max_tracking_id(application_name, c)

    def _max_tracking_id(self, application_name: str, c: SQLiteCursor) -> int | None:
        # MAX() yields NULL (None) when no rows match the application name.
        params = [application_name]
        c.execute(self.select_max_tracking_id_statement, params)
        return c.fetchone()[0]
|
|
654
|
+
|
|
655
|
+
|
|
656
|
+
class SQLiteProcessRecorder(
    SQLiteTrackingRecorder,
    SQLiteApplicationRecorder,
    ProcessRecorder,
):
    """Records stored events atomically together with tracking records."""

    def __init__(
        self,
        datastore: SQLiteDatastore,
        *,
        events_table_name: str = "stored_events",
    ):
        super().__init__(datastore, events_table_name=events_table_name)

    def _insert_events(
        self,
        c: SQLiteCursor,
        stored_events: Sequence[StoredEvent],
        **kwargs: Any,
    ) -> Sequence[int] | None:
        """Insert events and, when given, the tracking record, on one cursor."""
        notification_ids = super()._insert_events(c, stored_events, **kwargs)
        # A 'tracking' keyword argument carries the upstream position.
        tracking: Tracking | None = kwargs.get("tracking")
        if tracking is not None:
            self._insert_tracking(c, tracking)
        return notification_ids
|
|
680
|
+
|
|
681
|
+
|
|
682
|
+
class SQLiteFactory(InfrastructureFactory[SQLiteTrackingRecorder]):
    """Infrastructure factory for SQLite, configured from the environment."""

    SQLITE_DBNAME = "SQLITE_DBNAME"
    SQLITE_LOCK_TIMEOUT = "SQLITE_LOCK_TIMEOUT"
    SQLITE_SINGLE_ROW_TRACKING = "SINGLE_ROW_TRACKING"
    ORIGINATOR_ID_TYPE = "ORIGINATOR_ID_TYPE"
    CREATE_TABLE = "CREATE_TABLE"

    aggregate_recorder_class = SQLiteAggregateRecorder
    application_recorder_class = SQLiteApplicationRecorder
    tracking_recorder_class = SQLiteTrackingRecorder
    process_recorder_class = SQLiteProcessRecorder

    def __init__(self, env: Environment | EnvType | None):
        """Read settings from the environment and construct the datastore.

        Raises OSError when required settings are missing or invalid.
        """
        super().__init__(env)
        db_name = self.env.get(self.SQLITE_DBNAME)
        if not db_name:
            msg = (
                "SQLite database name not found "
                "in environment with keys: "
                f"{', '.join(self.env.create_keys(self.SQLITE_DBNAME))}"
            )
            raise OSError(msg)

        # Treat an unset or blank lock timeout as "use the default".
        lock_timeout_str = (
            self.env.get(self.SQLITE_LOCK_TIMEOUT) or ""
        ).strip() or None

        lock_timeout: int | None = None
        if lock_timeout_str is not None:
            try:
                lock_timeout = int(lock_timeout_str)
            except ValueError:
                msg = (
                    "SQLite environment value for key "
                    f"'{self.SQLITE_LOCK_TIMEOUT}' is invalid. "
                    "If set, an int or empty string is expected: "
                    f"'{lock_timeout_str}'"
                )
                raise OSError(msg) from None

        single_row_tracking = strtobool(
            self.env.get(self.SQLITE_SINGLE_ROW_TRACKING, "t")
        )

        originator_id_type_str = self.env.get(self.ORIGINATOR_ID_TYPE, "uuid")
        if originator_id_type_str.lower() not in ("uuid", "text"):
            msg = (
                f"Invalid {self.ORIGINATOR_ID_TYPE} '{originator_id_type_str}', "
                "must be 'uuid' or 'text'"
            )
            raise OSError(msg)
        # Fix: normalize case before storing. Previously a value such as
        # "UUID" passed validation (which lower-cased only for the check)
        # but was stored unchanged, so recorders comparing against "uuid"
        # silently fell back to treating originator IDs as text.
        originator_id_type = cast(
            "Literal['uuid', 'text']", originator_id_type_str.lower()
        )

        self.datastore = SQLiteDatastore(
            db_name=db_name,
            lock_timeout=lock_timeout,
            single_row_tracking=single_row_tracking,
            originator_id_type=originator_id_type,
        )

    def aggregate_recorder(self, purpose: str = "events") -> AggregateRecorder:
        """Construct an aggregate recorder, creating its table if configured."""
        events_table_name = "stored_" + purpose
        recorder = self.aggregate_recorder_class(
            datastore=self.datastore,
            events_table_name=events_table_name,
        )
        if self.env_create_table():
            recorder.create_table()
        return recorder

    def application_recorder(self) -> ApplicationRecorder:
        """Construct an application recorder, honouring a configured topic."""
        application_recorder_topic = self.env.get(self.APPLICATION_RECORDER_TOPIC)

        if application_recorder_topic:
            application_recorder_class: type[SQLiteApplicationRecorder] = resolve_topic(
                application_recorder_topic
            )
            assert issubclass(application_recorder_class, SQLiteApplicationRecorder)
        else:
            application_recorder_class = self.application_recorder_class

        recorder = application_recorder_class(datastore=self.datastore)

        if self.env_create_table():
            recorder.create_table()
        return recorder

    def tracking_recorder(
        self, tracking_recorder_class: type[SQLiteTrackingRecorder] | None = None
    ) -> SQLiteTrackingRecorder:
        """Construct a tracking recorder, honouring a configured topic."""
        if tracking_recorder_class is None:
            tracking_recorder_topic = self.env.get(self.TRACKING_RECORDER_TOPIC)

            if tracking_recorder_topic:
                tracking_recorder_class = resolve_topic(tracking_recorder_topic)
            else:
                tracking_recorder_class = self.tracking_recorder_class

        assert tracking_recorder_class is not None
        assert issubclass(tracking_recorder_class, SQLiteTrackingRecorder)

        recorder = tracking_recorder_class(datastore=self.datastore)

        if self.env_create_table():
            recorder.create_table()
        return recorder

    def process_recorder(self) -> ProcessRecorder:
        """Construct a process recorder, honouring a configured topic."""
        process_recorder_topic = self.env.get(self.PROCESS_RECORDER_TOPIC)

        if process_recorder_topic:
            process_recorder_class: type[SQLiteProcessRecorder] = resolve_topic(
                process_recorder_topic
            )
            assert issubclass(process_recorder_class, SQLiteProcessRecorder)
        else:
            process_recorder_class = self.process_recorder_class

        recorder = process_recorder_class(datastore=self.datastore)

        if self.env_create_table():
            recorder.create_table()
        return recorder

    def env_create_table(self) -> bool:
        """Return True unless table creation is disabled in the environment."""
        default = "yes"
        return bool(strtobool(self.env.get(self.CREATE_TABLE, default) or default))

    def close(self) -> None:
        """Close the datastore (and its connection pool)."""
        self.datastore.close()
|
|
814
|
+
|
|
815
|
+
|
|
816
|
+
# Module-level alias for the SQLite infrastructure factory, giving this
# module a conventionally named 'Factory' attribute.
Factory = SQLiteFactory
|