eventsourcing 9.5.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eventsourcing/__init__.py +0 -0
- eventsourcing/application.py +998 -0
- eventsourcing/cipher.py +107 -0
- eventsourcing/compressor.py +15 -0
- eventsourcing/cryptography.py +91 -0
- eventsourcing/dcb/__init__.py +0 -0
- eventsourcing/dcb/api.py +144 -0
- eventsourcing/dcb/application.py +159 -0
- eventsourcing/dcb/domain.py +369 -0
- eventsourcing/dcb/msgpack.py +38 -0
- eventsourcing/dcb/persistence.py +193 -0
- eventsourcing/dcb/popo.py +178 -0
- eventsourcing/dcb/postgres_tt.py +704 -0
- eventsourcing/dcb/tests.py +608 -0
- eventsourcing/dispatch.py +80 -0
- eventsourcing/domain.py +1964 -0
- eventsourcing/interface.py +164 -0
- eventsourcing/persistence.py +1429 -0
- eventsourcing/popo.py +267 -0
- eventsourcing/postgres.py +1441 -0
- eventsourcing/projection.py +502 -0
- eventsourcing/py.typed +0 -0
- eventsourcing/sqlite.py +816 -0
- eventsourcing/system.py +1203 -0
- eventsourcing/tests/__init__.py +3 -0
- eventsourcing/tests/application.py +483 -0
- eventsourcing/tests/domain.py +105 -0
- eventsourcing/tests/persistence.py +1744 -0
- eventsourcing/tests/postgres_utils.py +131 -0
- eventsourcing/utils.py +257 -0
- eventsourcing-9.5.0b3.dist-info/METADATA +253 -0
- eventsourcing-9.5.0b3.dist-info/RECORD +35 -0
- eventsourcing-9.5.0b3.dist-info/WHEEL +4 -0
- eventsourcing-9.5.0b3.dist-info/licenses/AUTHORS +10 -0
- eventsourcing-9.5.0b3.dist-info/licenses/LICENSE +29 -0
|
@@ -0,0 +1,998 @@
|
|
|
1
|
+
from __future__ import annotations
|
|
2
|
+
|
|
3
|
+
import contextlib
|
|
4
|
+
import os
|
|
5
|
+
from abc import ABC, abstractmethod
|
|
6
|
+
from collections.abc import Callable, Iterable, Iterator, Sequence
|
|
7
|
+
from copy import deepcopy
|
|
8
|
+
from dataclasses import dataclass
|
|
9
|
+
from itertools import chain
|
|
10
|
+
from threading import Event, Lock
|
|
11
|
+
from typing import (
|
|
12
|
+
TYPE_CHECKING,
|
|
13
|
+
Any,
|
|
14
|
+
ClassVar,
|
|
15
|
+
Generic,
|
|
16
|
+
TypeVar,
|
|
17
|
+
cast,
|
|
18
|
+
)
|
|
19
|
+
from warnings import warn
|
|
20
|
+
|
|
21
|
+
from eventsourcing.domain import (
|
|
22
|
+
Aggregate,
|
|
23
|
+
BaseAggregate,
|
|
24
|
+
CanMutateProtocol,
|
|
25
|
+
CollectEventsProtocol,
|
|
26
|
+
DomainEventProtocol,
|
|
27
|
+
EventSourcingError,
|
|
28
|
+
MutableOrImmutableAggregate,
|
|
29
|
+
SDomainEvent,
|
|
30
|
+
SnapshotProtocol,
|
|
31
|
+
TAggregateID,
|
|
32
|
+
TDomainEvent,
|
|
33
|
+
TMutableOrImmutableAggregate,
|
|
34
|
+
datetime_now_with_tzinfo,
|
|
35
|
+
)
|
|
36
|
+
from eventsourcing.persistence import (
|
|
37
|
+
ApplicationRecorder,
|
|
38
|
+
DatetimeAsISO,
|
|
39
|
+
DecimalAsStr,
|
|
40
|
+
EventStore,
|
|
41
|
+
InfrastructureFactory,
|
|
42
|
+
JSONTranscoder,
|
|
43
|
+
Mapper,
|
|
44
|
+
Notification,
|
|
45
|
+
Recording,
|
|
46
|
+
Tracking,
|
|
47
|
+
Transcoder,
|
|
48
|
+
UUIDAsHex,
|
|
49
|
+
)
|
|
50
|
+
from eventsourcing.utils import Environment, EnvType, strtobool
|
|
51
|
+
|
|
52
|
+
if TYPE_CHECKING:
|
|
53
|
+
from types import TracebackType
|
|
54
|
+
from uuid import UUID
|
|
55
|
+
|
|
56
|
+
from typing_extensions import Self
|
|
57
|
+
|
|
58
|
+
# Signature of functions that project a sequence of domain events onto an
# (optional) initial aggregate state, returning the evolved aggregate,
# or None (see project_aggregate() below for the default implementation).
ProjectorFunction = Callable[
    [TMutableOrImmutableAggregate | None, Iterable[TDomainEvent]],
    TMutableOrImmutableAggregate | None,
]

# Signature of functions that apply a single domain event to an (optional)
# aggregate, returning the evolved aggregate, or None.
MutatorFunction = Callable[
    [TDomainEvent, TMutableOrImmutableAggregate | None],
    TMutableOrImmutableAggregate | None,
]
|
|
67
|
+
|
|
68
|
+
|
|
69
|
+
class ProgrammingError(Exception):
    """Raised when application code uses the library incorrectly."""

    pass
|
71
|
+
|
|
72
|
+
|
|
73
|
+
def project_aggregate(
    aggregate: TMutableOrImmutableAggregate | None,
    domain_events: Iterable[DomainEventProtocol[Any]],
) -> TMutableOrImmutableAggregate | None:
    """Default projector function for aggregate projections.

    Applies each of the given domain events in turn to the (possibly
    None) aggregate state, by calling the event's mutate() method, and
    returns whatever the last mutation produced.
    """
    state = aggregate
    for event in domain_events:
        assert isinstance(event, CanMutateProtocol)
        state = event.mutate(state)
    return state
|
85
|
+
|
|
86
|
+
|
|
87
|
+
S = TypeVar("S")
T = TypeVar("T")


class Cache(Generic[S, T]):
    """Simple unbounded mapping from keys to cached values.

    Values are held in the 'cache' dict attribute, which subclasses
    (e.g. LRUCache) reuse for their own bookkeeping.
    """

    def __init__(self) -> None:
        self.cache: dict[S, Any] = {}

    def get(self, key: S, *, evict: bool = False) -> T:
        """Returns the cached value for 'key', removing it when
        'evict' is True. Raises KeyError if the key is absent.
        """
        if not evict:
            return self.cache[key]
        return self.cache.pop(key)

    def put(self, key: S, value: T | None) -> None:
        """Caches 'value' under 'key'. A None value is ignored."""
        if value is None:
            return
        self.cache[key] = value
+
|
|
104
|
+
|
|
105
|
+
class LRUCache(Cache[S, T]):
    """Size limited caching that tracks accesses by recency.

    This is basically copied from functools.lru_cache. But
    we need to know when there was a cache hit, so we can
    fast-forward the aggregate with new stored events.

    Entries live in a circular doubly linked list of 4-element
    'link' lists; self.cache maps each key to its link.
    """

    # NOTE(review): retained from functools.lru_cache; 'sentinel' is not
    # referenced anywhere in this class.
    sentinel = object()  # unique object used to signal cache misses
    PREV, NEXT, KEY, RESULT = 0, 1, 2, 3  # names for the link fields

    def __init__(self, maxsize: int):
        # Constants shared by all lru cache instances:
        super().__init__()
        self.maxsize = maxsize
        self.full = False
        self.lock = Lock()  # because linkedlist updates aren't threadsafe
        self.root: list[Any] = []  # root of the circular doubly linked list
        self.clear()

    def clear(self) -> None:
        # Reset the root link so PREV and NEXT both point at the root
        # itself, i.e. an empty circular list.
        self.root[:] = [
            self.root,
            self.root,
            None,
            None,
        ]  # initialize by pointing to self

    def get(self, key: S, *, evict: bool = False) -> T:
        """Returns the cached value for 'key', marking it most recently
        used, or removing it entirely when 'evict' is True. Raises
        KeyError on a cache miss.
        """
        with self.lock:
            link = self.cache.get(key)
            if link is not None:
                link_prev, link_next, _key, result = link
                if not evict:
                    # Move the link to the front of the circular queue.
                    link_prev[self.NEXT] = link_next
                    link_next[self.PREV] = link_prev
                    last = self.root[self.PREV]
                    last[self.NEXT] = self.root[self.PREV] = link
                    link[self.PREV] = last
                    link[self.NEXT] = self.root
                else:
                    # Remove the link.
                    link_prev[self.NEXT] = link_next
                    link_next[self.PREV] = link_prev
                    del self.cache[key]
                    self.full = self.cache.__len__() >= self.maxsize

                return result
            raise KeyError

    def put(self, key: S, value: T | None) -> Any | None:
        """Caches 'value' under 'key', evicting the least recently used
        entry if the cache is full.

        Always returns a 2-tuple (evicted_key, evicted_value); both
        elements are None when nothing was evicted. (Unlike Cache.put,
        a None value is stored, not ignored.)
        """
        evicted_key = None
        evicted_value = None
        with self.lock:
            link = self.cache.get(key)
            if link is not None:
                # Set value.
                link[self.RESULT] = value
                # Move the link to the front of the circular queue.
                link_prev, link_next, _key, _ = link
                link_prev[self.NEXT] = link_next
                link_next[self.PREV] = link_prev
                last = self.root[self.PREV]
                last[self.NEXT] = self.root[self.PREV] = link
                link[self.PREV] = last
                link[self.NEXT] = self.root
            elif self.full:
                # Use the old root to store the new key and result.
                oldroot = self.root
                oldroot[self.KEY] = key
                oldroot[self.RESULT] = value
                # Empty the oldest link and make it the new root.
                # Keep a reference to the old key and old result to
                # prevent their ref counts from going to zero during the
                # update. That will prevent potentially arbitrary object
                # clean-up code (i.e. __del__) from running while we're
                # still adjusting the links.
                self.root = oldroot[self.NEXT]
                evicted_key = self.root[self.KEY]
                evicted_value = self.root[self.RESULT]
                self.root[self.KEY] = self.root[self.RESULT] = None
                # Now update the cache dictionary.
                del self.cache[evicted_key]
                # Save the potentially reentrant cache[key] assignment
                # for last, after the root and links have been put in
                # a consistent state.
                self.cache[key] = oldroot
            else:
                # Put result in a new link at the front of the queue.
                last = self.root[self.PREV]
                link = [last, self.root, key, value]
                last[self.NEXT] = self.root[self.PREV] = self.cache[key] = link
                # Use the __len__() bound method instead of the len() function
                # which could potentially be wrapped in an lru_cache itself.
                self.full = self.cache.__len__() >= self.maxsize
        return evicted_key, evicted_value
+
|
|
203
|
+
|
|
204
|
+
class Repository(Generic[TAggregateID]):
    """Reconstructs aggregates from events in an
    :class:`~eventsourcing.persistence.EventStore`,
    possibly using snapshot store to avoid replaying
    all events.
    """

    # Bound on the number of idle per-aggregate fast-forward locks kept;
    # locks currently in use are tracked separately (see below).
    FASTFORWARD_LOCKS_CACHE_MAXSIZE = 50

    def __init__(
        self,
        event_store: EventStore[TAggregateID],
        *,
        snapshot_store: EventStore[TAggregateID] | None = None,
        cache_maxsize: int | None = None,
        fastforward: bool = True,
        fastforward_skipping: bool = False,
        deepcopy_from_cache: bool = True,
    ):
        """Initialises repository with given event store (an
        :class:`~eventsourcing.persistence.EventStore` for aggregate
        :class:`~eventsourcing.domain.AggregateEvent` objects)
        and optionally a snapshot store (an
        :class:`~eventsourcing.persistence.EventStore` for aggregate
        :class:`~eventsourcing.domain.Snapshot` objects).

        :param cache_maxsize: None disables the aggregate cache; <= 0
            selects an unbounded Cache; > 0 selects an LRUCache of
            that size.
        :param fastforward: when caching, bring a cached aggregate up
            to date with newer stored events on each get().
        :param fastforward_skipping: when True, a get() that cannot
            immediately acquire the fast-forward lock returns the
            (possibly stale) cached aggregate instead of blocking.
        :param deepcopy_from_cache: return deep copies of cached
            aggregates so callers' mutations can't corrupt the cache.
        """
        self.event_store: EventStore[TAggregateID] = event_store
        self.snapshot_store: EventStore[TAggregateID] | None = snapshot_store

        if cache_maxsize is None:
            # Caching disabled.
            self.cache: (
                Cache[TAggregateID, MutableOrImmutableAggregate[TAggregateID]] | None
            ) = None
        elif cache_maxsize <= 0:
            # Unbounded cache.
            self.cache = Cache()
        else:
            # Size-limited cache with least-recently-used eviction.
            self.cache = LRUCache(maxsize=cache_maxsize)
        self.fastforward = fastforward
        self.fastforward_skipping = fastforward_skipping
        self.deepcopy_from_cache = deepcopy_from_cache

        # Because fast-forwarding a cached aggregate isn't thread-safe.
        self._fastforward_locks_lock = Lock()
        self._fastforward_locks_cache: LRUCache[TAggregateID, Lock] = LRUCache(
            maxsize=self.FASTFORWARD_LOCKS_CACHE_MAXSIZE
        )
        self._fastforward_locks_inuse: dict[TAggregateID, tuple[Lock, int]] = {}

    def get(
        self,
        aggregate_id: TAggregateID,
        *,
        version: int | None = None,
        projector_func: ProjectorFunction[
            TMutableOrImmutableAggregate, TDomainEvent
        ] = project_aggregate,
        fastforward_skipping: bool = False,
        deepcopy_from_cache: bool = True,
    ) -> TMutableOrImmutableAggregate:
        """Reconstructs an :class:`~eventsourcing.domain.Aggregate` for a
        given ID from stored events, optionally at a particular version.

        :raises AggregateNotFoundError: if no aggregate can be
            reconstructed from the stored events.
        """
        # The cache is only used for "latest version" requests.
        if self.cache and version is None:
            try:
                # Look for aggregate in the cache.
                aggregate = cast(
                    "TMutableOrImmutableAggregate", self.cache.get(aggregate_id)
                )
            except KeyError:
                # Reconstruct aggregate from stored events.
                aggregate = self._reconstruct_aggregate(
                    aggregate_id, None, projector_func
                )
                # Put aggregate in the cache.
                self.cache.put(aggregate_id, aggregate)
            else:
                if self.fastforward:
                    # Fast-forward cached aggregate.
                    fastforward_lock = self._use_fastforward_lock(aggregate_id)
                    # TODO: Should this be 'fastforward or self.fastforward_skipping'?
                    # Skipping (non-blocking acquire) is enabled by either the
                    # per-call or the per-repository flag.
                    blocking = not (fastforward_skipping or self.fastforward_skipping)
                    try:
                        # If acquire() returns False (skipping enabled and lock
                        # contended), the stale cached aggregate is returned.
                        if fastforward_lock.acquire(blocking=blocking):
                            try:
                                # Apply any events newer than the cached version.
                                new_events = self.event_store.get(
                                    originator_id=aggregate_id, gt=aggregate.version
                                )
                                _aggregate = projector_func(
                                    aggregate,
                                    cast(
                                        "Iterable[TDomainEvent]",
                                        new_events,
                                    ),
                                )
                                if _aggregate is None:
                                    raise AggregateNotFoundError(aggregate_id)
                                aggregate = _aggregate
                            finally:
                                fastforward_lock.release()
                    finally:
                        self._disuse_fastforward_lock(aggregate_id)

            # Copy mutable aggregates for commands, so bad mutations don't corrupt.
            if deepcopy_from_cache and self.deepcopy_from_cache:
                aggregate = deepcopy(aggregate)
        else:
            # Reconstruct historical version of aggregate from stored events.
            aggregate = self._reconstruct_aggregate(
                aggregate_id, version, projector_func
            )
        return aggregate

    def _reconstruct_aggregate(
        self,
        aggregate_id: TAggregateID,
        version: int | None,
        projector_func: ProjectorFunction[TMutableOrImmutableAggregate, TDomainEvent],
    ) -> TMutableOrImmutableAggregate:
        # Replays snapshot (if any) plus subsequent events through the
        # projector function to rebuild the aggregate state.
        gt: int | None = None

        if self.snapshot_store is not None:
            # Try to get a snapshot.
            snapshots = list(
                self.snapshot_store.get(
                    originator_id=aggregate_id,
                    desc=True,
                    limit=1,
                    lte=version,
                ),
            )
            if snapshots:
                # Only replay events recorded after the snapshot.
                gt = snapshots[0].originator_version
        else:
            snapshots = []

        # Get aggregate events.
        aggregate_events = self.event_store.get(
            originator_id=aggregate_id,
            gt=gt,
            lte=version,
        )

        # Reconstruct the aggregate from its events.
        initial: TMutableOrImmutableAggregate | None = None
        aggregate = projector_func(
            initial,
            chain(
                cast("Iterable[TDomainEvent]", snapshots),
                cast("Iterable[TDomainEvent]", aggregate_events),
            ),
        )

        # Raise exception if "not found".
        if aggregate is None:
            msg = f"Aggregate {aggregate_id!r} version {version!r} not found."
            raise AggregateNotFoundError(msg)
        # Return the aggregate.
        return aggregate

    def _use_fastforward_lock(self, aggregate_id: TAggregateID) -> Lock:
        # Returns the per-aggregate fast-forward lock, incrementing its
        # in-use count so it isn't recycled while a get() is using it.
        lock: Lock | None = None
        with self._fastforward_locks_lock:
            num_users = 0
            with contextlib.suppress(KeyError):
                lock, num_users = self._fastforward_locks_inuse[aggregate_id]
            if lock is None:
                # Not currently in use: try to reclaim an idle lock.
                with contextlib.suppress(KeyError):
                    lock = self._fastforward_locks_cache.get(aggregate_id, evict=True)
            if lock is None:
                lock = Lock()
            num_users += 1
            self._fastforward_locks_inuse[aggregate_id] = (lock, num_users)
            return lock

    def _disuse_fastforward_lock(self, aggregate_id: TAggregateID) -> None:
        # Decrements the in-use count, parking the lock in the LRU cache
        # when its last user releases it.
        with self._fastforward_locks_lock:
            lock_, num_users = self._fastforward_locks_inuse[aggregate_id]
            num_users -= 1
            if num_users == 0:
                del self._fastforward_locks_inuse[aggregate_id]
                self._fastforward_locks_cache.put(aggregate_id, lock_)
            else:
                self._fastforward_locks_inuse[aggregate_id] = (lock_, num_users)

    def __contains__(self, item: TAggregateID) -> bool:
        """Tests to see if an aggregate exists in the repository."""
        try:
            self.get(aggregate_id=item)
        except AggregateNotFoundError:
            return False
        else:
            return True
+
|
|
397
|
+
|
|
398
|
+
@dataclass(frozen=True)
class Section:
    """Frozen dataclass that represents a section from a :class:`NotificationLog`.

    The :data:`items` attribute contains a list of
    :class:`~eventsourcing.persistence.Notification` objects.
    The :data:`id` attribute is the section ID, two integers
    separated by a comma that describe the first and last
    notification IDs that are included in the section.
    The :data:`next_id` attribute describes the section ID
    of the next section, and will be set if the section contains
    as many notifications as were requested.

    Constructor arguments:

    :param Optional[str] id: section ID of this section e.g. "1,10"
    :param list[Notification] items: a list of event notifications
    :param Optional[str] next_id: section ID of the following section
    """

    # Section ID such as "1,10"; None when the section is empty.
    id: str | None
    # Event notifications contained in this section.
    items: Sequence[Notification]
    # Section ID of the following section; None when there is no more.
    next_id: str | None
420
|
+
|
|
421
|
+
|
|
422
|
+
class NotificationLog(ABC):
    """Abstract base class for notification logs."""

    @abstractmethod
    def __getitem__(self, section_id: str) -> Section:
        """Returns the :class:`Section` of
        :class:`~eventsourcing.persistence.Notification` objects
        identified by the given section ID.
        """

    @abstractmethod
    def select(
        self,
        start: int | None,
        limit: int,
        stop: int | None = None,
        topics: Sequence[str] = (),
        *,
        inclusive_of_start: bool = True,
    ) -> Sequence[Notification]:
        """Returns a selection of
        :class:`~eventsourcing.persistence.Notification` objects
        matching the given arguments.
        """


class LocalNotificationLog(NotificationLog):
    """Notification log that presents sections of event notifications
    selected from an :class:`~eventsourcing.persistence.ApplicationRecorder`.
    """

    DEFAULT_SECTION_SIZE = 10

    def __init__(
        self,
        recorder: ApplicationRecorder,
        section_size: int = DEFAULT_SECTION_SIZE,
    ):
        """Initialises the notification log with the given
        :class:`~eventsourcing.persistence.ApplicationRecorder`
        and an optional section size.

        :param ApplicationRecorder recorder: application recorder from
            which event notifications will be selected
        :param int section_size: number of notifications per section
        """
        self.recorder = recorder
        self.section_size = section_size

    def __getitem__(self, requested_section_id: str) -> Section:
        """Returns the :class:`Section` of event notifications for the
        requested section ID (e.g. "1,10"). The ID of the returned
        section reflects the notifications actually found, so it may
        differ from the requested ID when fewer notifications exist
        than were requested, or when the notification sequence has gaps.
        """
        # Parse the requested section ID into a start position and limit.
        id_parts = requested_section_id.split(",")
        start = max(1, int(id_parts[0]))
        limit = min(max(0, int(id_parts[1]) - start + 1), self.section_size)

        # Fetch the notifications.
        notifications = self.select(start, limit)

        # Work out the actual and next section IDs.
        actual_section_id: str | None = None
        next_id: str | None = None
        if notifications:
            last_id = notifications[-1].id
            actual_section_id = self.format_section_id(notifications[0].id, last_id)
            # A full section implies there may be a following section.
            if len(notifications) == limit:
                next_id = self.format_section_id(last_id + 1, last_id + limit)

        # Assemble the section.
        return Section(
            id=actual_section_id,
            items=notifications,
            next_id=next_id,
        )

    def select(
        self,
        start: int | None,
        limit: int,
        stop: int | None = None,
        topics: Sequence[str] = (),
        *,
        inclusive_of_start: bool = True,
    ) -> Sequence[Notification]:
        """Returns a selection of
        :class:`~eventsourcing.persistence.Notification` objects from
        the recorder, raising :class:`ValueError` when more than one
        section's worth of notifications is requested.
        """
        if limit > self.section_size:
            raise ValueError(
                f"Requested limit {limit} greater than section size {self.section_size}"
            )
        return self.recorder.select_notifications(
            start=start,
            limit=limit,
            stop=stop,
            topics=topics,
            inclusive_of_start=inclusive_of_start,
        )

    @staticmethod
    def format_section_id(first_id: int, last_id: int) -> str:
        """Formats two notification IDs as a comma-separated section ID."""
        return f"{first_id},{last_id}"
|
548
|
+
|
|
549
|
+
|
|
550
|
+
class ProcessingEvent(Generic[TAggregateID]):
    """Keeps together a :class:`~eventsourcing.persistence.Tracking`
    object, which represents the position of a domain event notification
    in the notification log of a particular application, and the
    new domain events that result from processing that notification.
    """

    def __init__(self, tracking: Tracking | None = None):
        """Initialises the process event with the given tracking object."""
        self.tracking = tracking
        # Domain events collected for recording.
        self.events: list[DomainEventProtocol[TAggregateID]] = []
        # Aggregates from which events were collected, keyed by aggregate ID.
        self.aggregates: dict[
            TAggregateID, MutableOrImmutableAggregate[TAggregateID]
        ] = {}
        # Extra keyword arguments to be passed through when recording.
        self.saved_kwargs: dict[Any, Any] = {}

    def collect_events(
        self,
        *objs: MutableOrImmutableAggregate[TAggregateID]
        | DomainEventProtocol[TAggregateID]
        | None,
        **kwargs: Any,
    ) -> None:
        """Collects pending domain events from the given aggregate."""
        for obj in objs:
            if obj is None:
                continue
            # Bare domain events are collected directly.
            if isinstance(obj, DomainEventProtocol):
                self.events.append(obj)
                continue
            # Otherwise treat the object as an aggregate, draining its
            # pending events when it supports doing so.
            if isinstance(obj, CollectEventsProtocol):
                self.events.extend(obj.collect_events())
            self.aggregates[obj.id] = obj

        self.saved_kwargs.update(kwargs)

    def save(
        self,
        *aggregates: MutableOrImmutableAggregate[TAggregateID]
        | DomainEventProtocol[TAggregateID]
        | None,
        **kwargs: Any,
    ) -> None:
        """Deprecated alias for :func:`collect_events`."""
        warn(
            "'save()' is deprecated, use 'collect_events()' instead",
            DeprecationWarning,
            stacklevel=2,
        )

        self.collect_events(*aggregates, **kwargs)
|
601
|
+
|
|
602
|
+
|
|
603
|
+
class Application(Generic[TAggregateID]):
    """Base class for event-sourced applications."""

    # Application name; defaults to the subclass's own name
    # (set in __init_subclass__), and scopes environment lookups.
    name = "Application"
    # Class-level environment defaults, merged below os.environ and any
    # env passed to the constructor (see construct_env()).
    env: ClassVar[dict[str, str]] = {}
    # When True, forces "IS_SNAPSHOTTING_ENABLED" into the environment.
    is_snapshotting_enabled: bool = False
    # Per-aggregate-class snapshotting configuration; any entries also
    # force snapshotting on. NOTE(review): presumably each int is the
    # number of events between snapshots — confirm with _take_snapshots().
    snapshotting_intervals: ClassVar[
        dict[type[MutableOrImmutableAggregate[Any]], int]
    ] = {}
    # Per-aggregate-class projector functions used when snapshotting.
    snapshotting_projectors: ClassVar[
        dict[
            type[MutableOrImmutableAggregate[Any]],
            ProjectorFunction[Any, Any],
        ]
    ] = {}
    # Snapshot class to use, or None for the factory default.
    snapshot_class: type[SnapshotProtocol[TAggregateID]] | None = None
    # Number of notifications per notification log section.
    log_section_size = 10
    # NOTE(review): presumably topics for the deprecated notify()
    # mechanism — confirm against the notify() implementation.
    notify_topics: Sequence[str] = []

    # Environment variable names used to configure the repository cache
    # (see construct_repository()).
    AGGREGATE_CACHE_MAXSIZE = "AGGREGATE_CACHE_MAXSIZE"
    AGGREGATE_CACHE_FASTFORWARD = "AGGREGATE_CACHE_FASTFORWARD"
    AGGREGATE_CACHE_FASTFORWARD_SKIPPING = "AGGREGATE_CACHE_FASTFORWARD_SKIPPING"
    DEEPCOPY_FROM_AGGREGATE_CACHE = "DEEPCOPY_FROM_AGGREGATE_CACHE"
|
|
627
|
+
    def __init_subclass__(cls, **kwargs: Any) -> None:
        # Default the application name to the subclass's own name, unless
        # the subclass explicitly defines a 'name' attribute of its own.
        # NOTE(review): super().__init_subclass__() is not called, so class
        # keyword arguments are swallowed here — left as-is deliberately.
        if "name" not in cls.__dict__:
            cls.name = cls.__name__
|
631
|
+
    def __init__(self, env: EnvType | None = None) -> None:
        """Initialises an application with an
        :class:`~eventsourcing.persistence.InfrastructureFactory`,
        a :class:`~eventsourcing.persistence.Mapper`,
        an :class:`~eventsourcing.persistence.ApplicationRecorder`,
        an :class:`~eventsourcing.persistence.EventStore`,
        a :class:`~eventsourcing.application.Repository`, and
        a :class:`~eventsourcing.application.LocalNotificationLog`.

        :param env: optional environment values that take precedence
            over os.environ and the class-level defaults.
        """
        # Event set when the application is closing.
        self.closing = Event()
        self.env = self.construct_env(self.name, env)  # type: ignore[misc]
        self.factory = self.construct_factory(self.env)
        self.mapper: Mapper[TAggregateID] = self.construct_mapper()
        self.recorder = self.construct_recorder()
        self.events: EventStore[TAggregateID] = self.construct_event_store()
        # The snapshot store is only constructed when snapshotting is enabled.
        self.snapshots: EventStore[TAggregateID] | None = None
        if self.factory.is_snapshotting_enabled():
            self.snapshots = self.construct_snapshot_store()
        self._repository: Repository[TAggregateID] = self.construct_repository()
        self._notification_log = self.construct_notification_log()
|
652
|
+
    @property
    def repository(self) -> Repository[TAggregateID]:
        """An application's repository reconstructs aggregates from stored events."""
        return self._repository

    @property
    def notification_log(self) -> LocalNotificationLog:
        """An application's notification log presents all the aggregate events
        of an application in the order they were recorded as a sequence of event
        notifications.
        """
        return self._notification_log

    @property
    def log(self) -> LocalNotificationLog:
        """Deprecated alias for :attr:`notification_log`."""
        warn(
            "'log' is deprecated, use 'notification_log' instead",
            DeprecationWarning,
            stacklevel=2,
        )
        return self._notification_log
673
|
+
|
|
674
|
+
    def construct_env(self, name: str, env: EnvType | None = None) -> Environment:
        """Constructs environment from which application will be configured.

        Precedence, lowest to highest: class-level ``env`` defaults,
        the derived ``IS_SNAPSHOTTING_ENABLED`` flag, ``os.environ``,
        then the ``env`` argument.
        """
        _env = dict(type(self).env)
        # Enable snapshotting when the class declares it or sets intervals.
        if type(self).is_snapshotting_enabled or type(self).snapshotting_intervals:
            _env["IS_SNAPSHOTTING_ENABLED"] = "y"
        _env.update(os.environ)
        if env is not None:
            _env.update(env)
        return Environment(name, _env)

    def construct_factory(self, env: Environment) -> InfrastructureFactory:
        """Constructs an :class:`~eventsourcing.persistence.InfrastructureFactory`
        for use by the application.
        """
        return InfrastructureFactory.construct(env)

    def construct_mapper(self) -> Mapper[TAggregateID]:
        """Constructs a :class:`~eventsourcing.persistence.Mapper`
        for use by the application.
        """
        return self.factory.mapper(transcoder=self.construct_transcoder())

    def construct_transcoder(self) -> Transcoder:
        """Constructs a :class:`~eventsourcing.persistence.Transcoder`
        for use by the application.
        """
        transcoder = self.factory.transcoder()
        # Individual transcodings are only registered on JSONTranscoder.
        if isinstance(transcoder, JSONTranscoder):
            self.register_transcodings(transcoder)
        return transcoder

    def register_transcodings(self, transcoder: JSONTranscoder) -> None:
        """Registers :class:`~eventsourcing.persistence.Transcoding`
        objects on given :class:`~eventsourcing.persistence.JSONTranscoder`.

        Subclasses can override this to register additional transcodings.
        """
        transcoder.register(UUIDAsHex())
        transcoder.register(DecimalAsStr())
        transcoder.register(DatetimeAsISO())
|
+
def construct_recorder(self) -> ApplicationRecorder:
|
|
714
|
+
"""Constructs an :class:`~eventsourcing.persistence.ApplicationRecorder`
|
|
715
|
+
for use by the application.
|
|
716
|
+
"""
|
|
717
|
+
return self.factory.application_recorder()
|
|
718
|
+
|
|
719
|
+
def construct_event_store(self) -> EventStore[TAggregateID]:
|
|
720
|
+
"""Constructs an :class:`~eventsourcing.persistence.EventStore`
|
|
721
|
+
for use by the application to store and retrieve aggregate
|
|
722
|
+
:class:`~eventsourcing.domain.AggregateEvent` objects.
|
|
723
|
+
"""
|
|
724
|
+
return self.factory.event_store(
|
|
725
|
+
mapper=self.mapper,
|
|
726
|
+
recorder=self.recorder,
|
|
727
|
+
)
|
|
728
|
+
|
|
729
|
+
def construct_snapshot_store(self) -> EventStore[TAggregateID]:
|
|
730
|
+
"""Constructs an :py:class:`~eventsourcing.persistence.EventStore`
|
|
731
|
+
for use by the application to store and retrieve aggregate
|
|
732
|
+
:class:`~eventsourcing.domain.Snapshot` objects.
|
|
733
|
+
"""
|
|
734
|
+
recorder = self.factory.aggregate_recorder(purpose="snapshots")
|
|
735
|
+
return self.factory.event_store(
|
|
736
|
+
mapper=self.mapper,
|
|
737
|
+
recorder=recorder,
|
|
738
|
+
)
|
|
739
|
+
|
|
740
|
+
def construct_repository(self) -> Repository[TAggregateID]:
|
|
741
|
+
"""Constructs a :py:class:`Repository` for use by the application."""
|
|
742
|
+
cache_maxsize_envvar = self.env.get(self.AGGREGATE_CACHE_MAXSIZE)
|
|
743
|
+
cache_maxsize = int(cache_maxsize_envvar) if cache_maxsize_envvar else None
|
|
744
|
+
return Repository(
|
|
745
|
+
event_store=self.events,
|
|
746
|
+
snapshot_store=self.snapshots,
|
|
747
|
+
cache_maxsize=cache_maxsize,
|
|
748
|
+
fastforward=strtobool(self.env.get(self.AGGREGATE_CACHE_FASTFORWARD, "y")),
|
|
749
|
+
fastforward_skipping=strtobool(
|
|
750
|
+
self.env.get(self.AGGREGATE_CACHE_FASTFORWARD_SKIPPING, "n")
|
|
751
|
+
),
|
|
752
|
+
deepcopy_from_cache=strtobool(
|
|
753
|
+
self.env.get(self.DEEPCOPY_FROM_AGGREGATE_CACHE, "y")
|
|
754
|
+
),
|
|
755
|
+
)
|
|
756
|
+
|
|
757
|
+
def construct_notification_log(self) -> LocalNotificationLog:
    """Constructs a :class:`LocalNotificationLog` for use by the application."""
    return LocalNotificationLog(
        self.recorder,
        section_size=self.log_section_size,
    )
def save(
    self,
    *objs: MutableOrImmutableAggregate[TAggregateID]
    | DomainEventProtocol[TAggregateID]
    | None,
    **kwargs: Any,
) -> list[Recording[TAggregateID]]:
    """Collects pending events from the given aggregates and
    stores them in the application's event store.

    Returns the recordings made for the collected events.
    """
    processing: ProcessingEvent[TAggregateID] = ProcessingEvent()
    processing.collect_events(*objs, **kwargs)
    recordings = self._record(processing)
    self._take_snapshots(processing)
    self._notify(recordings)
    self.notify(processing.events)  # Deprecated.
    return recordings
def _record(
    self, processing_event: ProcessingEvent[TAggregateID]
) -> list[Recording[TAggregateID]]:
    """Records the given processing event in the application's recorder,
    and refreshes cached aggregates when fast-forwarding is disabled.
    """
    recordings = self.events.put(
        processing_event.events,
        tracking=processing_event.tracking,
        **processing_event.saved_kwargs,
    )
    # Without fast-forwarding, cached entries would go stale after a save,
    # so update the cache with the saved aggregates now.
    cache = self.repository.cache
    if cache and not self.repository.fastforward:
        for aggregate_id, aggregate in processing_event.aggregates.items():
            cache.put(aggregate_id, aggregate)
    return recordings
def _take_snapshots(self, processing_event: ProcessingEvent[TAggregateID]) -> None:
    """Takes snapshots for saved events whose aggregates have a configured
    snapshotting interval that divides the event's version.
    """
    # Nothing to do unless snapshotting is enabled and intervals are set.
    if not (self.snapshots and self.snapshotting_intervals):
        return
    for event in processing_event.events:
        try:
            aggregate = processing_event.aggregates[event.originator_id]
        except KeyError:
            # Event without a collected aggregate: skip it.
            continue
        interval = self.snapshotting_intervals.get(type(aggregate))
        if interval is None or event.originator_version % interval != 0:
            continue
        # Prefer a registered projector; fall back to the default projector,
        # which only works for events implementing the 'can mutate' protocol.
        try:
            projector_func = self.snapshotting_projectors[type(aggregate)]
        except KeyError:
            if not isinstance(event, CanMutateProtocol):
                msg = (
                    f"Cannot take snapshot for {type(aggregate)} with "
                    "default project_aggregate() function, because its "
                    f"domain event {type(event)} does not implement "
                    "the 'can mutate' protocol (see CanMutateProtocol)."
                    f" Please define application class {type(self)}"
                    " with class variable 'snapshotting_projectors', "
                    f"to be a dict that has {type(aggregate)} as a key "
                    "with the aggregate projector function for "
                    f"{type(aggregate)} as the value for that key."
                )
                raise ProgrammingError(msg) from None

            projector_func = project_aggregate
        self.take_snapshot(
            aggregate_id=event.originator_id,
            version=event.originator_version,
            projector_func=projector_func,
        )
def take_snapshot(
    self,
    aggregate_id: TAggregateID,
    version: int | None = None,
    projector_func: ProjectorFunction[Any, Any] = project_aggregate,
) -> None:
    """Takes a snapshot of the recorded state of the aggregate, and puts
    the snapshot in the snapshot store.

    Raises AssertionError when snapshotting is not enabled, or when no
    snapshot class is configured on either the aggregate or the application.
    """
    snapshot_store = self.snapshots
    if snapshot_store is None:
        msg = (
            "Can't take snapshot without snapshots store. Please "
            "set environment variable IS_SNAPSHOTTING_ENABLED to "
            "a true value (e.g. 'y'), or set 'is_snapshotting_enabled' "
            "on application class, or set 'snapshotting_intervals' on "
            "application class."
        )
        raise AssertionError(msg)
    # Reconstruct the aggregate at the requested version (or latest).
    aggregate: BaseAggregate[TAggregateID] = self.repository.get(
        aggregate_id, version=version, projector_func=projector_func
    )
    # A nested 'Snapshot' class on the aggregate wins over the application's.
    snapshot_class = getattr(type(aggregate), "Snapshot", type(self).snapshot_class)
    if snapshot_class is None:
        msg = (
            "Neither application nor aggregate have a snapshot class. "
            f"Please either define a nested 'Snapshot' class on {type(aggregate)} "
            f"or set class attribute 'snapshot_class' on {type(self)}."
        )
        raise AssertionError(msg)

    snapshot_store.put([snapshot_class.take(aggregate)])
def notify(self, new_events: list[DomainEventProtocol[TAggregateID]]) -> None:
    """Deprecated.

    Hook invoked after new aggregate events have been saved. The base
    implementation is a no-op; subclasses may override it to take action
    when new domain events have been saved.
    """
def _notify(self, recordings: list[Recording[TAggregateID]]) -> None:
    """Hook invoked after new aggregate events have been saved.

    The base implementation is a no-op; subclasses may override it to
    take action when new domain events have been saved.
    """
def close(self) -> None:
    """Sets the closing flag, then closes the infrastructure factory."""
    closing_flag = self.closing
    closing_flag.set()
    self.factory.close()
def __enter__(self) -> Self:
    """Enters the runtime context: enters the factory's context and
    returns this application instance.
    """
    self.factory.__enter__()
    return self
def __exit__(
    self,
    exc_type: type[BaseException] | None,
    exc_val: BaseException | None,
    exc_tb: TracebackType | None,
) -> None:
    """Exits the runtime context: closes the application, then exits the
    factory's context, forwarding any exception information to it.
    """
    self.close()
    self.factory.__exit__(exc_type, exc_val, exc_tb)
def __del__(self) -> None:
    """Best-effort close on garbage collection.

    AttributeError is ignored so that partially initialised instances
    (e.g. when __init__ raised before setting attributes) can be collected.
    """
    try:
        self.close()
    except AttributeError:
        pass
# Type variable for application subclasses, bound to Application[Any], for
# generic code that constructs or operates on a particular application type.
TApplication = TypeVar("TApplication", bound=Application[Any])
class AggregateNotFoundError(EventSourcingError):
    """Raised when a :class:`Repository` does not contain the requested
    :class:`~eventsourcing.domain.Aggregate` object.
    """
class EventSourcedLog(Generic[TDomainEvent]):
    """Constructs a sequence of domain events, like an aggregate.

    But unlike an aggregate the events can be triggered
    and selected for use in an application without
    reconstructing a current state from all the events.

    This allows an indefinitely long sequence of events to be
    generated and used without the practical restrictions of
    projecting the events into a current state before they
    can be used, which is useful e.g. for logging and
    progressively discovering all the aggregate IDs of a
    particular type in an application.
    """

    def __init__(
        self,
        events: EventStore[Any],
        originator_id: UUID,
        logged_cls: type[TDomainEvent],  # TODO: Rename to 'event_class' in v10.
    ):
        """Initialises the log with an event store, the fixed originator ID
        under which the log's events are sequenced, and the class of events
        to be logged.
        """
        self.events = events
        self.originator_id = originator_id
        self.logged_cls = logged_cls  # TODO: Rename to 'event_class' in v10.

    def trigger_event(
        self,
        next_originator_version: int | None = None,
        **kwargs: Any,
    ) -> TDomainEvent:
        """Constructs and returns a new log event."""
        return self._trigger_event(
            logged_cls=self.logged_cls,
            next_originator_version=next_originator_version,
            **kwargs,
        )

    def _trigger_event(
        self,
        logged_cls: type[SDomainEvent],
        next_originator_version: int | None = None,
        **kwargs: Any,
    ) -> SDomainEvent:
        """Constructs and returns a new log event of the given class.

        When no version is given, the next version continues from the last
        logged event, or starts at the aggregate initial version for an
        empty log.
        """
        if next_originator_version is None:
            last_logged = self.get_last()
            if last_logged is None:
                next_originator_version = Aggregate.INITIAL_VERSION
            else:
                next_originator_version = last_logged.originator_version + 1

        return logged_cls(
            originator_id=self.originator_id,
            originator_version=next_originator_version,
            timestamp=datetime_now_with_tzinfo(),
            **kwargs,
        )

    def get_first(self) -> TDomainEvent | None:
        """Selects the first logged event, or returns None if there is none."""
        # next() with a default is the idiomatic form of the previous
        # try/except StopIteration.
        return next(self.get(limit=1), None)

    def get_last(self) -> TDomainEvent | None:
        """Selects the last logged event, or returns None if there is none."""
        return next(self.get(desc=True, limit=1), None)

    def get(
        self,
        *,
        gt: int | None = None,
        lte: int | None = None,
        desc: bool = False,
        limit: int | None = None,
    ) -> Iterator[TDomainEvent]:
        """Selects a range of logged events with limit,
        with ascending or descending order.
        """
        return cast(
            "Iterator[TDomainEvent]",
            self.events.get(
                originator_id=self.originator_id,
                gt=gt,
                lte=lte,
                desc=desc,
                limit=limit,
            ),
        )