eventsourcing 9.5.0b3__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- eventsourcing/__init__.py +0 -0
- eventsourcing/application.py +998 -0
- eventsourcing/cipher.py +107 -0
- eventsourcing/compressor.py +15 -0
- eventsourcing/cryptography.py +91 -0
- eventsourcing/dcb/__init__.py +0 -0
- eventsourcing/dcb/api.py +144 -0
- eventsourcing/dcb/application.py +159 -0
- eventsourcing/dcb/domain.py +369 -0
- eventsourcing/dcb/msgpack.py +38 -0
- eventsourcing/dcb/persistence.py +193 -0
- eventsourcing/dcb/popo.py +178 -0
- eventsourcing/dcb/postgres_tt.py +704 -0
- eventsourcing/dcb/tests.py +608 -0
- eventsourcing/dispatch.py +80 -0
- eventsourcing/domain.py +1964 -0
- eventsourcing/interface.py +164 -0
- eventsourcing/persistence.py +1429 -0
- eventsourcing/popo.py +267 -0
- eventsourcing/postgres.py +1441 -0
- eventsourcing/projection.py +502 -0
- eventsourcing/py.typed +0 -0
- eventsourcing/sqlite.py +816 -0
- eventsourcing/system.py +1203 -0
- eventsourcing/tests/__init__.py +3 -0
- eventsourcing/tests/application.py +483 -0
- eventsourcing/tests/domain.py +105 -0
- eventsourcing/tests/persistence.py +1744 -0
- eventsourcing/tests/postgres_utils.py +131 -0
- eventsourcing/utils.py +257 -0
- eventsourcing-9.5.0b3.dist-info/METADATA +253 -0
- eventsourcing-9.5.0b3.dist-info/RECORD +35 -0
- eventsourcing-9.5.0b3.dist-info/WHEEL +4 -0
- eventsourcing-9.5.0b3.dist-info/licenses/AUTHORS +10 -0
- eventsourcing-9.5.0b3.dist-info/licenses/LICENSE +29 -0
eventsourcing/system.py
ADDED
@@ -0,0 +1,1203 @@
from __future__ import annotations

import inspect
import threading
import traceback
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from queue import Full, Queue
from types import FrameType, ModuleType
from typing import TYPE_CHECKING, Any, ClassVar, Generic, cast

from eventsourcing.application import (
    Application,
    NotificationLog,
    ProgrammingError,
    Section,
    TApplication,
)
from eventsourcing.domain import (
    DomainEventProtocol,
    MutableOrImmutableAggregate,
    TAggregateID,
)
from eventsourcing.persistence import (
    Mapper,
    Notification,
    ProcessRecorder,
    Recording,
    Tracking,
)
from eventsourcing.projection import EventSourcedProjection
from eventsourcing.utils import EnvType, get_topic, resolve_topic

if TYPE_CHECKING:
    from collections.abc import Iterable, Iterator

    from typing_extensions import Self


ProcessingJob = tuple[DomainEventProtocol[TAggregateID], Tracking]


class RecordingEvent(Generic[TAggregateID]):
    def __init__(
        self,
        application_name: str,
        recordings: list[Recording[TAggregateID]],
        previous_max_notification_id: int | None,
    ):
        self.application_name = application_name
        self.recordings: list[Recording[TAggregateID]] = recordings
        self.previous_max_notification_id = previous_max_notification_id


ConvertingJob = RecordingEvent[TAggregateID] | Sequence[Notification] | None


class Follower(EventSourcedProjection[TAggregateID]):
    """Extends the :class:`~eventsourcing.projection.EventSourcedProjection` class
    by pulling notification objects from its notification log readers, by converting
    the notification objects to domain events and tracking objects and by processing
    the reconstructed domain event objects.
    """

    pull_section_size = 10

    def __init_subclass__(cls, **kwargs: Any) -> None:
        super().__init_subclass__(**kwargs)
        # for backwards compatibility, set "topics" if has "follow_topics".
        cls.topics = getattr(cls, "follow_topics", cls.topics)

    def __init__(self, env: EnvType | None = None) -> None:
        super().__init__(env)
        self.readers: dict[str, NotificationLogReader] = {}
        self.mappers: dict[str, Mapper[TAggregateID]] = {}
        self.is_threading_enabled = False

    def follow(self, name: str, log: NotificationLog) -> None:
        """Constructs a notification log reader and a mapper for
        the named application, and adds them to its collections
        of readers and mappers.
        """
        assert isinstance(self.recorder, ProcessRecorder)
        reader = NotificationLogReader(log, section_size=self.pull_section_size)
        env = self.construct_env(name, self.env)
        factory = self.construct_factory(env)
        mapper = factory.mapper(
            self.construct_transcoder(), mapper_class=type(self.mapper)
        )
        self.readers[name] = reader
        self.mappers[name] = mapper

    # @retry(IntegrityError, max_attempts=100)
    def pull_and_process(
        self, leader_name: str, start: int | None = None, stop: int | None = None
    ) -> None:
        """Pull and process new domain event notifications."""
        if start is None:
            start = self.recorder.max_tracking_id(leader_name)
        for notifications in self.pull_notifications(
            leader_name, start=start, stop=stop, inclusive_of_start=False
        ):
            notifications_iter = self.filter_received_notifications(notifications)
            for domain_event, tracking in self.convert_notifications(
                leader_name, notifications_iter
            ):
                self.process_event(domain_event, tracking)

    def process_event(
        self, domain_event: DomainEventProtocol[TAggregateID], tracking: Tracking
    ) -> None:
        with self.processing_lock:
            super().process_event(domain_event, tracking)

    def pull_notifications(
        self,
        leader_name: str,
        start: int | None,
        stop: int | None = None,
        *,
        inclusive_of_start: bool = True,
    ) -> Iterator[Sequence[Notification]]:
        """Pulls batches of unseen :class:`~eventsourcing.persistence.Notification`
        objects from the notification log reader of the named application.
        """
        return self.readers[leader_name].select(
            start=start,
            stop=stop,
            topics=self.topics,
            inclusive_of_start=inclusive_of_start,
        )

    def filter_received_notifications(
        self, notifications: Sequence[Notification]
    ) -> Sequence[Notification]:
        if self.topics:
            return [n for n in notifications if n.topic in self.topics]
        return notifications

    def convert_notifications(
        self, leader_name: str, notifications: Iterable[Notification]
    ) -> list[ProcessingJob[TAggregateID]]:
        """Uses the given :class:`~eventsourcing.persistence.Mapper` to convert
        each received :class:`~eventsourcing.persistence.Notification`
        object to an :class:`~eventsourcing.domain.AggregateEvent` object
        paired with a :class:`~eventsourcing.persistence.Tracking` object.
        """
        mapper = self.mappers[leader_name]
        processing_jobs = []
        for notification in notifications:
            domain_event: DomainEventProtocol[TAggregateID] = mapper.to_domain_event(
                notification
            )
            tracking = Tracking(
                application_name=leader_name,
                notification_id=notification.id,
            )
            processing_jobs.append((domain_event, tracking))
        return processing_jobs
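
# Illustrative sketch, not part of the distributed module: the runners defined
# below wire followers to leaders with exactly these two calls. follow()
# registers a notification log reader and a mapper for the named leader, and
# pull_and_process() resumes from the follower's last recorded tracking position.
def _follower_catchup_sketch(
    leader: Application[Any], follower: Follower[Any]
) -> None:
    follower.follow(leader.name, leader.notification_log)
    follower.pull_and_process(leader.name)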


class RecordingEventReceiver(ABC, Generic[TAggregateID]):
    """Abstract base class for objects that may receive recording events."""

    @abstractmethod
    def receive_recording_event(
        self, new_recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        """Receives a recording event."""


class Leader(Application[TAggregateID]):
    """Extends the :class:`~eventsourcing.application.Application`
    class by also being responsible for keeping track of
    followers, and prompting followers when there are new
    domain event notifications to be pulled and processed.
    """

    def __init__(self, env: EnvType | None = None) -> None:
        super().__init__(env)
        self.previous_max_notification_id: int | None = None
        self.followers: list[RecordingEventReceiver[TAggregateID]] = []

    def lead(self, follower: RecordingEventReceiver[TAggregateID]) -> None:
        """Adds given follower to a list of followers."""
        self.followers.append(follower)

    def save(
        self,
        *objs: MutableOrImmutableAggregate[TAggregateID]
        | DomainEventProtocol[TAggregateID]
        | None,
        **kwargs: Any,
    ) -> list[Recording[TAggregateID]]:
        if self.previous_max_notification_id is None:
            self.previous_max_notification_id = self.recorder.max_notification_id()
        return super().save(*objs, **kwargs)

    def _notify(self, recordings: list[Recording[TAggregateID]]) -> None:
        """Calls :func:`receive_recording_event` on each follower
        whenever new events have just been saved.
        """
        super()._notify(recordings)
        if self.notify_topics:
            recordings = [
                r for r in recordings if r.notification.topic in self.notify_topics
            ]
        if recordings:
            recording_event = RecordingEvent(
                application_name=self.name,
                recordings=recordings,
                previous_max_notification_id=self.previous_max_notification_id,
            )
            self.previous_max_notification_id = recordings[-1].notification.id
            for follower in self.followers:
                follower.receive_recording_event(recording_event)
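
# Illustrative sketch, not part of the distributed module: any object that
# implements receive_recording_event() can subscribe to a Leader. After each
# successful save(), _notify() above passes it a RecordingEvent carrying the
# new recordings and the previous maximum notification ID, which receivers use
# to detect gaps and catch up.
def _leader_prompting_sketch(
    leader: Leader[Any], receiver: RecordingEventReceiver[Any]
) -> None:
    leader.lead(receiver)
    # Subsequent calls to leader.save(...) will now prompt the receiver.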


class ProcessApplication(Leader[TAggregateID], Follower[TAggregateID]):
    """Base class for event processing applications
    that are both "leaders" and "followers".
    """


class System:
    """Defines a system of applications."""

    __caller_modules: ClassVar[dict[int, ModuleType]] = {}

    def __init__(
        self,
        pipes: Iterable[Iterable[type[Application[Any]]]],
    ):
        # Remember the caller frame's module, so that we might identify a topic.
        caller_frame = cast(FrameType, inspect.currentframe()).f_back
        module = cast(ModuleType, inspect.getmodule(caller_frame))
        type(self).__caller_modules[id(self)] = module  # noqa: SLF001

        # Build nodes and edges.
        self.edges: list[tuple[str, str]] = []
        classes: dict[str, type[Application[Any]]] = {}
        for pipe in pipes:
            follower_cls = None
            for cls in pipe:
                classes[cls.name] = cls
                if follower_cls is None:
                    follower_cls = cls
                else:
                    leader_cls = follower_cls
                    follower_cls = cls
                    edge = (leader_cls.name, follower_cls.name)
                    if edge not in self.edges:
                        self.edges.append(edge)

        self.nodes: dict[str, str] = {}
        for name, cls in classes.items():
            self.nodes[name] = get_topic(cls)

        # Identify leaders and followers.
        self.follows: dict[str, list[str]] = defaultdict(list)
        self.leads: dict[str, list[str]] = defaultdict(list)
        for edge in self.edges:
            self.leads[edge[0]].append(edge[1])
            self.follows[edge[1]].append(edge[0])

        # Identify singles.
        self.singles = []
        for name in classes:
            if name not in self.leads and name not in self.follows:
                self.singles.append(name)

        # Check followers are followers.
        for name in self.follows:
            if not issubclass(classes[name], Follower):
                msg = f"Not a follower class: {classes[name]}"
                raise TypeError(msg)

        # Check each process is a process application class.
        for name in self.processors:
            if not issubclass(classes[name], ProcessApplication):
                msg = f"Not a process application class: {classes[name]}"
                raise TypeError(msg)

    @property
    def leaders(self) -> list[str]:
        return list(self.leads.keys())

    @property
    def leaders_only(self) -> list[str]:
        return [name for name in self.leads if name not in self.follows]

    @property
    def followers(self) -> list[str]:
        return list(self.follows.keys())

    @property
    def processors(self) -> list[str]:
        return [name for name in self.leads if name in self.follows]

    def get_app_cls(self, name: str) -> type[Application[Any]]:
        cls = resolve_topic(self.nodes[name])
        assert issubclass(cls, Application)
        return cls

    def leader_cls(self, name: str) -> type[Leader[Any]]:
        cls = self.get_app_cls(name)
        if issubclass(cls, Leader):
            return cls
        cls = type(cls.name, (Leader, cls), {})
        assert issubclass(cls, Leader)
        return cls

    def follower_cls(self, name: str) -> type[Follower[Any]]:
        cls = self.get_app_cls(name)
        assert issubclass(cls, Follower)
        return cls

    @property
    def topic(self) -> str:
        """
        Returns a topic to the system object, if constructed as a module attribute.
        """
        topic: str | None = None
        module = System.__caller_modules[id(self)]
        for name, value in module.__dict__.items():
            if value is self:
                topic = module.__name__ + ":" + name
                assert resolve_topic(topic) is self
        if topic is None:
            msg = f"Unable to compute topic for system object: {self}"
            raise ProgrammingError(msg)
        return topic
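
# Illustrative sketch, not part of the distributed module: defining a system
# from hypothetical application classes. A pipe lists applications in leader-
# to-follower order; System derives the edges, leaders, followers, processors
# and singles from the pipes.
def _system_definition_sketch() -> None:
    class Orders(Application):  # hypothetical upstream application
        pass

    class Analytics(ProcessApplication):  # hypothetical downstream processor
        pass

    system = System(pipes=[[Orders, Analytics]])
    assert system.leaders == ["Orders"]
    assert system.followers == ["Analytics"]
    assert system.processors == []  # Analytics follows but does not lead
    assert system.singles == []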


class Runner(ABC, Generic[TAggregateID]):
    """Abstract base class for system runners."""

    def __init__(self, system: System, env: EnvType | None = None):
        self.system = system
        self.env = env
        self.is_started = False

    @abstractmethod
    def start(self) -> None:
        """Starts the runner."""
        if self.is_started:
            raise RunnerAlreadyStartedError
        self.is_started = True

    @abstractmethod
    def stop(self) -> None:
        """Stops the runner."""

    @abstractmethod
    def get(self, cls: type[TApplication]) -> TApplication:
        """Returns an application instance for given application class."""

    def __enter__(self) -> Self:
        self.start()
        return self

    def __exit__(self, *args: object, **kwargs: Any) -> None:
        self.stop()


class RunnerAlreadyStartedError(Exception):
    """Raised when runner is already started."""


class NotificationPullingError(Exception):
    """Raised when pulling notifications fails."""


class NotificationConvertingError(Exception):
    """Raised when converting notifications fails."""


class EventProcessingError(Exception):
    """Raised when event processing fails."""


class SingleThreadedRunner(Runner[TAggregateID], RecordingEventReceiver[TAggregateID]):
    """Runs a :class:`System` in a single thread."""

    def __init__(self, system: System, env: EnvType | None = None):
        """Initialises runner with the given :class:`System`."""
        super().__init__(system=system, env=env)
        self.apps: dict[str, Application[TAggregateID]] = {}
        self._recording_events_received: list[RecordingEvent[TAggregateID]] = []
        self._prompted_names_lock = threading.Lock()
        self._prompted_names: set[str] = set()
        self._processing_lock = threading.Lock()

        # Construct followers.
        for name in self.system.followers:
            self.apps[name] = self.system.follower_cls(name)(env=self.env)

        # Construct leaders.
        for name in self.system.leaders_only:
            leader = self.system.leader_cls(name)(env=self.env)
            self.apps[name] = leader

        # Construct singles.
        for name in self.system.singles:
            single = self.system.get_app_cls(name)(env=self.env)
            self.apps[name] = single

    def start(self) -> None:
        """Starts the runner. The applications mentioned in the system definition
        are constructed. The followers are set up to follow the applications
        they are defined as following in the system definition. And the leaders
        are set up to lead the runner itself.
        """
        super().start()

        # Setup followers to follow leaders.
        for edge in self.system.edges:
            leader_name = edge[0]
            follower_name = edge[1]
            leader = cast("Leader[Any]", self.apps[leader_name])
            follower = cast(Follower[Any], self.apps[follower_name])
            assert isinstance(leader, Leader)
            assert isinstance(follower, Follower)
            follower.follow(leader_name, leader.notification_log)

        # Setup leaders to lead this runner.
        for name in self.system.leaders:
            leader = cast("Leader[Any]", self.apps[name])
            assert isinstance(leader, Leader)
            leader.lead(self)

    def receive_recording_event(
        self, new_recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        """Receives recording event by appending the name of the leader
        to a list of prompted names.

        Then, unless this method has previously been called and not yet returned,
        each of the prompted names is resolved to a leader application, and its
        followers pull and process events from that application. This may lead to
        further names being added to the list of prompted names. This process
        continues until there are no more prompted names. In this way, a system
        of applications will process all events in a single thread.
        """
        leader_name = new_recording_event.application_name
        with self._prompted_names_lock:
            self._prompted_names.add(leader_name)

        if self._processing_lock.acquire(blocking=False):
            try:
                while True:
                    with self._prompted_names_lock:
                        prompted_names = self._prompted_names
                        self._prompted_names = set()

                    if not prompted_names:
                        break

                    for leader_name in prompted_names:
                        for follower_name in self.system.leads[leader_name]:
                            follower = cast(Follower[Any], self.apps[follower_name])
                            follower.pull_and_process(leader_name)

            finally:
                self._processing_lock.release()

    def stop(self) -> None:
        for app in self.apps.values():
            app.close()
        self.apps.clear()

    def get(self, cls: type[TApplication]) -> TApplication:
        app = self.apps[cls.name]
        assert isinstance(app, cls)
        return app
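
# Illustrative sketch, not part of the distributed module: running a system in
# a single thread. Because leaders prompt this runner synchronously from
# _notify(), followers have processed new events by the time save() returns.
# Assumes `system` was defined from module-level application classes, since
# runners re-resolve application classes from their topics.
def _single_threaded_runner_sketch(system: System) -> None:
    with SingleThreadedRunner(system) as runner:
        # e.g. orders = runner.get(Orders)  # hypothetical leader application
        # orders.save(...)                  # downstream processing is synchronous
        pass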


class NewSingleThreadedRunner(
    Runner[TAggregateID], RecordingEventReceiver[TAggregateID]
):
    """Runs a :class:`System` in a single thread."""

    def __init__(self, system: System, env: EnvType | None = None):
        """Initialises runner with the given :class:`System`."""
        super().__init__(system=system, env=env)
        self.apps: dict[str, Application[Any]] = {}
        self._recording_events_received: list[RecordingEvent[TAggregateID]] = []
        self._recording_events_received_lock = threading.Lock()
        self._processing_lock = threading.Lock()
        self._previous_max_notification_ids: dict[str, int] = {}

        # Construct followers.
        for name in self.system.followers:
            self.apps[name] = self.system.follower_cls(name)(env=self.env)

        # Construct leaders.
        for name in self.system.leaders_only:
            leader = self.system.leader_cls(name)(env=self.env)
            self.apps[name] = leader

        # Construct singles.
        for name in self.system.singles:
            single = self.system.get_app_cls(name)(env=self.env)
            self.apps[name] = single

    def start(self) -> None:
        """Starts the runner.
        The applications are constructed, and setup to lead and follow
        each other, according to the system definition.
        The followers are setup to follow the applications they follow
        (have a notification log reader with the notification log of the
        leader), and their leaders are setup to lead the runner itself
        (send prompts).
        """
        super().start()

        # Setup followers to follow leaders.
        for edge in self.system.edges:
            leader_name = edge[0]
            follower_name = edge[1]
            leader = cast("Leader[Any]", self.apps[leader_name])
            follower = cast(Follower[Any], self.apps[follower_name])
            assert isinstance(leader, Leader)
            assert isinstance(follower, Follower)
            follower.follow(leader_name, leader.notification_log)

        # Setup leaders to notify followers.
        for name in self.system.leaders:
            leader = cast("Leader[Any]", self.apps[name])
            assert isinstance(leader, Leader)
            leader.lead(self)

    def receive_recording_event(
        self, new_recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        """Receives recording event by appending it to list of received recording
        events.

        Unless this method has previously been called and not yet returned, it
        will then attempt to make the followers process all received recording
        events, until there are none remaining.
        """
        with self._recording_events_received_lock:
            self._recording_events_received.append(new_recording_event)

        if self._processing_lock.acquire(blocking=False):
            try:
                while True:
                    with self._recording_events_received_lock:
                        recording_events = self._recording_events_received
                        self._recording_events_received = []

                    if not recording_events:
                        break

                    for recording_event in recording_events:
                        leader_name = recording_event.application_name
                        previous_max_notification_id = (
                            self._previous_max_notification_ids.get(leader_name, 0)
                        )

                        # Ignore recording event if already seen a subsequent.
                        if (
                            recording_event.previous_max_notification_id is not None
                            and recording_event.previous_max_notification_id
                            < previous_max_notification_id
                        ):
                            continue

                        # Catch up if there is a gap in sequence of recording events.
                        if (
                            recording_event.previous_max_notification_id is None
                            or recording_event.previous_max_notification_id
                            > previous_max_notification_id
                        ):
                            for follower_name in self.system.leads[leader_name]:
                                follower = self.apps[follower_name]
                                assert isinstance(follower, Follower)
                                start = follower.recorder.max_tracking_id(leader_name)
                                stop = recording_event.recordings[0].notification.id - 1
                                follower.pull_and_process(
                                    leader_name=leader_name,
                                    start=start,
                                    stop=stop,
                                )
                        for recording in recording_event.recordings:
                            for follower_name in self.system.leads[leader_name]:
                                follower = self.apps[follower_name]
                                assert isinstance(follower, Follower)
                                if (
                                    follower.topics
                                    and recording.notification.topic
                                    not in follower.topics
                                ):
                                    continue
                                follower.process_event(
                                    domain_event=recording.domain_event,
                                    tracking=Tracking(
                                        application_name=recording_event.application_name,
                                        notification_id=recording.notification.id,
                                    ),
                                )

                        self._previous_max_notification_ids[leader_name] = (
                            recording_event.recordings[-1].notification.id
                        )

            finally:
                self._processing_lock.release()

    def stop(self) -> None:
        for app in self.apps.values():
            app.close()
        self.apps.clear()

    def get(self, cls: type[TApplication]) -> TApplication:
        app = self.apps[cls.name]
        assert isinstance(app, cls)
        return app


class MultiThreadedRunner(Runner[TAggregateID]):
    """Runs a :class:`System` with one :class:`MultiThreadedRunnerThread`
    for each :class:`Follower` in the system definition.
    """

    def __init__(self, system: System, env: EnvType | None = None):
        """Initialises runner with the given :class:`System`."""
        super().__init__(system=system, env=env)
        self.apps: dict[str, Application[Any]] = {}
        self.threads: dict[str, MultiThreadedRunnerThread[TAggregateID]] = {}
        self.has_errored = threading.Event()

        # Construct followers.
        for follower_name in self.system.followers:
            follower_class = self.system.follower_cls(follower_name)
            try:
                follower = follower_class(env=self.env)
            except Exception:
                self.has_errored.set()
                raise
            self.apps[follower_name] = follower

        # Construct non-follower leaders.
        for leader_name in self.system.leaders_only:
            self.apps[leader_name] = self.system.leader_cls(leader_name)(env=self.env)

        # Construct singles.
        for name in self.system.singles:
            single = self.system.get_app_cls(name)(env=self.env)
            self.apps[name] = single

    def start(self) -> None:
        """Starts the runner.
        A multi-threaded runner thread is started for each
        'follower' application in the system, and constructs
        an instance of each non-follower leader application in
        the system. The followers are then setup to follow the
        applications they follow (have a notification log reader
        with the notification log of the leader), and their leaders
        are setup to lead the follower's thread (send prompts).
        """
        super().start()

        # Construct followers.
        thread: MultiThreadedRunnerThread[TAggregateID]
        for follower_name in self.system.followers:
            follower = cast(Follower[Any], self.apps[follower_name])

            thread = MultiThreadedRunnerThread(
                follower=follower,
                has_errored=self.has_errored,
            )
            self.threads[follower.name] = thread
            thread.start()

        # Wait until all the threads have started.
        for thread in self.threads.values():
            thread.has_started.wait()

        # Lead and follow.
        for edge in self.system.edges:
            leader = cast("Leader[Any]", self.apps[edge[0]])
            follower = cast(Follower[Any], self.apps[edge[1]])
            follower.follow(leader.name, leader.notification_log)
            thread = self.threads[follower.name]
            leader.lead(thread)

    def watch_for_errors(self, timeout: float | None = None) -> bool:
        if self.has_errored.wait(timeout=timeout):
            self.stop()
        return self.has_errored.is_set()

    def stop(self) -> None:
        threads = self.threads.values()
        for thread in threads:
            thread.stop()
        for thread in threads:
            thread.join(timeout=2)
        for app in self.apps.values():
            app.close()
        self.apps.clear()
        self.reraise_thread_errors()

    def reraise_thread_errors(self) -> None:
        for thread in self.threads.values():
            if thread.error:
                raise thread.error

    def get(self, cls: type[TApplication]) -> TApplication:
        app = self.apps[cls.name]
        assert isinstance(app, cls)
        return app
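
# Illustrative sketch, not part of the distributed module: running a system
# with one MultiThreadedRunnerThread per follower, so downstream processing is
# asynchronous with respect to save(). watch_for_errors() blocks until a thread
# errors or the timeout elapses; on error it stops the runner, and stop()
# re-raises the first recorded thread error. Assumes `system` was defined from
# module-level application classes.
def _multi_threaded_runner_sketch(system: System) -> None:
    runner = MultiThreadedRunner(system)
    runner.start()
    try:
        runner.watch_for_errors(timeout=1.0)
    finally:
        if runner.apps:  # not already stopped by watch_for_errors()
            runner.stop()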


class MultiThreadedRunnerThread(RecordingEventReceiver[TAggregateID], threading.Thread):
    """Runs one :class:`~eventsourcing.system.Follower` application in
    a :class:`~eventsourcing.system.MultiThreadedRunner`.
    """

    def __init__(
        self,
        follower: Follower[Any],
        has_errored: threading.Event,
    ):
        super().__init__(daemon=True)
        self.follower = follower
        self.has_errored = has_errored
        self.error: Exception | None = None
        self.is_stopping = threading.Event()
        self.has_started = threading.Event()
        self.is_prompted = threading.Event()
        self.prompted_names: list[str] = []
        self.prompted_names_lock = threading.Lock()
        self.is_running = threading.Event()

    def run(self) -> None:
        """Loops forever until stopped. The loop blocks on waiting
        for the 'is_prompted' event to be set, then calls
        :func:`~Follower.pull_and_process` method for each
        prompted name.
        """
        self.has_started.set()

        try:
            while not self.is_stopping.is_set():
                self.is_prompted.wait()

                with self.prompted_names_lock:
                    prompted_names = self.prompted_names
                    self.prompted_names = []
                    self.is_prompted.clear()
                for name in prompted_names:
                    self.follower.pull_and_process(name)
        except Exception as e:
            self.error = EventProcessingError(str(e))
            self.error.__cause__ = e
            self.has_errored.set()

    def receive_recording_event(
        self, new_recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        """Receives prompt by appending name of
        leader to list of prompted names.
        """
        leader_name = new_recording_event.application_name
        with self.prompted_names_lock:
            if leader_name not in self.prompted_names:
                self.prompted_names.append(leader_name)
            self.is_prompted.set()

    def stop(self) -> None:
        self.is_stopping.set()
        self.is_prompted.set()


class NewMultiThreadedRunner(
    Runner[TAggregateID], RecordingEventReceiver[TAggregateID]
):
    """Runs a :class:`System` with multiple threads in a new way."""

    QUEUE_MAX_SIZE: int = 0

    def __init__(
        self,
        system: System,
        env: EnvType | None = None,
    ):
        """Initialises runner with the given :class:`System`."""
        super().__init__(system=system, env=env)
        self.apps: dict[str, Application[TAggregateID]] = {}
        self.pulling_threads: dict[str, list[PullingThread[TAggregateID]]] = {}
        self.processing_queues: dict[
            str, Queue[list[ProcessingJob[TAggregateID]] | None]
        ] = {}
        self.all_threads: list[
            PullingThread[TAggregateID]
            | ConvertingThread[TAggregateID]
            | ProcessingThread[TAggregateID]
        ] = []
        self.has_errored = threading.Event()

        # Construct followers.
        for follower_name in self.system.followers:
            follower_class = self.system.follower_cls(follower_name)
            try:
                follower = follower_class(env=self.env)
            except Exception:
                self.has_errored.set()
                raise
            self.apps[follower_name] = follower

        # Construct non-follower leaders.
        for leader_name in self.system.leaders_only:
            self.apps[leader_name] = self.system.leader_cls(leader_name)(env=self.env)

        # Construct singles.
        for name in self.system.singles:
            single = self.system.get_app_cls(name)(env=self.env)
            self.apps[name] = single

    def start(self) -> None:
        """Starts the runner.

        A multi-threaded runner thread is started for each
        'follower' application in the system, and constructs
        an instance of each non-follower leader application in
        the system. The followers are then setup to follow the
        applications they follow (have a notification log reader
        with the notification log of the leader), and their leaders
        are setup to lead the follower's thread (send prompts).
        """
        super().start()

        # Start the processing threads.
        for follower_name in self.system.followers:
            follower = cast(Follower[Any], self.apps[follower_name])
            processing_queue: Queue[list[ProcessingJob[TAggregateID]] | None] = Queue(
                maxsize=self.QUEUE_MAX_SIZE
            )
            self.processing_queues[follower_name] = processing_queue
            processing_thread = ProcessingThread(
                processing_queue=processing_queue,
                follower=follower,
                has_errored=self.has_errored,
            )
            self.all_threads.append(processing_thread)
            processing_thread.start()

        for edge in self.system.edges:
            # Set up follower to pull notifications from leader.
            leader_name = edge[0]
            leader = cast("Leader[Any]", self.apps[leader_name])
            follower_name = edge[1]
            follower = cast(Follower[Any], self.apps[follower_name])
            follower.follow(leader.name, leader.notification_log)

            # Create converting queue.
            converting_queue: Queue[ConvertingJob[TAggregateID]] = Queue(
                maxsize=self.QUEUE_MAX_SIZE
            )

            # Start converting thread.
            converting_thread = ConvertingThread(
                converting_queue=converting_queue,
                processing_queue=self.processing_queues[follower_name],
                follower=follower,
                leader_name=leader_name,
                has_errored=self.has_errored,
            )
            self.all_threads.append(converting_thread)
            converting_thread.start()

            # Start pulling thread.
            pulling_thread = PullingThread(
                converting_queue=converting_queue,
                follower=follower,
                leader_name=leader_name,
                has_errored=self.has_errored,
            )
            self.all_threads.append(pulling_thread)
            pulling_thread.start()
            if leader_name not in self.pulling_threads:
                self.pulling_threads[leader_name] = []
            self.pulling_threads[leader_name].append(pulling_thread)

        # Wait until all the threads have started.
        for thread in self.all_threads:
            thread.has_started.wait()

        # Subscribe for notifications from leaders.
        for leader_name in self.system.leaders:
            leader = cast("Leader[Any]", self.apps[leader_name])
            assert isinstance(leader, Leader)
            leader.lead(self)

    def watch_for_errors(self, timeout: float | None = None) -> bool:
        if self.has_errored.wait(timeout=timeout):
            self.stop()
        return self.has_errored.is_set()

    def stop(self) -> None:
        for thread in self.all_threads:
            thread.stop()
        for thread in self.all_threads:
            thread.join(timeout=2)
        for app in self.apps.values():
            app.close()
        self.apps.clear()
        self.reraise_thread_errors()

    def reraise_thread_errors(self) -> None:
        for thread in self.all_threads:
            if thread.error:
                raise thread.error

    def get(self, cls: type[TApplication]) -> TApplication:
        app = self.apps[cls.name]
        assert isinstance(app, cls)
        return app

    def receive_recording_event(
        self, new_recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        for pulling_thread in self.pulling_threads[
            new_recording_event.application_name
        ]:
            pulling_thread.receive_recording_event(new_recording_event)


class PullingThread(threading.Thread, Generic[TAggregateID]):
    """Receives or pulls notifications from the given leader, and
    puts them on a queue for conversion into processing jobs.
    """

    def __init__(
        self,
        converting_queue: Queue[ConvertingJob[TAggregateID]],
        follower: Follower[Any],
        leader_name: str,
        has_errored: threading.Event,
    ):
        super().__init__(daemon=True)
        self.overflow_event = threading.Event()
        self.recording_event_queue: Queue[RecordingEvent[TAggregateID] | None] = Queue(
            maxsize=100
        )
        self.converting_queue: Queue[ConvertingJob[TAggregateID]] = converting_queue
        self.receive_lock = threading.Lock()
        self.follower = follower
        self.leader_name = leader_name
        self.error: Exception | None = None
        self.has_errored = has_errored
        self.is_stopping = threading.Event()
        self.has_started = threading.Event()
        self.mapper = self.follower.mappers[self.leader_name]
        self.previous_max_notification_id = self.follower.recorder.max_tracking_id(
            application_name=self.leader_name
        )

    def run(self) -> None:
        self.has_started.set()
        try:
            while not self.is_stopping.is_set():
                recording_event = self.recording_event_queue.get()
                self.recording_event_queue.task_done()
                if recording_event is None:
                    return
                # Ignore recording event if already seen a subsequent.
                if (
                    recording_event.previous_max_notification_id is not None
                    and self.previous_max_notification_id is not None
                    and recording_event.previous_max_notification_id
                    < self.previous_max_notification_id
                ):
                    continue

                # Catch up if there is a gap in sequence of recording events.
                if (
                    recording_event.previous_max_notification_id is None
                    or self.previous_max_notification_id is None
                    or recording_event.previous_max_notification_id
                    > self.previous_max_notification_id
                ):
                    start = self.previous_max_notification_id
                    stop = recording_event.recordings[0].notification.id - 1
                    for notifications in self.follower.pull_notifications(
                        self.leader_name,
                        start=start,
                        stop=stop,
                        inclusive_of_start=False,
                    ):
                        self.converting_queue.put(notifications)
                        self.previous_max_notification_id = notifications[-1].id
                self.converting_queue.put(recording_event)
                self.previous_max_notification_id = recording_event.recordings[
                    -1
                ].notification.id
        except Exception as e:
            self.error = NotificationPullingError(str(e))
            self.error.__cause__ = e
            self.has_errored.set()

    def receive_recording_event(
        self, recording_event: RecordingEvent[TAggregateID]
    ) -> None:
        try:
            self.recording_event_queue.put(recording_event, timeout=0)
        except Full:
            self.overflow_event.set()

    def stop(self) -> None:
        self.is_stopping.set()
        self.recording_event_queue.put(None)


class ConvertingThread(threading.Thread, Generic[TAggregateID]):
    """Converts notifications into processing jobs."""

    def __init__(
        self,
        converting_queue: Queue[ConvertingJob[TAggregateID]],
        processing_queue: Queue[list[ProcessingJob[TAggregateID]] | None],
        follower: Follower[Any],
        leader_name: str,
        has_errored: threading.Event,
    ):
        super().__init__(daemon=True)
        self.converting_queue: Queue[ConvertingJob[TAggregateID]] = converting_queue
        self.processing_queue: Queue[list[ProcessingJob[TAggregateID]] | None] = (
            processing_queue
        )
        self.follower = follower
        self.leader_name = leader_name
        self.error: Exception | None = None
        self.has_errored = has_errored
        self.is_stopping = threading.Event()
        self.has_started = threading.Event()
        self.mapper = self.follower.mappers[self.leader_name]

    def run(self) -> None:
        self.has_started.set()
        try:
            while True:
                recording_event_or_notifications = self.converting_queue.get()
                self.converting_queue.task_done()
                if (
                    self.is_stopping.is_set()
                    or recording_event_or_notifications is None
                ):
                    return

                processing_jobs = []

                if isinstance(recording_event_or_notifications, RecordingEvent):
                    recording_event = recording_event_or_notifications
                    for recording in recording_event.recordings:
                        if (
                            self.follower.topics
                            and recording.notification.topic not in self.follower.topics
                        ):
                            continue
                        tracking = Tracking(
                            application_name=recording_event.application_name,
                            notification_id=recording.notification.id,
                        )
                        processing_jobs.append((recording.domain_event, tracking))
                else:
                    notifications = recording_event_or_notifications
                    processing_jobs = self.follower.convert_notifications(
                        leader_name=self.leader_name, notifications=notifications
                    )
                if processing_jobs:
                    self.processing_queue.put(processing_jobs)
        except Exception as e:
            print(traceback.format_exc())  # noqa: T201
            self.error = NotificationConvertingError(str(e))
            self.error.__cause__ = e
            self.has_errored.set()

    def stop(self) -> None:
        self.is_stopping.set()
        self.converting_queue.put(None)


class ProcessingThread(threading.Thread, Generic[TAggregateID]):
    """A processing thread gets events from a processing queue, and
    calls the application's process_event() method.
    """

    def __init__(
        self,
        processing_queue: Queue[list[ProcessingJob[TAggregateID]] | None],
        follower: Follower[Any],
        has_errored: threading.Event,
    ):
        super().__init__(daemon=True)
        self.processing_queue: Queue[list[ProcessingJob[TAggregateID]] | None] = (
            processing_queue
        )
        self.follower = follower
        self.error: Exception | None = None
        self.has_errored = has_errored
        self.is_stopping = threading.Event()
        self.has_started = threading.Event()

    def run(self) -> None:
        self.has_started.set()
        try:
            while True:
                jobs = self.processing_queue.get()
                self.processing_queue.task_done()
                if self.is_stopping.is_set() or jobs is None:
                    return
                for domain_event, tracking in jobs:
                    self.follower.process_event(domain_event, tracking)
        except Exception as e:
            self.error = EventProcessingError(str(e))
            self.error.__cause__ = e
            self.has_errored.set()

    def stop(self) -> None:
        self.is_stopping.set()
        self.processing_queue.put(None)


class NotificationLogReader:
    """Reads domain event notifications from a notification log."""

    DEFAULT_SECTION_SIZE = 10

    def __init__(
        self,
        notification_log: NotificationLog,
        section_size: int = DEFAULT_SECTION_SIZE,
    ):
        """Initialises a reader with the given notification log,
        and optionally a section size integer which determines
        the requested number of domain event notifications in
        each section retrieved from the notification log.
        """
        self.notification_log = notification_log
        self.section_size = section_size

    def read(self, *, start: int) -> Iterator[Notification]:
        """Returns a generator that yields event notifications
        from the reader's notification log, starting from
        given start position (a notification ID).

        This method traverses the linked list of sections presented by
        a notification log, and yields the individual event notifications
        that are contained in each section. When all the event notifications
        from a section have been yielded, the reader will retrieve the next
        section, and continue yielding event notifications until all subsequent
        event notifications in the notification log from the start position
        have been yielded.
        """
        section_id = f"{start},{start + self.section_size - 1}"
        while True:
            section: Section = self.notification_log[section_id]
            yield from section.items
            if section.next_id is None:
                break
            else:
                section_id = section.next_id

    def select(
        self,
        *,
        start: int | None,
        stop: int | None = None,
        topics: Sequence[str] = (),
        inclusive_of_start: bool = True,
    ) -> Iterator[Sequence[Notification]]:
        """Returns a generator that yields lists of event notifications
        from the reader's notification log, starting from given start
        position (a notification ID).

        This method selects a limited list of notifications from a
        notification log and yields event notifications in batches.
        When one list of event notifications has been yielded,
        the reader will retrieve another list, and continue until
        all subsequent event notifications in the notification log
        from the start position have been yielded.
        """
        while True:
            notifications = self.notification_log.select(
                start=start,
                stop=stop,
                limit=self.section_size,
                topics=topics,
                inclusive_of_start=inclusive_of_start,
            )
            # Stop if zero notifications.
            if len(notifications) == 0:
                break

            # Otherwise, yield and continue.
            start = notifications[-1].id
            if inclusive_of_start:
                start += 1
            yield notifications
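
# Illustrative sketch, not part of the distributed module: reading an
# application's notification log directly. select() yields batches (lists) of
# Notification objects; read() yields individual notifications by following
# the log's linked sections.
def _notification_log_reader_sketch(app: Application[Any]) -> None:
    reader = NotificationLogReader(app.notification_log, section_size=20)
    for batch in reader.select(start=1, topics=()):
        for notification in batch:
            _ = (notification.id, notification.topic, notification.state)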