eventsourcing-9.4.0b1.tar.gz → eventsourcing-9.4.0b2.tar.gz

This diff shows the content of publicly available package versions released to a supported registry. It is provided for informational purposes only and reflects the changes between the versions as they appear in their public registry.

Potentially problematic release.



Files changed (26)
  1. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/PKG-INFO +1 -1
  2. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/persistence.py +37 -13
  3. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/projection.py +68 -18
  4. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/tests/application.py +124 -65
  5. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/pyproject.toml +1 -1
  6. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/AUTHORS +0 -0
  7. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/LICENSE +0 -0
  8. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/README.md +0 -0
  9. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/__init__.py +0 -0
  10. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/application.py +0 -0
  11. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/cipher.py +0 -0
  12. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/compressor.py +0 -0
  13. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/cryptography.py +0 -0
  14. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/dispatch.py +0 -0
  15. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/domain.py +0 -0
  16. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/interface.py +0 -0
  17. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/popo.py +0 -0
  18. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/postgres.py +0 -0
  19. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/py.typed +0 -0
  20. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/sqlite.py +0 -0
  21. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/system.py +0 -0
  22. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/tests/__init__.py +0 -0
  23. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/tests/domain.py +0 -0
  24. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/tests/persistence.py +0 -0
  25. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/tests/postgres_utils.py +0 -0
  26. {eventsourcing-9.4.0b1 → eventsourcing-9.4.0b2}/eventsourcing/utils.py +0 -0

PKG-INFO
@@ -1,6 +1,6 @@
  Metadata-Version: 2.3
  Name: eventsourcing
- Version: 9.4.0b1
+ Version: 9.4.0b2
  Summary: Event sourcing in Python
  License: BSD 3-Clause
  Keywords: event sourcing,event store,domain driven design,domain-driven design,ddd,cqrs,cqs

eventsourcing/persistence.py
@@ -61,6 +61,12 @@ class Transcoder(ABC):
          """Decodes obj from bytes."""
  
  
+ class TranscodingNotRegisteredError(EventSourcingError, TypeError):
+     """
+     Raised when a transcoding isn't registered with JSONTranscoder.
+     """
+ 
+ 
  class JSONTranscoder(Transcoder):
      """
      Extensible transcoder that uses the Python :mod:`json` module.
@@ -104,7 +110,7 @@ class JSONTranscoder(Transcoder):
                  "serializable. Please define and register "
                  "a custom transcoding for this type."
              )
-             raise TypeError(msg) from None
+             raise TranscodingNotRegisteredError(msg) from None
          else:
              return {
                  "_type_": transcoding.name,
@@ -131,7 +137,7 @@ class JSONTranscoder(Transcoder):
                  "deserializable. Please register a "
                  "custom transcoding for this type."
              )
-             raise TypeError(msg) from e
+             raise TranscodingNotRegisteredError(msg) from e
          else:
              return transcoding.decode(_data_)
      else:
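
Note on the two hunks above: both raise sites now use the new exception, and because TranscodingNotRegisteredError subclasses TypeError, existing handlers that caught TypeError keep working. A minimal sketch of catching it, assuming a bare JSONTranscoder with no transcoding registered for Decimal (the Decimal value is illustrative):

    from decimal import Decimal

    from eventsourcing.persistence import JSONTranscoder, TranscodingNotRegisteredError

    transcoder = JSONTranscoder()  # no transcodings registered
    try:
        transcoder.encode({"amount": Decimal("9.99")})
    except TranscodingNotRegisteredError as e:
        # Also catchable as TypeError, as before this release.
        print(e)
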
@@ -245,6 +251,12 @@ class Cipher(ABC):
      """
  
  
+ class MapperDeserialisationError(EventSourcingError, ValueError):
+     """
+     Raised when deserialization fails in a Mapper.
+     """
+ 
+ 
  class Mapper:
      """
      Converts between domain event objects and :class:`StoredEvent` objects.
@@ -290,11 +302,21 @@ class Mapper:
          Converts the given :class:`StoredEvent` to a domain event object.
          """
          stored_state = stored_event.state
-         if self.cipher:
-             stored_state = self.cipher.decrypt(stored_state)
-         if self.compressor:
-             stored_state = self.compressor.decompress(stored_state)
-         event_state: dict[str, Any] = self.transcoder.decode(stored_state)
+         try:
+             if self.cipher:
+                 stored_state = self.cipher.decrypt(stored_state)
+             if self.compressor:
+                 stored_state = self.compressor.decompress(stored_state)
+             event_state: dict[str, Any] = self.transcoder.decode(stored_state)
+         except Exception as e:
+             msg = (
+                 f"Failed to deserialise state of stored event with "
+                 f"topic '{stored_event.topic}', "
+                 f"originator_id '{stored_event.originator_id}' and "
+                 f"originator_version {stored_event.originator_version}: {e}"
+             )
+             raise MapperDeserialisationError(msg) from e
+ 
          event_state["originator_id"] = stored_event.originator_id
          event_state["originator_version"] = stored_event.originator_version
          cls = resolve_topic(stored_event.topic)
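
Note: any failure during decryption, decompression, or decoding is now wrapped in MapperDeserialisationError, whose message identifies the offending stored event. A minimal sketch, assuming the 9.x API shapes (Mapper(transcoder=...), StoredEvent, and Mapper.to_domain_event()); the UUID, topic and state bytes here are made up to force a decoding failure:

    from uuid import uuid4

    from eventsourcing.persistence import (
        JSONTranscoder,
        Mapper,
        MapperDeserialisationError,
        StoredEvent,
    )

    mapper = Mapper(transcoder=JSONTranscoder())
    corrupt = StoredEvent(
        originator_id=uuid4(),
        originator_version=1,
        topic="eventsourcing.domain:Aggregate.Created",
        state=b"\x00 not json",  # transcoder.decode() will fail on this
    )
    try:
        mapper.to_domain_event(corrupt)
    except MapperDeserialisationError as e:
        print(e)  # names the topic, originator_id and originator_version
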
@@ -395,7 +417,7 @@ class Recorder:
      pass
  
  
- class AggregateRecorder(ABC):
+ class AggregateRecorder(Recorder, ABC):
      """
      Abstract base class for inserting and selecting stored events.
      """
@@ -532,7 +554,8 @@ class TrackingRecorder(Recorder, ABC):
          Raises WaitInterruptError if the `interrupt` is set before `timeout` is reached.
          """
          deadline = monotonic() + timeout
-         delay_ms = 1.0
+         sleep_interval_ms = 100.0
+         max_sleep_interval_ms = 800.0
          while True:
              max_tracking_id = self.max_tracking_id(application_name)
              if notification_id is None or (
@@ -540,11 +563,10 @@
              ):
                  break
              if interrupt:
-                 if interrupt.wait(timeout=delay_ms / 1000):
+                 if interrupt.wait(timeout=sleep_interval_ms / 1000):
                      raise WaitInterruptedError
              else:
-                 sleep(delay_ms / 1000)
-                 delay_ms *= 2
+                 sleep(sleep_interval_ms / 1000)
              remaining = deadline - monotonic()
              if remaining < 0:
                  msg = (
@@ -552,7 +574,9 @@
                      f"from application '{application_name}' to be processed"
                  )
                  raise TimeoutError(msg)
-             delay_ms = min(delay_ms, remaining * 1000)
+             sleep_interval_ms = min(
+                 sleep_interval_ms * 2, remaining * 1000, max_sleep_interval_ms
+             )
  
  
  class ProcessRecorder(TrackingRecorder, ApplicationRecorder, ABC):
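
Note: the polling schedule in the loop above changes from a 1ms starting delay that doubles without bound to a 100ms starting delay that doubles up to an 800ms ceiling, clipped to the time remaining before the deadline. A standalone sketch of that schedule (poll_until and predicate are illustrative names, not library API):

    from time import monotonic, sleep

    def poll_until(predicate, timeout: float) -> bool:
        """Polls predicate with capped exponential backoff until timeout."""
        deadline = monotonic() + timeout
        sleep_interval_ms = 100.0
        max_sleep_interval_ms = 800.0
        while not predicate():
            remaining = deadline - monotonic()
            if remaining < 0:
                return False  # the library raises TimeoutError here instead
            sleep(sleep_interval_ms / 1000)
            # Double the interval, but never beyond the cap or the deadline.
            sleep_interval_ms = min(
                sleep_interval_ms * 2, remaining * 1000, max_sleep_interval_ms
            )
        return True
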

eventsourcing/projection.py
@@ -1,5 +1,6 @@
  from __future__ import annotations
  
+ import contextlib
  import os
  import weakref
  from abc import ABC, abstractmethod
@@ -22,6 +23,8 @@ from eventsourcing.persistence import (
  from eventsourcing.utils import Environment, EnvType
  
  if TYPE_CHECKING:
+     from types import TracebackType
+ 
      from typing_extensions import Self
  
  
@@ -143,7 +146,8 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
          thread, calls projection's process_event() method for each event and tracking
          object pair received from the subscription.
          """
-         self._is_stopping = Event()
+         self._is_interrupted = Event()
+         self._has_called_stop = False
  
          self.app: TApplication = application_class(env)
  
@@ -165,17 +169,29 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
              gt=self.view.max_tracking_id(self.app.name),
              topics=self.projection.topics,
          )
-         self.thread_error: BaseException | None = None
-         self.processing_thread = Thread(
+         self._thread_error: BaseException | None = None
+         self._stop_thread = Thread(
+             target=self._stop_subscription_when_stopping,
+             kwargs={
+                 "subscription": self.subscription,
+                 "is_stopping": self._is_interrupted,
+             },
+         )
+         self._stop_thread.start()
+         self._processing_thread = Thread(
              target=self._process_events_loop,
              kwargs={
                  "subscription": self.subscription,
                  "projection": self.projection,
-                 "is_stopping": self._is_stopping,
+                 "is_stopping": self._is_interrupted,
                  "runner": weakref.ref(self),
              },
          )
-         self.processing_thread.start()
+         self._processing_thread.start()
+ 
+     @property
+     def is_interrupted(self) -> Event:
+         return self._is_interrupted
  
      def _construct_env(self, name: str, env: EnvType | None = None) -> Environment:
          """
@@ -188,11 +204,25 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
          return Environment(name, _env)
  
      def stop(self) -> None:
+         """
+         Sets the "interrupted" event.
+         """
+         self._has_called_stop = True
+         self._is_interrupted.set()
+ 
+     @staticmethod
+     def _stop_subscription_when_stopping(
+         subscription: ApplicationSubscription,
+         is_stopping: Event,
+     ) -> None:
          """
          Stops the application subscription, which will stop the event-processing thread.
          """
-         self._is_stopping.set()
-         self.subscription.stop()
+         try:
+             is_stopping.wait()
+         finally:
+             is_stopping.set()
+             subscription.stop()
  
      @staticmethod
      def _process_events_loop(
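
Note: stopping is now split in two. stop() only sets the "interrupted" event, while the dedicated _stop_subscription_when_stopping thread parks on that event and closes the blocking subscription, which in turn lets the event-processing thread exit. A standalone sketch of the pattern (FakeSubscription is illustrative, not the library's class):

    from threading import Event, Thread

    class FakeSubscription:
        def stop(self) -> None:
            print("subscription stopped")

    def stop_subscription_when_stopping(
        subscription: FakeSubscription, is_stopping: Event
    ) -> None:
        try:
            is_stopping.wait()  # parks until stop() or the loop sets the event
        finally:
            is_stopping.set()
            subscription.stop()

    is_stopping = Event()
    subscription = FakeSubscription()
    stopper = Thread(
        target=stop_subscription_when_stopping, args=(subscription, is_stopping)
    )
    stopper.start()
    is_stopping.set()  # what stop() now does
    stopper.join()
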
@@ -208,7 +238,7 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
          except BaseException as e:
              _runner = runner()  # get reference from weakref
              if _runner is not None:
-                 _runner.thread_error = e
+                 _runner._thread_error = e
              else:
                  msg = "ProjectionRunner was deleted before error could be assigned:\n"
                  msg += format_exc()
@@ -217,17 +247,21 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
                      RuntimeWarning,
                      stacklevel=2,
                  )
- 
+         finally:
              is_stopping.set()
-             subscription.stop()
  
      def run_forever(self, timeout: float | None = None) -> None:
          """
          Blocks until timeout, or until the runner is stopped or errors. Re-raises
          any error otherwise exits normally
          """
-         if self._is_stopping.wait(timeout=timeout) and self.thread_error is not None:
-             raise self.thread_error
+         if (
+             self._is_interrupted.wait(timeout=timeout)
+             and self._thread_error is not None
+         ):
+             error = self._thread_error
+             self._thread_error = None
+             raise error
  
      def wait(self, notification_id: int | None, timeout: float = 1.0) -> None:
          """
@@ -239,24 +273,40 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
                  application_name=self.subscription.name,
                  notification_id=notification_id,
                  timeout=timeout,
-                 interrupt=self._is_stopping,
+                 interrupt=self._is_interrupted,
              )
          except WaitInterruptedError:
-             if self.thread_error is not None:
-                 raise self.thread_error from None
+             if self._thread_error:
+                 error = self._thread_error
+                 self._thread_error = None
+                 raise error from None
+             if self._has_called_stop:
+                 return
+             raise
  
      def __enter__(self) -> Self:
          return self
  
-     def __exit__(self, *args: object, **kwargs: Any) -> None:
+     def __exit__(
+         self,
+         exc_type: type[BaseException] | None,
+         exc_val: BaseException | None,
+         exc_tb: TracebackType | None,
+     ) -> None:
          """
          Calls stop() and waits for the event-processing thread to exit.
          """
          self.stop()
-         self.processing_thread.join()
+         self._stop_thread.join()
+         self._processing_thread.join()
+         if self._thread_error:
+             error = self._thread_error
+             self._thread_error = None
+             raise error
  
      def __del__(self) -> None:
          """
          Calls stop().
          """
-         self.stop()
+         with contextlib.suppress(AttributeError):
+             self.stop()
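
Note: __exit__ now joins both background threads and re-raises any error captured from the event-processing thread, and __del__ suppresses AttributeError because finalisation can run on a partially constructed runner. A minimal sketch of why the suppression is needed (Runner here is illustrative, not the library's class):

    import contextlib
    from threading import Event

    class Runner:
        def __init__(self, fail: bool = False):
            if fail:
                # _is_interrupted is never assigned on this path.
                raise RuntimeError("constructor failed early")
            self._is_interrupted = Event()

        def stop(self) -> None:
            self._is_interrupted.set()  # AttributeError if __init__ failed early

        def __del__(self) -> None:
            with contextlib.suppress(AttributeError):
                self.stop()

    try:
        Runner(fail=True)  # half-built instance is finalised without a warning
    except RuntimeError:
        pass
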

eventsourcing/tests/application.py
@@ -10,7 +10,7 @@ from decimal import Decimal
  from threading import Event, get_ident
  from time import sleep
  from timeit import timeit
- from typing import ClassVar
+ from typing import Any, ClassVar
  from unittest import TestCase
  from uuid import UUID, uuid4
  
@@ -23,7 +23,7 @@ from eventsourcing.persistence import (
      Transcoding,
  )
  from eventsourcing.tests.domain import BankAccount, EmailAddress
- from eventsourcing.utils import get_topic
+ from eventsourcing.utils import EnvType, get_topic
  
  TIMEIT_FACTOR = int(os.environ.get("TEST_TIMEIT_FACTOR", default=10))
  
@@ -344,96 +344,131 @@ class ApplicationTestCase(TestCase):
          app.repository.get(aggregate.id)
          self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
  
-     def test_application_fastforward_skipping_during_contention(self) -> None:
-         app = Application(
+     def test_check_aggregate_fastforwarding_nonblocking(self) -> None:
+         self._check_aggregate_fastforwarding_during_contention(
              env={
                  "AGGREGATE_CACHE_MAXSIZE": "10",
                  "AGGREGATE_CACHE_FASTFORWARD_SKIPPING": "y",
              }
          )
  
-         aggregate = Aggregate()
-         aggregate_id = aggregate.id
-         app.save(aggregate)
+     def test_check_aggregate_fastforwarding_blocking(self) -> None:
+         self._check_aggregate_fastforwarding_during_contention(
+             env={"AGGREGATE_CACHE_MAXSIZE": "10"}
+         )
+ 
+     def _check_aggregate_fastforwarding_during_contention(self, env: EnvType) -> None:
+         app = Application(env=env)
+ 
+         self.assertEqual(len(app.repository._fastforward_locks_inuse), 0)
+ 
+         # Create one aggregate.
+         original_aggregate = Aggregate()
+         app.save(original_aggregate)
+         obj_ids = set()
+ 
+         # Prime the cache.
+         app.repository.get(original_aggregate.id)
+ 
+         # Remember the aggregate ID.
+         aggregate_id = original_aggregate.id
  
          stopped = Event()
+         errors: list[BaseException] = []
+         successful_thread_ids = set()
  
-         # Trigger, save, get, check.
          def trigger_save_get_check() -> None:
              while not stopped.is_set():
                  try:
+                     # Get the aggregate.
                      aggregate: Aggregate = app.repository.get(aggregate_id)
+                     original_version = aggregate.version
+ 
+                     # Try to record a new event.
                      aggregate.trigger_event(Aggregate.Event)
-                     saved_version = aggregate.version
+                     # Give other threads a chance.
                      try:
                          app.save(aggregate)
                      except IntegrityError:
+                         # Start again if we didn't record a new event.
+                         # print("Got integrity error")
+                         sleep(0.001)
                          continue
-                     cached: Aggregate = app.repository.get(aggregate_id)
-                     if saved_version > cached.version:
-                         print(f"Skipped fast-forwarding at version {saved_version}")
-                         stopped.set()
-                     if aggregate.version % 1000 == 0:
-                         print("Version:", aggregate.version, get_ident())
-                     sleep(0.00)
-                 except BaseException:
-                     print(traceback.format_exc())
-                     raise
  
-         executor = ThreadPoolExecutor(max_workers=100)
-         for _ in range(100):
-             executor.submit(trigger_save_get_check)
+                     # Get the aggregate from the cache.
+                     assert app.repository.cache is not None
+                     cached: Any = app.repository.cache.get(aggregate_id)
+                     obj_ids.add(id(cached))
  
-         if not stopped.wait(timeout=100):
-             stopped.set()
-             self.fail("Didn't skip fast forwarding before test timed out...")
-         executor.shutdown()
+                     if len(obj_ids) > 1:
+                         stopped.set()
+                         continue
  
-     def test_application_fastforward_blocking_during_contention(self) -> None:
-         app = Application(
-             env={
-                 "AGGREGATE_CACHE_MAXSIZE": "10",
-             }
-         )
+                     # Fast-forward the cached aggregate.
+                     fastforwarded: Aggregate = app.repository.get(aggregate_id)
  
-         aggregate = Aggregate()
-         aggregate_id = aggregate.id
-         app.save(aggregate)
+                     # Check cached aggregate was fast-forwarded with recorded event.
+                     if fastforwarded.version < original_version:
+                         try:
+                             self.fail(
+                                 f"Failed to fast-forward at version {original_version}"
+                             )
+                         except AssertionError as e:
+                             errors.append(e)
+                             stopped.set()
+                             continue
  
-         stopped = Event()
+                     # Monitor number of threads getting involved.
+                     thread_id = get_ident()
+                     successful_thread_ids.add(thread_id)
  
-         # Trigger, save, get, check.
-         def trigger_save_get_check() -> None:
-             while not stopped.is_set():
-                 try:
-                     aggregate: Aggregate = app.repository.get(aggregate_id)
-                     aggregate.trigger_event(Aggregate.Event)
-                     saved_version = aggregate.version
-                     try:
-                         app.save(aggregate)
-                     except IntegrityError:
-                         continue
-                     cached: Aggregate = app.repository.get(aggregate_id)
-                     if saved_version > cached.version:
-                         print(f"Skipped fast-forwarding at version {saved_version}")
+                     # print("Version:", aggregate.version, thread_id)
+ 
+                     # See if we have done enough.
+                     if len(successful_thread_ids) > 10 and aggregate.version >= 25:
                          stopped.set()
-                     if aggregate.version % 1000 == 0:
-                         print("Version:", aggregate.version, get_ident())
-                     sleep(0.00)
-                 except BaseException:
+                         continue
+ 
+                     sleep(0.0001)
+                     # sleep(0.001)
+                 except BaseException as e:
+                     errors.append(e)
+                     stopped.set()
                      print(traceback.format_exc())
                      raise
  
          executor = ThreadPoolExecutor(max_workers=100)
+         futures = []
          for _ in range(100):
-             executor.submit(trigger_save_get_check)
- 
-         if not stopped.wait(timeout=3):
-             stopped.set()
-         else:
-             self.fail("Wrongly skipped fast forwarding")
+             f = executor.submit(trigger_save_get_check)
+             futures.append(f)
+ 
+         # Run for three seconds.
+         stopped.wait(timeout=10)
+         for f in futures:
+             f.result()
+         # print("Got all results, shutting down executor")
          executor.shutdown()
  
+         try:
+             if errors:
+                 raise errors[0]
+             if len(obj_ids) > 1:
+                 self.fail(f"More than one instance used in the cache: {len(obj_ids)}")
+             if len(successful_thread_ids) < 3:
+                 self.fail("Insufficient sharing across contentious threads")
+ 
+             final_aggregate: Aggregate = app.repository.get(aggregate_id)
+             # print("Final aggregate version:", final_aggregate.version)
+             if final_aggregate.version < 25:
+                 self.fail(f"Insufficient version increment: {final_aggregate.version}")
+ 
+             self.assertEqual(len(app.repository._fastforward_locks_inuse), 0)
+ 
+         finally:
+             # print("Closing application")
+             app.close()
+ 
      def test_application_with_cached_aggregates_not_fastforward(self) -> None:
          app = Application(
              env={
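
Note: the refactored test exercises both fast-forwarding modes through the application environment. A minimal sketch of the two configurations it covers (the env keys are those used in the test above):

    from eventsourcing.application import Application

    # Blocking fast-forwarding (only the cache size is set).
    blocking_app = Application(env={"AGGREGATE_CACHE_MAXSIZE": "10"})

    # Non-blocking: threads may skip fast-forwarding under lock contention.
    nonblocking_app = Application(
        env={
            "AGGREGATE_CACHE_MAXSIZE": "10",
            "AGGREGATE_CACHE_FASTFORWARD_SKIPPING": "y",
        }
    )
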
@@ -441,13 +476,37 @@ class ApplicationTestCase(TestCase):
                  "AGGREGATE_CACHE_FASTFORWARD": "f",
              }
          )
-         aggregate = Aggregate()
-         app.save(aggregate)
+         aggregate1 = Aggregate()
+         app.save(aggregate1)
+         aggregate_id = aggregate1.id
+ 
          # Should put the aggregate in the cache.
          assert app.repository.cache is not None  # for mypy
-         self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
-         app.repository.get(aggregate.id)
-         self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
+         self.assertEqual(aggregate1, app.repository.cache.get(aggregate_id))
+         app.repository.get(aggregate_id)
+         self.assertEqual(aggregate1, app.repository.cache.get(aggregate_id))
+ 
+         aggregate2 = Aggregate()
+         aggregate2._id = aggregate_id
+         aggregate2.trigger_event(Aggregate.Event)
+ 
+         # This will replace object in cache.
+         app.save(aggregate2)
+ 
+         self.assertEqual(aggregate2.version, aggregate1.version + 1)
+         aggregate3: Aggregate = app.repository.get(aggregate_id)
+         self.assertEqual(aggregate3.version, aggregate3.version)
+         self.assertEqual(id(aggregate3.version), id(aggregate3.version))
+ 
+         # This will mess things up because the cache has a stale aggregate.
+         aggregate3.trigger_event(Aggregate.Event)
+         app.events.put(aggregate3.collect_events())
+ 
+         # And so using the aggregate to record new events will cause an IntegrityError.
+         aggregate4: Aggregate = app.repository.get(aggregate_id)
+         aggregate4.trigger_event(Aggregate.Event)
+         with self.assertRaises(IntegrityError):
+             app.save(aggregate4)
  
      def test_application_with_deepcopy_from_cache_arg(self) -> None:
          app = Application(

pyproject.toml
@@ -1,6 +1,6 @@
  [tool.poetry]
  name = "eventsourcing"
- version = "9.4.0b1"
+ version = "9.4.0b2"
  
  description = "Event sourcing in Python"
  authors = [