eventsourcing 9.4.0a8__py3-none-any.whl → 9.4.0b2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

@@ -1,5 +1,6 @@
 from __future__ import annotations
 
+import contextlib
 import os
 import weakref
 from abc import ABC, abstractmethod
@@ -22,6 +23,8 @@ from eventsourcing.persistence import (
 from eventsourcing.utils import Environment, EnvType
 
 if TYPE_CHECKING:
+    from types import TracebackType
+
     from typing_extensions import Self
 
 
@@ -40,39 +43,65 @@ class ApplicationSubscription(Iterator[tuple[DomainEventProtocol, Tracking]]):
         gt: int | None = None,
         topics: Sequence[str] = (),
     ):
+        """
+        Starts subscription to application's stored events using application's recorder.
+        """
         self.name = app.name
         self.recorder = app.recorder
         self.mapper = app.mapper
         self.subscription = self.recorder.subscribe(gt=gt, topics=topics)
 
+    def stop(self) -> None:
+        """
+        Stops the stored event subscription.
+        """
+        self.subscription.stop()
+
     def __enter__(self) -> Self:
+        """
+        Calls __enter__ on the stored event subscription.
+        """
         self.subscription.__enter__()
         return self
 
     def __exit__(self, *args: object, **kwargs: Any) -> None:
+        """
+        Calls __exit__ on the stored event subscription.
+        """
         self.subscription.__exit__(*args, **kwargs)
 
     def __iter__(self) -> Self:
         return self
 
     def __next__(self) -> tuple[DomainEventProtocol, Tracking]:
+        """
+        Returns the next stored event from the stored event subscription.
+        Constructs a tracking object that identifies the position of
+        the event in the application sequence, and reconstructs a domain
+        event object from the stored event object.
+        """
         notification = next(self.subscription)
         tracking = Tracking(self.name, notification.id)
         domain_event = self.mapper.to_domain_event(notification)
         return domain_event, tracking
 
     def __del__(self) -> None:
+        """
+        Stops the stored event subscription.
+        """
        self.stop()
 
-    def stop(self) -> None:
-        self.subscription.stop()
-
 
 class Projection(ABC, Generic[TTrackingRecorder]):
     name: str = ""
-    """Name of projection, used to pick prefixed environment variables."""
-    topics: Sequence[str] = ()
-    """Filter events in database when subscribing to an application."""
+    """
+    Name of projection, used to pick prefixed environment
+    variables and define database table names.
+    """
+    topics: tuple[str, ...] = ()
+    """
+    Filter events in database when subscribing to an application.
+    """
 
     def __init__(
         self,
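
Taken together, this hunk turns ApplicationSubscription into a documented context manager and iterator that yields (domain_event, tracking) pairs, with stop() promoted to a named method. A minimal consumption sketch, assuming an existing Application instance named app (the constructor arguments and iterator protocol are as shown in the hunk above):

    # Sketch only: `app` is an assumed Application instance.
    with ApplicationSubscription(app, gt=None, topics=()) as subscription:
        for domain_event, tracking in subscription:
            # Tracking records the event's position in the application sequence.
            print(type(domain_event).__name__, tracking.notification_id)
            break  # iteration otherwise blocks waiting for new events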
@@ -104,10 +133,22 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
         self,
         *,
         application_class: type[TApplication],
-        view_class: type[TTrackingRecorder],
         projection_class: type[Projection[TTrackingRecorder]],
+        view_class: type[TTrackingRecorder],
         env: EnvType | None = None,
     ):
+        """
+        Constructs application from given application class with given environment.
+        Also constructs a materialised view from given class using an infrastructure
+        factory constructed with an environment named after the projection. Also
+        constructs a projection with the constructed materialised view object.
+        Starts a subscription to application and, in a separate event-processing
+        thread, calls projection's process_event() method for each event and tracking
+        object pair received from the subscription.
+        """
+        self._is_interrupted = Event()
+        self._has_called_stop = False
+
         self.app: TApplication = application_class(env)
 
         self.view = (
@@ -128,18 +169,29 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
             gt=self.view.max_tracking_id(self.app.name),
             topics=self.projection.topics,
         )
-        self._is_stopping = Event()
-        self.thread_error: BaseException | None = None
-        self.processing_thread = Thread(
+        self._thread_error: BaseException | None = None
+        self._stop_thread = Thread(
+            target=self._stop_subscription_when_stopping,
+            kwargs={
+                "subscription": self.subscription,
+                "is_stopping": self._is_interrupted,
+            },
+        )
+        self._stop_thread.start()
+        self._processing_thread = Thread(
             target=self._process_events_loop,
             kwargs={
                 "subscription": self.subscription,
                 "projection": self.projection,
-                "is_stopping": self._is_stopping,
+                "is_stopping": self._is_interrupted,
                 "runner": weakref.ref(self),
             },
         )
-        self.processing_thread.start()
+        self._processing_thread.start()
+
+    @property
+    def is_interrupted(self) -> Event:
+        return self._is_interrupted
 
     def _construct_env(self, name: str, env: EnvType | None = None) -> Environment:
         """
@@ -152,8 +204,25 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
         return Environment(name, _env)
 
     def stop(self) -> None:
-        self._is_stopping.set()
-        self.subscription.stop()
+        """
+        Sets the "interrupted" event.
+        """
+        self._has_called_stop = True
+        self._is_interrupted.set()
+
+    @staticmethod
+    def _stop_subscription_when_stopping(
+        subscription: ApplicationSubscription,
+        is_stopping: Event,
+    ) -> None:
+        """
+        Stops the application subscription, which will stop the event-processing thread.
+        """
+        try:
+            is_stopping.wait()
+        finally:
+            is_stopping.set()
+            subscription.stop()
 
     @staticmethod
     def _process_events_loop(
@@ -169,7 +238,7 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
         except BaseException as e:
             _runner = runner()  # get reference from weakref
             if _runner is not None:
-                _runner.thread_error = e
+                _runner._thread_error = e
             else:
                 msg = "ProjectionRunner was deleted before error could be assigned:\n"
                 msg += format_exc()
@@ -178,32 +247,66 @@ class ProjectionRunner(Generic[TApplication, TTrackingRecorder]):
                 RuntimeWarning,
                 stacklevel=2,
             )
-
+        finally:
             is_stopping.set()
-            subscription.subscription.stop()
 
     def run_forever(self, timeout: float | None = None) -> None:
-        if self._is_stopping.wait(timeout=timeout) and self.thread_error is not None:
-            raise self.thread_error
+        """
+        Blocks until timeout, or until the runner is stopped or errors. Re-raises
+        any error otherwise exits normally
+        """
+        if (
+            self._is_interrupted.wait(timeout=timeout)
+            and self._thread_error is not None
+        ):
+            error = self._thread_error
+            self._thread_error = None
+            raise error
 
     def wait(self, notification_id: int | None, timeout: float = 1.0) -> None:
+        """
+        Blocks until timeout, or until the materialised view has recorded a tracking
+        object that is greater than or equal to the given notification ID.
+        """
         try:
             self.projection.view.wait(
                 application_name=self.subscription.name,
                 notification_id=notification_id,
                 timeout=timeout,
-                interrupt=self._is_stopping,
+                interrupt=self._is_interrupted,
             )
         except WaitInterruptedError:
-            if self.thread_error is not None:
-                raise self.thread_error from None
+            if self._thread_error:
+                error = self._thread_error
+                self._thread_error = None
+                raise error from None
+            if self._has_called_stop:
+                return
+            raise
 
     def __enter__(self) -> Self:
         return self
 
-    def __exit__(self, *args: object, **kwargs: Any) -> None:
+    def __exit__(
+        self,
+        exc_type: type[BaseException] | None,
+        exc_val: BaseException | None,
+        exc_tb: TracebackType | None,
+    ) -> None:
+        """
+        Calls stop() and waits for the event-processing thread to exit.
+        """
         self.stop()
-        self.processing_thread.join()
+        self._stop_thread.join()
+        self._processing_thread.join()
+        if self._thread_error:
+            error = self._thread_error
+            self._thread_error = None
+            raise error
 
     def __del__(self) -> None:
-        self.stop()
+        """
+        Calls stop().
+        """
+        with contextlib.suppress(AttributeError):
+            self.stop()
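
These hunks split the runner's shutdown into a dedicated stop thread plus an "interrupted" event, and make __exit__ join both worker threads and re-raise any stored error. A hedged lifecycle sketch (MyApplication, MyProjection and MyView are assumed user-defined classes; the keyword-only constructor arguments, run_forever(), and context-manager behaviour are as shown above):

    # Sketch only: the three classes are assumed subclasses of Application,
    # Projection and a tracking recorder respectively.
    with ProjectionRunner(
        application_class=MyApplication,
        projection_class=MyProjection,
        view_class=MyView,
    ) as runner:
        runner.run_forever(timeout=10.0)  # re-raises an event-processing error
    # On exit: stop() is called, both threads are joined, and any stored
    # thread error is re-raised.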
eventsourcing/system.py CHANGED
@@ -7,12 +7,11 @@ from abc import ABC, abstractmethod
 from collections import defaultdict
 from queue import Full, Queue
 from types import FrameType, ModuleType
-from typing import TYPE_CHECKING, Any, Callable, ClassVar, Optional, Union, cast
+from typing import TYPE_CHECKING, Any, ClassVar, Optional, Union, cast
 
 if TYPE_CHECKING:
     from collections.abc import Iterable, Iterator, Sequence
     from typing_extensions import Self
-    from eventsourcing.dispatch import singledispatchmethod
 
 from eventsourcing.application import (
     Application,
@@ -22,6 +21,7 @@ from eventsourcing.application import (
     Section,
     TApplication,
 )
+from eventsourcing.dispatch import singledispatchmethod
 from eventsourcing.domain import DomainEventProtocol, MutableOrImmutableAggregate
 from eventsourcing.persistence import (
     IntegrityError,
@@ -198,11 +198,8 @@ class Follower(Application):
         self.notify(processing_event.events)
         self._notify(recordings)
 
-    policy: (
-        Callable[[DomainEventProtocol, ProcessingEvent], None] | singledispatchmethod
-    )
-
-    def policy(  # type: ignore[no-redef]
+    @singledispatchmethod
+    def policy(
         self,
         domain_event: DomainEventProtocol,
         processing_event: ProcessingEvent,
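
With singledispatchmethod now imported at runtime, Follower.policy is decorated directly instead of being declared as a union type and redefined. A hedged sketch of the usual single-dispatch pattern against this API (the import paths for Follower and singledispatchmethod appear in the hunks above; the SomethingHappened event class is a hypothetical placeholder):

    # Sketch only: dispatch on the domain event type via singledispatchmethod.
    from eventsourcing.dispatch import singledispatchmethod
    from eventsourcing.system import Follower

    class MyFollower(Follower):
        @singledispatchmethod
        def policy(self, domain_event, processing_event):
            """Default: ignore event types without a registered handler."""

        @policy.register
        def _(self, domain_event: SomethingHappened, processing_event):
            ...  # react to SomethingHappened, e.g. record downstream state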
@@ -10,7 +10,7 @@ from decimal import Decimal
 from threading import Event, get_ident
 from time import sleep
 from timeit import timeit
-from typing import ClassVar
+from typing import Any, ClassVar
 from unittest import TestCase
 from uuid import UUID, uuid4
 
@@ -23,7 +23,7 @@ from eventsourcing.persistence import (
     Transcoding,
 )
 from eventsourcing.tests.domain import BankAccount, EmailAddress
-from eventsourcing.utils import get_topic
+from eventsourcing.utils import EnvType, get_topic
 
 TIMEIT_FACTOR = int(os.environ.get("TEST_TIMEIT_FACTOR", default=10))
 
@@ -344,96 +344,131 @@ class ApplicationTestCase(TestCase):
         app.repository.get(aggregate.id)
         self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
 
-    def test_application_fastforward_skipping_during_contention(self) -> None:
-        app = Application(
+    def test_check_aggregate_fastforwarding_nonblocking(self) -> None:
+        self._check_aggregate_fastforwarding_during_contention(
             env={
                 "AGGREGATE_CACHE_MAXSIZE": "10",
                 "AGGREGATE_CACHE_FASTFORWARD_SKIPPING": "y",
             }
         )
 
-        aggregate = Aggregate()
-        aggregate_id = aggregate.id
-        app.save(aggregate)
+    def test_check_aggregate_fastforwarding_blocking(self) -> None:
+        self._check_aggregate_fastforwarding_during_contention(
+            env={"AGGREGATE_CACHE_MAXSIZE": "10"}
+        )
+
+    def _check_aggregate_fastforwarding_during_contention(self, env: EnvType) -> None:
+        app = Application(env=env)
+
+        self.assertEqual(len(app.repository._fastforward_locks_inuse), 0)
+
+        # Create one aggregate.
+        original_aggregate = Aggregate()
+        app.save(original_aggregate)
+        obj_ids = set()
+
+        # Prime the cache.
+        app.repository.get(original_aggregate.id)
+
+        # Remember the aggregate ID.
+        aggregate_id = original_aggregate.id
 
         stopped = Event()
+        errors: list[BaseException] = []
+        successful_thread_ids = set()
 
-        # Trigger, save, get, check.
         def trigger_save_get_check() -> None:
             while not stopped.is_set():
                 try:
+                    # Get the aggregate.
                     aggregate: Aggregate = app.repository.get(aggregate_id)
+                    original_version = aggregate.version
+
+                    # Try to record a new event.
                     aggregate.trigger_event(Aggregate.Event)
-                    saved_version = aggregate.version
+                    # Give other threads a chance.
                     try:
                         app.save(aggregate)
                     except IntegrityError:
+                        # Start again if we didn't record a new event.
+                        # print("Got integrity error")
+                        sleep(0.001)
                         continue
-                    cached: Aggregate = app.repository.get(aggregate_id)
-                    if saved_version > cached.version:
-                        print(f"Skipped fast-forwarding at version {saved_version}")
-                        stopped.set()
-                    if aggregate.version % 1000 == 0:
-                        print("Version:", aggregate.version, get_ident())
-                    sleep(0.00)
-                except BaseException:
-                    print(traceback.format_exc())
-                    raise
 
-        executor = ThreadPoolExecutor(max_workers=100)
-        for _ in range(100):
-            executor.submit(trigger_save_get_check)
+                    # Get the aggregate from the cache.
+                    assert app.repository.cache is not None
+                    cached: Any = app.repository.cache.get(aggregate_id)
+                    obj_ids.add(id(cached))
 
-        if not stopped.wait(timeout=100):
-            stopped.set()
-            self.fail("Didn't skip fast forwarding before test timed out...")
-        executor.shutdown()
+                    if len(obj_ids) > 1:
+                        stopped.set()
+                        continue
 
-    def test_application_fastforward_blocking_during_contention(self) -> None:
-        app = Application(
-            env={
-                "AGGREGATE_CACHE_MAXSIZE": "10",
-            }
-        )
+                    # Fast-forward the cached aggregate.
+                    fastforwarded: Aggregate = app.repository.get(aggregate_id)
 
-        aggregate = Aggregate()
-        aggregate_id = aggregate.id
-        app.save(aggregate)
+                    # Check cached aggregate was fast-forwarded with recorded event.
+                    if fastforwarded.version < original_version:
+                        try:
+                            self.fail(
+                                f"Failed to fast-forward at version {original_version}"
+                            )
+                        except AssertionError as e:
+                            errors.append(e)
+                        stopped.set()
+                        continue
 
-        stopped = Event()
+                    # Monitor number of threads getting involved.
+                    thread_id = get_ident()
+                    successful_thread_ids.add(thread_id)
 
-        # Trigger, save, get, check.
-        def trigger_save_get_check() -> None:
-            while not stopped.is_set():
-                try:
-                    aggregate: Aggregate = app.repository.get(aggregate_id)
-                    aggregate.trigger_event(Aggregate.Event)
-                    saved_version = aggregate.version
-                    try:
-                        app.save(aggregate)
-                    except IntegrityError:
-                        continue
-                    cached: Aggregate = app.repository.get(aggregate_id)
-                    if saved_version > cached.version:
-                        print(f"Skipped fast-forwarding at version {saved_version}")
+                    # print("Version:", aggregate.version, thread_id)
+
+                    # See if we have done enough.
+                    if len(successful_thread_ids) > 10 and aggregate.version >= 25:
                         stopped.set()
-                    if aggregate.version % 1000 == 0:
-                        print("Version:", aggregate.version, get_ident())
-                    sleep(0.00)
-                except BaseException:
+                        continue
+
+                    sleep(0.0001)
+                    # sleep(0.001)
+                except BaseException as e:
+                    errors.append(e)
+                    stopped.set()
                     print(traceback.format_exc())
                     raise
 
         executor = ThreadPoolExecutor(max_workers=100)
+        futures = []
         for _ in range(100):
-            executor.submit(trigger_save_get_check)
-
-        if not stopped.wait(timeout=3):
-            stopped.set()
-        else:
-            self.fail("Wrongly skipped fast forwarding")
+            f = executor.submit(trigger_save_get_check)
+            futures.append(f)
+
+        # Run for three seconds.
+        stopped.wait(timeout=10)
+        for f in futures:
+            f.result()
+        # print("Got all results, shutting down executor")
         executor.shutdown()
 
+        try:
+            if errors:
+                raise errors[0]
+            if len(obj_ids) > 1:
+                self.fail(f"More than one instance used in the cache: {len(obj_ids)}")
+            if len(successful_thread_ids) < 3:
+                self.fail("Insufficient sharing across contentious threads")
+
+            final_aggregate: Aggregate = app.repository.get(aggregate_id)
+            # print("Final aggregate version:", final_aggregate.version)
+            if final_aggregate.version < 25:
+                self.fail(f"Insufficient version increment: {final_aggregate.version}")
+
+            self.assertEqual(len(app.repository._fastforward_locks_inuse), 0)
+
+        finally:
+            # print("Closing application")
+            app.close()
+
     def test_application_with_cached_aggregates_not_fastforward(self) -> None:
         app = Application(
             env={
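
The refactored test exercises the aggregate cache under thread contention in both blocking and non-blocking fast-forwarding modes. For reference, a minimal sketch of the cache configuration it relies on (the environment variable names appear in the test above; the values are illustrative):

    # Illustrative configuration of the aggregate cache.
    from eventsourcing.application import Application
    from eventsourcing.domain import Aggregate

    app = Application(
        env={
            "AGGREGATE_CACHE_MAXSIZE": "10",              # enable the cache
            "AGGREGATE_CACHE_FASTFORWARD_SKIPPING": "y",  # non-blocking mode
        }
    )
    aggregate = Aggregate()
    app.save(aggregate)
    # The repository serves the cached instance, fast-forwarding it when
    # newer events have been stored.
    cached = app.repository.get(aggregate.id)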
@@ -441,13 +476,37 @@
                 "AGGREGATE_CACHE_FASTFORWARD": "f",
             }
         )
-        aggregate = Aggregate()
-        app.save(aggregate)
+        aggregate1 = Aggregate()
+        app.save(aggregate1)
+        aggregate_id = aggregate1.id
+
         # Should put the aggregate in the cache.
         assert app.repository.cache is not None  # for mypy
-        self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
-        app.repository.get(aggregate.id)
-        self.assertEqual(aggregate, app.repository.cache.get(aggregate.id))
+        self.assertEqual(aggregate1, app.repository.cache.get(aggregate_id))
+        app.repository.get(aggregate_id)
+        self.assertEqual(aggregate1, app.repository.cache.get(aggregate_id))
+
+        aggregate2 = Aggregate()
+        aggregate2._id = aggregate_id
+        aggregate2.trigger_event(Aggregate.Event)
+
+        # This will replace object in cache.
+        app.save(aggregate2)
+
+        self.assertEqual(aggregate2.version, aggregate1.version + 1)
+        aggregate3: Aggregate = app.repository.get(aggregate_id)
+        self.assertEqual(aggregate3.version, aggregate3.version)
+        self.assertEqual(id(aggregate3.version), id(aggregate3.version))
+
+        # This will mess things up because the cache has a stale aggregate.
+        aggregate3.trigger_event(Aggregate.Event)
+        app.events.put(aggregate3.collect_events())
+
+        # And so using the aggregate to record new events will cause an IntegrityError.
+        aggregate4: Aggregate = app.repository.get(aggregate_id)
+        aggregate4.trigger_event(Aggregate.Event)
+        with self.assertRaises(IntegrityError):
+            app.save(aggregate4)
 
     def test_application_with_deepcopy_from_cache_arg(self) -> None:
         app = Application(
@@ -69,11 +69,11 @@ class BankAccount(Aggregate):
 
         amount: Decimal
 
-        def apply(self, account: Aggregate) -> None:
+        def apply(self, aggregate: Aggregate) -> None:
             """
             Increments the account balance.
             """
-            cast(BankAccount, account).balance += self.amount
+            cast(BankAccount, aggregate).balance += self.amount
 
     def set_overdraft_limit(self, overdraft_limit: Decimal) -> None:
         """
@@ -95,8 +95,8 @@ class BankAccount(Aggregate):
 
         overdraft_limit: Decimal
 
-        def apply(self, account: Aggregate) -> None:
-            cast(BankAccount, account).overdraft_limit = self.overdraft_limit
+        def apply(self, aggregate: Aggregate) -> None:
+            cast(BankAccount, aggregate).overdraft_limit = self.overdraft_limit
 
     def close(self) -> None:
         """
@@ -109,8 +109,8 @@ class BankAccount(Aggregate):
         Domain event for when account is closed.
         """
 
-        def apply(self, account: Aggregate) -> None:
-            cast(BankAccount, account).is_closed = True
+        def apply(self, aggregate: Aggregate) -> None:
+            cast(BankAccount, aggregate).is_closed = True
 
 
 class AccountClosedError(Exception):
@@ -793,8 +793,16 @@ class TrackingRecorderTestCase(TestCase, ABC):
 
     def test_wait(self) -> None:
         tracking_recorder = self.create_recorder()
+
+        tracking_recorder.wait("upstream1", None)
+
+        with self.assertRaises(TimeoutError):
+            tracking_recorder.wait("upstream1", 21, timeout=0.1)
+
         tracking1 = Tracking(notification_id=21, application_name="upstream1")
         tracking_recorder.insert_tracking(tracking=tracking1)
+        tracking_recorder.wait("upstream1", None)
+        tracking_recorder.wait("upstream1", 10)
        tracking_recorder.wait("upstream1", 21)
         with self.assertRaises(TimeoutError):
             tracking_recorder.wait("upstream1", 22, timeout=0.1)
@@ -1,4 +1,5 @@
 import psycopg
+from psycopg.sql import SQL, Identifier
 
 from eventsourcing.persistence import PersistenceError
 from eventsourcing.postgres import PostgresDatastore
@@ -43,7 +44,10 @@ def pg_close_all_connections(
 
 
 def drop_postgres_table(datastore: PostgresDatastore, table_name: str) -> None:
-    statement = f"DROP TABLE {table_name}"
+    statement = SQL("DROP TABLE {0}.{1}").format(
+        Identifier(datastore.schema), Identifier(table_name)
+    )
+    # print(f"Dropping table {datastore.schema}.{table_name}")
     try:
         with datastore.transaction(commit=True) as curs:
             curs.execute(statement, prepare=False)
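
Composing the statement from Identifier objects makes psycopg quote the schema and table names safely, instead of interpolating them into an f-string. A hedged sketch of the same psycopg 3 pattern in isolation (the connection string and names are hypothetical):

    # Sketch only: psycopg 3 SQL composition, not code from the package.
    import psycopg
    from psycopg.sql import SQL, Identifier

    with psycopg.connect("dbname=test") as conn, conn.cursor() as curs:
        stmt = SQL("DROP TABLE {0}.{1}").format(
            Identifier("public"), Identifier("stored_events")
        )
        curs.execute(stmt)  # identifiers are double-quoted by psycopg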
eventsourcing/utils.py CHANGED
@@ -2,7 +2,7 @@ from __future__ import annotations
 
 import importlib
 import sys
-from collections.abc import Iterator, Mapping, Sequence
+from collections.abc import Iterator, Mapping
 from functools import wraps
 from inspect import isfunction
 from random import random
@@ -129,7 +129,7 @@ def clear_topic_cache() -> None:
 
 
 def retry(
-    exc: type[Exception] | Sequence[type[Exception]] = Exception,
+    exc: type[Exception] | tuple[type[Exception], ...] = Exception,
     max_attempts: int = 1,
     wait: float = 0,
     stall: float = 0,
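
The exc parameter now accepts a tuple of exception types rather than any Sequence, matching what a Python except clause accepts. A hedged usage sketch (the decorator form is assumed from the signature above; fetch() is hypothetical):

    # Sketch only: retry on either of two exception types.
    from eventsourcing.utils import retry

    @retry((ConnectionError, TimeoutError), max_attempts=5, wait=0.5)
    def fetch() -> bytes:
        ...  # transient failures are retried up to five times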
@@ -235,18 +235,23 @@ class Environment(dict[str, str]):
         super().__init__(env or {})
         self.name = name
 
+    @overload  # type: ignore[override]
+    def get(self, __key: str) -> str | None: ...  # pragma: no cover
+
     @overload
-    def get(self, key: str) -> str | None: ...  # pragma: no cover
+    def get(self, __key: str, __default: str) -> str: ...  # pragma: no cover
 
     @overload
-    def get(self, key: str, default: str | T) -> str | T: ...  # pragma: no cover
+    def get(self, __key: str, __default: T) -> str | T: ...  # pragma: no cover
 
-    def get(self, key: str, default: str | T | None = None) -> str | T | None:
-        for _key in self.create_keys(key):
+    def get(  # pyright: ignore [reportIncompatibleMethodOverride]
+        self, __key: str, __default: str | T | None = None
+    ) -> str | T | None:
+        for _key in self.create_keys(__key):
             value = super().get(_key, None)
             if value is not None:
                 return value
-        return default
+        return __default
 
     def create_keys(self, key: str) -> list[str]:
         keys = []
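
The positional-only __key/__default parameters and the extra overload align Environment.get with dict.get, so a call with a str default is typed as returning str rather than str | None. An illustrative sketch of the lookup behaviour (the Environment(name, env) constructor appears above; the premise that create_keys() tries a name-prefixed key before the plain key is an assumption based on the fallback loop):

    # Sketch only: name-prefixed lookup with fallback to the plain key.
    from eventsourcing.utils import Environment

    env = Environment("MYAPP", {"MYAPP_DB_URI": "postgres://x", "LOG": "INFO"})
    env.get("DB_URI")        # "postgres://x", assumed found via the prefixed key
    env.get("LOG")           # "INFO", found via the plain key
    env.get("MISSING", "d")  # "d", now typed as str rather than str | None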
@@ -1,13 +1,13 @@
 Metadata-Version: 2.3
 Name: eventsourcing
-Version: 9.4.0a8
+Version: 9.4.0b2
 Summary: Event sourcing in Python
 License: BSD 3-Clause
 Keywords: event sourcing,event store,domain driven design,domain-driven design,ddd,cqrs,cqs
 Author: John Bywater
 Author-email: john.bywater@appropriatesoftware.net
 Requires-Python: >=3.9, !=2.7.*, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*, !=3.7.*, !=3.8.*
-Classifier: Development Status :: 3 - Alpha
+Classifier: Development Status :: 4 - Beta
 Classifier: Intended Audience :: Developers
 Classifier: Intended Audience :: Education
 Classifier: Intended Audience :: Science/Research