qx-eventstore 0.2.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,56 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+ .Python
7
+ *.so
8
+ *.egg
9
+ *.egg-info/
10
+ dist/
11
+ build/
12
+ eggs/
13
+ .eggs/
14
+ sdist/
15
+ wheels/
16
+ *.egg-link
17
+
18
+ # Virtual environments
19
+ .venv/
20
+ venv/
21
+ env/
22
+ ENV/
23
+
24
+ # uv
25
+ .uv/
26
+
27
+ # Testing
28
+ .pytest_cache/
29
+ .coverage
30
+ htmlcov/
31
+ .tox/
32
+
33
+ # Type checking
34
+ .mypy_cache/
35
+ .ruff_cache/
36
+
37
+ # IDE
38
+ .idea/
39
+ .vscode/
40
+ *.swp
41
+ *.swo
42
+
43
+ # OS
44
+ .DS_Store
45
+ Thumbs.db
46
+
47
+ # Docker
48
+ *.env.local
49
+
50
+ # Dist artifacts
51
+ dist/
52
+
53
+ # VS Code extension build artifacts
54
+ extensions/vscode/node_modules/
55
+ extensions/vscode/dist/
56
+ extensions/vscode/*.vsix
@@ -0,0 +1,36 @@
1
+ Metadata-Version: 2.4
2
+ Name: qx-eventstore
3
+ Version: 0.2.0
4
+ Summary: Qx event store: event-sourced aggregates with Postgres-backed event log and snapshots
5
+ Author: Qx Engineering
6
+ License: MIT
7
+ Requires-Python: >=3.14
8
+ Requires-Dist: qx-core
9
+ Requires-Dist: qx-db
10
+ Description-Content-Type: text/markdown
11
+
12
+ # qx-eventstore
13
+
14
+ Event-sourced aggregates for the Qx framework. Aggregates are persisted as an immutable event
15
+ stream in Postgres, with optional periodic snapshots for efficient replay.
16
+
17
+ ## Usage
18
+
19
+ ```python
20
+ from dataclasses import dataclass, field
21
+ from qx.core import DomainEvent
+ from qx.eventstore import EventSourcedAggregate, EventStore, include_eventstore_tables
22
+
23
+ class MoneyDeposited(DomainEvent):
24
+ event_name = "account.money_deposited"
25
+ amount: int
26
+
27
+ @dataclass
28
+ class Account(EventSourcedAggregate[str]):
29
+ balance: int = field(default=0)
30
+
31
+ def deposit(self, amount: int) -> None:
32
+ self.record_event(MoneyDeposited(amount=amount))
33
+
34
+ def apply_moneydeposited(self, ev: MoneyDeposited) -> None:
35
+ self.balance += ev.amount
36
+ ```
@@ -0,0 +1,25 @@
1
+ # qx-eventstore
2
+
3
+ Event-sourced aggregates for the Qx framework. Aggregates are persisted as an immutable event
4
+ stream in Postgres, with optional periodic snapshots for efficient replay.
5
+
6
+ ## Usage
7
+
8
+ ```python
9
+ from dataclasses import dataclass, field
10
+ from qx.core import DomainEvent
+ from qx.eventstore import EventSourcedAggregate, EventStore, include_eventstore_tables
11
+
12
+ class MoneyDeposited(DomainEvent):
13
+ event_name = "account.money_deposited"
14
+ amount: int
15
+
16
+ @dataclass
17
+ class Account(EventSourcedAggregate[str]):
18
+ balance: int = field(default=0)
19
+
20
+ def deposit(self, amount: int) -> None:
21
+ self.record_event(MoneyDeposited(amount=amount))
22
+
23
+ def apply_moneydeposited(self, ev: MoneyDeposited) -> None:
24
+ self.balance += ev.amount
25
+ ```
@@ -0,0 +1,19 @@
1
+ [project]
2
+ name = "qx-eventstore"
3
+ version = "0.2.0"
4
+ description = "Qx event store: event-sourced aggregates with Postgres-backed event log and snapshots"
5
+ readme = "README.md"
6
+ requires-python = ">=3.14"
7
+ license = { text = "MIT" }
8
+ authors = [{ name = "Qx Engineering" }]
9
+ dependencies = [
10
+ "qx-core",
11
+ "qx-db",
12
+ ]
13
+
14
+ [build-system]
15
+ requires = ["hatchling"]
16
+ build-backend = "hatchling.build"
17
+
18
+ [tool.hatch.build.targets.wheel]
19
+ packages = ["src/qx"]
@@ -0,0 +1,17 @@
1
+ """qx-eventstore: event-sourced aggregates with Postgres-backed event log."""
2
+
3
+ from qx.eventstore.aggregate import EventSourcedAggregate
4
+ from qx.eventstore.store import EventStore
5
+ from qx.eventstore.table import (
6
+ AGGREGATE_EVENTS_TABLE_NAME,
7
+ AGGREGATE_SNAPSHOTS_TABLE_NAME,
8
+ include_eventstore_tables,
9
+ )
10
+
11
+ __all__ = [
12
+ "AGGREGATE_EVENTS_TABLE_NAME",
13
+ "AGGREGATE_SNAPSHOTS_TABLE_NAME",
14
+ "EventSourcedAggregate",
15
+ "EventStore",
16
+ "include_eventstore_tables",
17
+ ]
@@ -0,0 +1,91 @@
1
+ """EventSourcedAggregate — base class for event-sourced aggregates."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import TYPE_CHECKING, Any, TypeVar
7
+
8
+ if TYPE_CHECKING:
9
+ from qx.core.domain.events import DomainEvent
10
+
11
+ __all__ = ["EventSourcedAggregate"]
12
+
13
+ TId = TypeVar("TId")
14
+
15
+ _APPLY_PREFIX = "apply_"
16
+
17
+
18
+ @dataclass
19
+ class EventSourcedAggregate[TId]:
20
+ """Base class for aggregates persisted as an event stream.
21
+
22
+ Subclass and implement ``apply_<EventClassName>`` methods — they are the
23
+ *only* state mutators. Command methods call ``record_event`` which buffers
24
+ the event and immediately applies it::
25
+
26
+ class Account(EventSourcedAggregate[Identifier]):
27
+ balance: int = field(default=0)
28
+
29
+ def deposit(self, amount: int) -> None:
30
+ self.record_event(MoneyDeposited(amount=amount))
31
+
32
+ def apply_money_deposited(self, ev: MoneyDeposited) -> None:
33
+ self.balance += ev.amount
34
+
35
+ The repository loads the event stream, calls ``_replay`` to restore
36
+ state, then drains pending events on save.
37
+ """
38
+
39
+ id: TId
40
+ version: int = field(default=0, compare=False)
41
+ _pending_events: list[DomainEvent] = field(
42
+ default_factory=list, repr=False, compare=False, init=False
43
+ )
44
+
45
+ def record_event(self, event: DomainEvent) -> None:
46
+ """Buffer an event and apply it immediately to update local state."""
47
+ self._pending_events.append(event)
48
+ self._apply(event)
49
+
50
+ def pull_events(self) -> list[DomainEvent]:
51
+ """Drain and return pending events. Called by the repository at save time."""
52
+ events = list(self._pending_events)
53
+ self._pending_events.clear()
54
+ return events
55
+
56
+ @property
57
+ def has_pending_events(self) -> bool:
58
+ return bool(self._pending_events)
59
+
60
+ def _apply(self, event: DomainEvent) -> None:
61
+ """Dispatch to the appropriate apply_* method if it exists."""
62
+ method_name = f"{_APPLY_PREFIX}{type(event).__name__.lower()}"
63
+ handler = getattr(self, method_name, None)
64
+ if handler is not None:
65
+ handler(event)
66
+
67
+ def _replay(self, events: list[DomainEvent], *, version: int) -> None:
68
+ """Restore state by replaying a sequence of persisted events.
69
+
70
+ Called by the repository after loading the event stream. Does NOT
71
+ buffer events into ``_pending_events``; they are already persisted.
72
+ """
73
+ for event in events:
74
+ self._apply(event)
75
+ self.version = version
76
+
77
+ @classmethod
78
+ def _from_events(
79
+ cls,
80
+ id: TId,
81
+ events: list[DomainEvent],
82
+ *,
83
+ version: int,
84
+ ) -> Any:
85
+ """Reconstruct an instance from its full event history."""
86
+ instance: EventSourcedAggregate[TId] = object.__new__(cls)
87
+ instance.id = id
88
+ instance.version = 0
89
+ instance._pending_events = []
90
+ instance._replay(events, version=version)
91
+ return instance
File without changes
@@ -0,0 +1,230 @@
1
+ """EventStore — append and replay aggregate event streams."""
2
+
3
+ from __future__ import annotations
4
+
5
+ import dataclasses
6
+ import importlib
7
+ from datetime import UTC, datetime
8
+ from typing import TYPE_CHECKING, Any
9
+ from uuid import uuid4
10
+
11
+ from qx.core import ConflictError, Result
12
+ from sqlalchemy import select
13
+ from sqlalchemy.dialects.postgresql import insert as pg_insert
14
+
15
+ if TYPE_CHECKING:
16
+ from qx.core.domain.events import DomainEvent
17
+ from qx.eventstore.aggregate import EventSourcedAggregate
18
+ from sqlalchemy import Table
19
+ from sqlalchemy.ext.asyncio import AsyncSession
20
+
21
+ __all__ = ["EventStore"]
22
+
23
+
24
class EventStore:
    """Postgres-backed event log for event-sourced aggregates.

    Append events in a single call; load them back for replay. Optionally
    reads/writes snapshots to avoid full-stream replay on large aggregates.

    Usage::

        store = EventStore(session, events_table, snapshots_table)

        # append events after handling a command
        await store.append(account, aggregate_type="accounts.Account")

        # load and replay
        account = await store.load(
            Account,
            aggregate_id=str(account_id),
            aggregate_type="accounts.Account",
        )
    """

    def __init__(
        self,
        session: AsyncSession,
        events_table: Table,
        snapshots_table: Table,
        *,
        snapshot_every: int = 50,
    ) -> None:
        """Bind the store to one session and one pair of tables.

        Args:
            session: Async SQLAlchemy session; the caller owns the transaction.
            events_table: Append-only event log table.
            snapshots_table: One-row-per-aggregate snapshot table.
            snapshot_every: Take a snapshot whenever the aggregate version
                crosses a multiple of this value; a value <= 0 disables
                snapshotting entirely.
        """
        self._session = session
        self._events = events_table
        self._snapshots = snapshots_table
        self._snapshot_every = snapshot_every

    async def append(
        self,
        aggregate: EventSourcedAggregate[Any],
        *,
        aggregate_type: str,
        tenant_id: str | None = None,
    ) -> Result[int]:
        """Drain pending events from ``aggregate`` and append them to the log.

        Returns the new version (last sequence number written) on success,
        or a ``ConflictError`` failure on an optimistic-concurrency conflict
        (another writer already used one of these sequence numbers).
        """
        events = aggregate.pull_events()
        if not events:
            # Nothing to write — report the current version unchanged.
            return Result.success(aggregate.version)

        base_seq = aggregate.version
        now = datetime.now(UTC)
        rows = [
            {
                "id": uuid4(),
                "aggregate_type": aggregate_type,
                "aggregate_id": str(aggregate.id),
                "sequence": base_seq + i,
                "event_type": f"{type(event).__module__}.{type(event).__qualname__}",
                "event_name": event.event_name,
                "payload": event.model_dump(mode="json"),
                "occurred_at": now,
                "tenant_id": tenant_id,
            }
            for i, event in enumerate(events, start=1)
        ]

        try:
            await self._session.execute(self._events.insert(), rows)
        except Exception as exc:
            # Unique-index violation on (aggregate_type, aggregate_id, sequence)
            # means another writer won the race.
            # NOTE(review): string-matching the driver message is fragile;
            # catching sqlalchemy.exc.IntegrityError would be more precise.
            if "unique" in str(exc).lower():
                return Result.failure(
                    ConflictError(
                        code="eventstore.version_conflict",
                        message=f"{aggregate_type}:{aggregate.id} was modified concurrently",
                    )
                )
            raise

        new_version = base_seq + len(events)
        aggregate.version = new_version

        # Snapshot when the version CROSSES a multiple of the interval.
        # Comparing multiples (instead of `new_version % every == 0`) also
        # covers multi-event appends that jump past a boundary — e.g.
        # 49 -> 52 with snapshot_every=50, which the modulo test would skip.
        if (
            self._snapshot_every > 0
            and new_version // self._snapshot_every > base_seq // self._snapshot_every
        ):
            await self._save_snapshot(aggregate, aggregate_type=aggregate_type, tenant_id=tenant_id)

        return Result.success(new_version)

    async def load(
        self,
        cls: type[EventSourcedAggregate[Any]],
        *,
        aggregate_id: str,
        aggregate_type: str,
    ) -> Result[EventSourcedAggregate[Any] | None]:
        """Load an aggregate by replaying its event stream.

        If a snapshot exists, only events after the snapshot version are
        replayed on top of the snapshot state. Returns ``None`` when neither
        events nor a snapshot exist for this id.
        """
        snapshot_state: dict[str, Any] | None = None
        snapshot_version = 0

        snap_row = await self._session.execute(
            select(self._snapshots).where(
                self._snapshots.c.aggregate_type == aggregate_type,
                self._snapshots.c.aggregate_id == aggregate_id,
            )
        )
        snap = snap_row.mappings().first()
        if snap is not None:
            # Tolerate a malformed state column instead of failing the load.
            snapshot_state = dict(snap["state"]) if isinstance(snap["state"], dict) else {}
            snapshot_version = int(snap["version"])

        # Only the tail of the stream (events after the snapshot) is fetched.
        rows = await self._session.execute(
            select(self._events)
            .where(
                self._events.c.aggregate_type == aggregate_type,
                self._events.c.aggregate_id == aggregate_id,
                self._events.c.sequence > snapshot_version,
            )
            .order_by(self._events.c.sequence)
        )
        event_rows = list(rows.mappings())

        if snapshot_state is None and not event_rows:
            return Result.success(None)

        events = [self._deserialize_event(dict(r)) for r in event_rows]
        final_version = int(event_rows[-1]["sequence"]) if event_rows else snapshot_version

        if snapshot_state is not None:
            aggregate = _restore_from_snapshot(cls, aggregate_id, snapshot_state, snapshot_version)
            aggregate._replay(events, version=final_version)
        else:
            aggregate = cls._from_events(aggregate_id, events, version=final_version)

        return Result.success(aggregate)

    async def _save_snapshot(
        self,
        aggregate: EventSourcedAggregate[Any],
        *,
        aggregate_type: str,
        tenant_id: str | None,
    ) -> None:
        """Upsert a snapshot row for the current aggregate state."""
        state = _serialize_aggregate(aggregate)
        now = datetime.now(UTC)
        stmt = (
            pg_insert(self._snapshots)
            .values(
                id=uuid4(),
                aggregate_type=aggregate_type,
                aggregate_id=str(aggregate.id),
                state=state,
                version=aggregate.version,
                taken_at=now,
                tenant_id=tenant_id,
            )
            # One snapshot row per aggregate: conflicts on the unique
            # (aggregate_type, aggregate_id) index replace the old snapshot.
            .on_conflict_do_update(
                index_elements=["aggregate_type", "aggregate_id"],
                set_={"state": state, "version": aggregate.version, "taken_at": now},
            )
        )
        await self._session.execute(stmt)

    @staticmethod
    def _deserialize_event(row: dict[str, Any]) -> DomainEvent:
        """Reconstruct a DomainEvent from a stored event row.

        The ``event_type`` column holds the fully-qualified class path
        written at append time; it is imported and validated against the
        stored JSON payload.
        """
        event_type_path: str = row["event_type"]
        module_path, _, class_name = event_type_path.rpartition(".")
        module = importlib.import_module(module_path)
        event_cls = getattr(module, class_name)
        return event_cls.model_validate(row["payload"])  # type: ignore[no-any-return]
199
+
200
+
201
+ def _serialize_aggregate(aggregate: EventSourcedAggregate[Any]) -> dict[str, Any]:
202
+ """Best-effort dict serialization for snapshot state."""
203
+ if dataclasses.is_dataclass(aggregate):
204
+ return {k: v for k, v in dataclasses.asdict(aggregate).items() if not k.startswith("_")}
205
+ return {}
206
+
207
+
208
+ def _restore_from_snapshot(
209
+ cls: type[EventSourcedAggregate[Any]],
210
+ aggregate_id: str,
211
+ state: dict[str, Any],
212
+ version: int,
213
+ ) -> EventSourcedAggregate[Any]:
214
+ """Build a partial aggregate from snapshot state (before replaying tail events)."""
215
+ instance: EventSourcedAggregate[Any] = object.__new__(cls)
216
+ for f in dataclasses.fields(instance):
217
+ if f.name.startswith("_"):
218
+ continue
219
+ if f.name == "id":
220
+ object.__setattr__(instance, "id", aggregate_id)
221
+ elif f.name == "version":
222
+ object.__setattr__(instance, "version", version)
223
+ elif f.name in state:
224
+ object.__setattr__(instance, f.name, state[f.name])
225
+ elif f.default is not dataclasses.MISSING:
226
+ object.__setattr__(instance, f.name, f.default)
227
+ elif f.default_factory is not dataclasses.MISSING:
228
+ object.__setattr__(instance, f.name, f.default_factory())
229
+ object.__setattr__(instance, "_pending_events", [])
230
+ return instance
@@ -0,0 +1,107 @@
1
+ """SQLAlchemy table definitions for the event store."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from typing import TYPE_CHECKING
6
+
7
+ from sqlalchemy import Column, DateTime, Index, Integer, String, Table, text
8
+ from sqlalchemy.dialects.postgresql import JSONB
9
+ from sqlalchemy.dialects.postgresql import UUID as PG_UUID
10
+
11
+ if TYPE_CHECKING:
12
+ from sqlalchemy import MetaData
13
+
14
+ __all__ = [
15
+ "AGGREGATE_EVENTS_TABLE_NAME",
16
+ "AGGREGATE_SNAPSHOTS_TABLE_NAME",
17
+ "include_eventstore_tables",
18
+ ]
19
+
20
+ AGGREGATE_EVENTS_TABLE_NAME = "qx_aggregate_events"
21
+ AGGREGATE_SNAPSHOTS_TABLE_NAME = "qx_aggregate_snapshots"
22
+
23
+
24
def include_eventstore_tables(metadata: MetaData) -> tuple[Table, Table]:
    """Add event-store tables to ``metadata`` and return ``(events, snapshots)``.

    Call once at startup alongside your aggregate tables::

        metadata = make_metadata()
        include_outbox_table(metadata)
        events_table, snapshots_table = include_eventstore_tables(metadata)

    Schema — events
    ---------------
    id             UUID PK
    aggregate_type string — fully-qualified aggregate class name
    aggregate_id   string — aggregate identifier (stringified)
    sequence       int — monotonic per-aggregate sequence number (1-based)
    event_type     string — fully-qualified event class name
    event_name     string — stable human-readable name (e.g. "account.money_deposited")
    payload        JSONB — serialised event body
    occurred_at    timestamptz
    tenant_id      string | NULL

    Schema — snapshots
    ------------------
    id             UUID PK
    aggregate_type string
    aggregate_id   string — unique per (aggregate_type, aggregate_id)
    state          JSONB — serialised aggregate state at ``version``
    version        int — sequence number of the last event baked in
    taken_at       timestamptz
    tenant_id      string | NULL
    """
    events = Table(
        AGGREGATE_EVENTS_TABLE_NAME,
        metadata,
        Column("id", PG_UUID(as_uuid=True), primary_key=True),
        Column("aggregate_type", String(255), nullable=False),
        Column("aggregate_id", String(255), nullable=False),
        Column("sequence", Integer, nullable=False),
        Column("event_type", String(255), nullable=False),
        Column("event_name", String(255), nullable=False),
        Column("payload", JSONB, nullable=False, server_default="{}"),
        Column(
            "occurred_at",
            DateTime(timezone=True),
            nullable=False,
            server_default=text("now()"),
        ),
        Column("tenant_id", String(255), nullable=True),
        # The unique (type, id, sequence) index is what enforces optimistic
        # concurrency on append: a concurrent writer reusing a sequence
        # number violates this index.
        Index(
            f"ix_{AGGREGATE_EVENTS_TABLE_NAME}_aggregate",
            "aggregate_type",
            "aggregate_id",
            "sequence",
            unique=True,
        ),
        Index(f"ix_{AGGREGATE_EVENTS_TABLE_NAME}_event_name", "event_name"),
        # extend_existing lets the same MetaData register these tables twice
        # without raising (e.g. repeated calls at startup or in tests).
        extend_existing=True,
    )

    snapshots = Table(
        AGGREGATE_SNAPSHOTS_TABLE_NAME,
        metadata,
        Column("id", PG_UUID(as_uuid=True), primary_key=True),
        Column("aggregate_type", String(255), nullable=False),
        Column("aggregate_id", String(255), nullable=False),
        Column("state", JSONB, nullable=False, server_default="{}"),
        Column("version", Integer, nullable=False),
        Column(
            "taken_at",
            DateTime(timezone=True),
            nullable=False,
            server_default=text("now()"),
        ),
        Column("tenant_id", String(255), nullable=True),
        # One snapshot row per aggregate; the store's snapshot upsert
        # resolves conflicts on this unique index.
        Index(
            f"ix_{AGGREGATE_SNAPSHOTS_TABLE_NAME}_aggregate",
            "aggregate_type",
            "aggregate_id",
            unique=True,
        ),
        extend_existing=True,
    )

    return events, snapshots
@@ -0,0 +1,276 @@
1
+ """EventStore unit tests — no database required."""
2
+
3
+ from __future__ import annotations
4
+
5
+ from dataclasses import dataclass, field
6
+ from typing import ClassVar
7
+ from unittest.mock import AsyncMock, MagicMock, patch
8
+ from uuid import uuid4
9
+
10
+ from qx.core import DomainEvent
11
+ from qx.eventstore import EventSourcedAggregate, EventStore
12
+ from qx.eventstore.table import include_eventstore_tables
13
+ from sqlalchemy import MetaData
14
+
15
+ # ---- Domain fixtures ----
16
+
17
+
18
class MoneyDeposited(DomainEvent):
    # Stable wire name stored in the event log's event_name column.
    # The CLASS name (lowercased) drives apply_* dispatch: apply_moneydeposited.
    event_name: ClassVar[str] = "account.money_deposited"
    # Amount credited by this event.
    amount: int
21
+
22
+
23
class MoneyWithdrawn(DomainEvent):
    # Stable wire name; the class name (lowercased) selects apply_moneywithdrawn.
    event_name: ClassVar[str] = "account.money_withdrawn"
    # Amount debited by this event.
    amount: int
26
+
27
+
28
@dataclass
class BankAccount(EventSourcedAggregate[str]):
    """Minimal event-sourced aggregate used as a fixture throughout the tests."""

    balance: int = field(default=0)

    # -- commands (record events; never mutate state directly) --------
    def deposit(self, amount: int) -> None:
        self.record_event(MoneyDeposited(amount=amount))

    def withdraw(self, amount: int) -> None:
        self.record_event(MoneyWithdrawn(amount=amount))

    # -- event handlers (dispatched by lowercased event class name) ---
    def apply_moneydeposited(self, event: MoneyDeposited) -> None:
        self.balance += event.amount

    def apply_moneywithdrawn(self, event: MoneyWithdrawn) -> None:
        self.balance -= event.amount
43
+
44
+
45
+ # ---- EventSourcedAggregate ----
46
+
47
+
48
def test_record_event_buffers_and_applies() -> None:
    """A command must both mutate local state and buffer the event."""
    acct = BankAccount(id="acc-1")

    acct.deposit(100)

    assert acct.balance == 100
    assert acct.has_pending_events
    assert len(acct._pending_events) == 1
55
+
56
+
57
def test_pull_events_drains_buffer() -> None:
    """pull_events returns everything recorded and leaves the buffer empty."""
    acct = BankAccount(id="acc-1")
    acct.deposit(50)
    acct.withdraw(20)

    drained = acct.pull_events()

    assert len(drained) == 2
    assert not acct.has_pending_events
66
+
67
+
68
def test_replay_restores_state() -> None:
    """_replay applies history and sets the version without buffering."""
    acct = BankAccount(id="acc-1")
    history = [MoneyDeposited(amount=100), MoneyWithdrawn(amount=30)]

    acct._replay(history, version=2)

    assert acct.balance == 70
    assert acct.version == 2
    assert not acct.has_pending_events
77
+
78
+
79
def test_from_events_constructs_aggregate() -> None:
    """_from_events builds a fresh aggregate purely from its history."""
    history = [MoneyDeposited(amount=200), MoneyWithdrawn(amount=50)]

    acct = BankAccount._from_events("acc-1", history, version=2)

    assert acct.balance == 150
    assert acct.version == 2
    assert not acct.has_pending_events
86
+
87
+
88
def test_unknown_event_type_is_ignored() -> None:
    """Replaying an event with no matching apply_* handler is a silent no-op."""

    class UnhandledEvent(DomainEvent):
        event_name: ClassVar[str] = "other"

    acct = BankAccount(id="acc-1")
    acct._replay([UnhandledEvent()], version=1)

    assert acct.balance == 0  # no handler, no crash
95
+
96
+
97
+ # ---- EventStore.append ----
98
+
99
+
100
async def test_append_writes_rows_for_each_event() -> None:
    """append() writes one row per pending event with 1-based sequences."""
    session = AsyncMock()
    store = _make_store(session)
    account = BankAccount(id="acc-1")
    account.deposit(100)
    account.withdraw(40)

    result = await store.append(account, aggregate_type="test.BankAccount")

    assert result.is_success
    assert result.value == 2
    assert account.version == 2
    assert not account.has_pending_events
    session.execute.assert_awaited_once()
    # execute(insert_stmt, rows): the row list is the second positional arg.
    rows = session.execute.call_args[0][1]
    assert len(rows) == 2
    assert rows[0]["sequence"] == 1
    assert rows[1]["sequence"] == 2
118
+
119
+
120
async def test_append_noop_when_no_pending_events() -> None:
    """append() on a clean aggregate must not touch the database at all."""
    session = AsyncMock()
    store = _make_store(session)
    account = BankAccount(id="acc-1")

    result = await store.append(account, aggregate_type="test.BankAccount")

    assert result.is_success
    assert result.value == 0  # version unchanged
    session.execute.assert_not_awaited()
130
+
131
+
132
async def test_append_uses_base_version_as_sequence_offset() -> None:
    """New events continue the sequence from the aggregate's current version."""
    session = AsyncMock()
    store = _make_store(session)
    account = BankAccount(id="acc-1")
    account.version = 5  # simulate a previously-persisted aggregate
    account.deposit(10)

    await store.append(account, aggregate_type="test.BankAccount")

    rows = session.execute.call_args[0][1]
    assert rows[0]["sequence"] == 6
143
+
144
+
145
async def test_append_triggers_snapshot_at_threshold() -> None:
    """A snapshot is taken when the new version reaches the snapshot interval."""
    session = AsyncMock()
    store = _make_store(session, snapshot_every=3)
    account = BankAccount(id="acc-1")
    account.version = 2
    account.deposit(10)  # sequence 3 — threshold hit

    with patch.object(store, "_save_snapshot", new=AsyncMock()) as mock_snap:
        await store.append(account, aggregate_type="test.BankAccount")

    mock_snap.assert_awaited_once()
156
+
157
+
158
async def test_append_no_snapshot_below_threshold() -> None:
    """No snapshot is taken while the version stays below the interval."""
    session = AsyncMock()
    store = _make_store(session, snapshot_every=10)
    account = BankAccount(id="acc-1")
    account.deposit(10)  # version becomes 1 — well under 10

    with patch.object(store, "_save_snapshot", new=AsyncMock()) as mock_snap:
        await store.append(account, aggregate_type="test.BankAccount")

    mock_snap.assert_not_awaited()
168
+
169
+
170
+ # ---- EventStore.load ----
171
+
172
+
173
async def test_load_returns_none_when_no_events_no_snapshot() -> None:
    """load() yields a success carrying None for a never-seen aggregate id."""
    session = _session_with_rows(snap_row=None, event_rows=[])
    store = _make_store(session)

    result = await store.load(BankAccount, aggregate_id="acc-1", aggregate_type="test.BankAccount")

    assert result.is_success
    assert result.value is None
181
+
182
+
183
async def test_load_replays_events_without_snapshot() -> None:
    """With no snapshot, load() replays the full stream from sequence 1."""
    event_rows = [
        _event_row(seq=1, event_cls=MoneyDeposited, payload={"amount": 100}),
        _event_row(seq=2, event_cls=MoneyWithdrawn, payload={"amount": 30}),
    ]
    session = _session_with_rows(snap_row=None, event_rows=event_rows)
    store = _make_store(session)

    result = await store.load(BankAccount, aggregate_id="acc-1", aggregate_type="test.BankAccount")

    assert result.is_success
    account = result.value
    assert account is not None
    assert account.balance == 70  # 100 deposited - 30 withdrawn
    assert account.version == 2
198
+
199
+
200
async def test_load_uses_snapshot_and_tail_events() -> None:
    """load() restores snapshot state, then replays only the tail events."""
    snap = {"id": "snap-uuid", "state": {"balance": 80, "id": "acc-1"}, "version": 5}
    event_rows = [_event_row(seq=6, event_cls=MoneyDeposited, payload={"amount": 20})]
    session = _session_with_rows(snap_row=snap, event_rows=event_rows)
    store = _make_store(session)

    result = await store.load(BankAccount, aggregate_id="acc-1", aggregate_type="test.BankAccount")

    assert result.is_success
    account = result.value
    assert account is not None
    assert account.balance == 100  # 80 from snapshot + 20 from tail
    assert account.version == 6
213
+
214
+
215
async def test_load_with_snapshot_and_no_tail_events() -> None:
    """load() works from a snapshot alone when no newer events exist."""
    snap = {"id": "snap-uuid", "state": {"balance": 50, "id": "acc-1"}, "version": 10}
    session = _session_with_rows(snap_row=snap, event_rows=[])
    store = _make_store(session)

    result = await store.load(BankAccount, aggregate_id="acc-1", aggregate_type="test.BankAccount")

    assert result.is_success
    account = result.value
    assert account is not None
    assert account.balance == 50
    assert account.version == 10  # snapshot version is kept as final version
227
+
228
+
229
+ # ---- Helpers ----
230
+
231
+
232
def _make_real_tables() -> tuple:
    """Build genuine SQLAlchemy tables on a throwaway MetaData.

    Real tables (rather than mocks) let EventStore construct real select()
    statements in these tests.
    """
    return include_eventstore_tables(MetaData())
234
+
235
+
236
def _make_store(session: AsyncMock, *, snapshot_every: int = 50) -> EventStore:
    """Build an EventStore wired to a mock session but real table objects."""
    events_table, snapshots_table = _make_real_tables()
    return EventStore(session, events_table, snapshots_table, snapshot_every=snapshot_every)
239
+
240
+
241
+ def _event_row(*, seq: int, event_cls: type, payload: dict) -> dict:
242
+ full_type = f"{event_cls.__module__}.{event_cls.__qualname__}"
243
+ return {
244
+ "id": uuid4(),
245
+ "aggregate_type": "test.BankAccount",
246
+ "aggregate_id": "acc-1",
247
+ "sequence": seq,
248
+ "event_type": full_type,
249
+ "event_name": event_cls.event_name,
250
+ "payload": payload,
251
+ }
252
+
253
+
254
+ def _session_with_rows(
255
+ *,
256
+ snap_row: dict | None,
257
+ event_rows: list[dict],
258
+ ) -> AsyncMock:
259
+ """Build a mock AsyncSession that returns snap_row then event_rows on execute calls."""
260
+ session = AsyncMock()
261
+
262
+ def _mapping_result(row: dict | None) -> MagicMock:
263
+ result = MagicMock()
264
+ result.mappings.return_value.first.return_value = row
265
+ return result
266
+
267
+ def _mapping_list(rows: list[dict]) -> MagicMock:
268
+ result = MagicMock()
269
+ result.mappings.return_value.__iter__ = MagicMock(return_value=iter(rows))
270
+ return result
271
+
272
+ session.execute.side_effect = [
273
+ _mapping_result(snap_row),
274
+ _mapping_list(event_rows),
275
+ ]
276
+ return session