qx-db 0.1.0__tar.gz

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
qx_db-0.1.0/.gitignore ADDED
@@ -0,0 +1,51 @@
1
+ # Python
2
+ __pycache__/
3
+ *.py[cod]
4
+ *.pyo
5
+ *.pyd
6
+ .Python
7
+ *.so
8
+ *.egg
9
+ *.egg-info/
10
+ dist/
11
+ build/
12
+ eggs/
13
+ .eggs/
14
+ sdist/
15
+ wheels/
16
+ *.egg-link
17
+
18
+ # Virtual environments
19
+ .venv/
20
+ venv/
21
+ env/
22
+ ENV/
23
+
24
+ # uv
25
+ .uv/
26
+
27
+ # Testing
28
+ .pytest_cache/
29
+ .coverage
30
+ htmlcov/
31
+ .tox/
32
+
33
+ # Type checking
34
+ .mypy_cache/
35
+ .ruff_cache/
36
+
37
+ # IDE
38
+ .idea/
39
+ .vscode/
40
+ *.swp
41
+ *.swo
42
+
43
+ # OS
44
+ .DS_Store
45
+ Thumbs.db
46
+
47
+ # Docker
48
+ *.env.local
49
+
50
+ # Dist artifacts
51
+ dist/
qx_db-0.1.0/PKG-INFO ADDED
@@ -0,0 +1,71 @@
1
+ Metadata-Version: 2.4
2
+ Name: qx-db
3
+ Version: 0.1.0
4
+ Summary: Qx database layer: SQLAlchemy 2 async, repositories, unit of work, transactional outbox
5
+ Author: Qx Engineering
6
+ License: MIT
7
+ Requires-Python: >=3.14
8
+ Requires-Dist: alembic>=1.13.0
9
+ Requires-Dist: asyncpg>=0.30.0
10
+ Requires-Dist: pydantic>=2.8.0
11
+ Requires-Dist: qx-core
12
+ Requires-Dist: qx-cqrs
13
+ Requires-Dist: qx-di
14
+ Requires-Dist: qx-observability
15
+ Requires-Dist: sqlalchemy[asyncio]>=2.0.30
16
+ Description-Content-Type: text/markdown
17
+
18
+ # qx-db
19
+
20
+ Database layer for the Qx framework — SQLAlchemy 2 async, a generic `Repository[TEntity]`, `UnitOfWork` with domain-event routing, transactional outbox, and cursor/offset pagination.
21
+
22
+ ## What lives here
23
+
24
+ - **`qx.db.Repository[TEntity]`** — generic async CRUD with optimistic concurrency (`version` column), soft-delete, tenant filtering, and allow-listed filter/sort fields.
25
+ - **`qx.db.UnitOfWork`** — wraps a SQLAlchemy session; commits the aggregate write and the outbox `INSERT` in one transaction, then drains and dispatches domain events.
26
+ - **`qx.db.OutboxRecorder` / `DefaultOutboxRecorder`** — persists `IntegrationEvent` payloads to `qx_outbox_events` for reliable delivery.
27
+ - **`qx.db.SessionFactory`** — factory for `AsyncSession` instances; injected into repositories.
28
+ - **`qx.db.make_metadata` / `make_registry`** — SQLAlchemy `MetaData` and `registry` helpers for imperative mapping (no declarative base).
29
+ - **`qx.db.standard_audit_columns` / `uuid_column` / `jsonb_column`** — column helpers for consistent schema conventions.
30
+ - **`qx.db.build_cursor_page` / `encode_cursor` / `decode_cursor`** — opaque cursor pagination utilities.
31
+ - **`qx.db.include_outbox_table`** — attaches the `qx_outbox_events` table to your `MetaData`.
32
+
33
+ ## Defining a repository
34
+
35
+ ```python
36
+ from qx.db import Repository
37
+ from sqlalchemy import Table
38
+
39
+ class UserRepository(Repository[User]):
40
+ entity_cls = User
41
+ table: Table # set to your SQLAlchemy Table at class or instance level
42
+ filterable_fields = {"email", "name", "is_active"}
43
+ sortable_fields = {"created_at", "email"}
44
+ tenanted = False # set True for multi-tenant tables
45
+ ```
46
+
47
+ ## Unit of Work
48
+
49
+ ```python
50
+ from qx.db import UnitOfWork
51
+
52
+ class CreateUserHandler:
53
+ def __init__(self, uow: UnitOfWork) -> None:
54
+ self._uow = uow
55
+
56
+ async def handle(self, cmd: CreateUserCommand) -> Result[UserDto]:
57
+ async with self._uow.begin() as ctx:
58
+ user_result = User.register(cmd.email, cmd.name)
59
+ user = unwrap(user_result)
60
+ await ctx.users.add(user)
61
+ # commit + outbox INSERT + event dispatch all happened above
62
+ return Result.success(UserDto.from_domain(user))
63
+ ```
64
+
65
+ ## Design rules
66
+
67
+ - **Optimistic concurrency** — `save()` uses `WHERE id = ? AND version = ?`; returns `ConflictError` (not an exception) on mismatch.
68
+ - **Soft-delete by default** — `list()` and `get()` exclude rows where `deleted_at IS NOT NULL` unless `include_deleted=True`.
69
+ - **Allow-listed filters** — `filterable_fields` and `sortable_fields` are class-level sets; querying an unlisted field raises `ValueError` to prevent controller logic leaking into SQL.
70
+ - **Imperative mapping** — domain entities are plain dataclasses with no SQLAlchemy decorators. The mapping lives in the infrastructure layer, not the domain.
71
+ - **UUID v7** — `uuid_column()` defaults to UUID v7 primary keys for sequential B-tree index locality.
qx_db-0.1.0/README.md ADDED
@@ -0,0 +1,54 @@
1
+ # qx-db
2
+
3
+ Database layer for the Qx framework — SQLAlchemy 2 async, a generic `Repository[TEntity]`, `UnitOfWork` with domain-event routing, transactional outbox, and cursor/offset pagination.
4
+
5
+ ## What lives here
6
+
7
+ - **`qx.db.Repository[TEntity]`** — generic async CRUD with optimistic concurrency (`version` column), soft-delete, tenant filtering, and allow-listed filter/sort fields.
8
+ - **`qx.db.UnitOfWork`** — wraps a SQLAlchemy session; commits the aggregate write and the outbox `INSERT` in one transaction, then drains and dispatches domain events.
9
+ - **`qx.db.OutboxRecorder` / `DefaultOutboxRecorder`** — persists `IntegrationEvent` payloads to `qx_outbox_events` for reliable delivery.
10
+ - **`qx.db.SessionFactory`** — factory for `AsyncSession` instances; injected into repositories.
11
+ - **`qx.db.make_metadata` / `make_registry`** — SQLAlchemy `MetaData` and `registry` helpers for imperative mapping (no declarative base).
12
+ - **`qx.db.standard_audit_columns` / `uuid_column` / `jsonb_column`** — column helpers for consistent schema conventions.
13
+ - **`qx.db.build_cursor_page` / `encode_cursor` / `decode_cursor`** — opaque cursor pagination utilities.
14
+ - **`qx.db.include_outbox_table`** — attaches the `qx_outbox_events` table to your `MetaData`.
15
+
16
+ ## Defining a repository
17
+
18
+ ```python
19
+ from qx.db import Repository
20
+ from sqlalchemy import Table
21
+
22
+ class UserRepository(Repository[User]):
23
+ entity_cls = User
24
+ table: Table # set to your SQLAlchemy Table at class or instance level
25
+ filterable_fields = {"email", "name", "is_active"}
26
+ sortable_fields = {"created_at", "email"}
27
+ tenanted = False # set True for multi-tenant tables
28
+ ```
29
+
30
+ ## Unit of Work
31
+
32
+ ```python
33
+ from qx.db import UnitOfWork
34
+
35
+ class CreateUserHandler:
36
+ def __init__(self, uow: UnitOfWork) -> None:
37
+ self._uow = uow
38
+
39
+ async def handle(self, cmd: CreateUserCommand) -> Result[UserDto]:
40
+ async with self._uow.begin() as ctx:
41
+ user_result = User.register(cmd.email, cmd.name)
42
+ user = unwrap(user_result)
43
+ await ctx.users.add(user)
44
+ # commit + outbox INSERT + event dispatch all happened above
45
+ return Result.success(UserDto.from_domain(user))
46
+ ```
47
+
48
+ ## Design rules
49
+
50
+ - **Optimistic concurrency** — `save()` uses `WHERE id = ? AND version = ?`; returns `ConflictError` (not an exception) on mismatch.
51
+ - **Soft-delete by default** — `list()` and `get()` exclude rows where `deleted_at IS NOT NULL` unless `include_deleted=True`.
52
+ - **Allow-listed filters** — `filterable_fields` and `sortable_fields` are class-level sets; querying an unlisted field raises `ValueError` to prevent controller logic leaking into SQL.
53
+ - **Imperative mapping** — domain entities are plain dataclasses with no SQLAlchemy decorators. The mapping lives in the infrastructure layer, not the domain.
54
+ - **UUID v7** — `uuid_column()` defaults to UUID v7 primary keys for sequential B-tree index locality.
@@ -0,0 +1,25 @@
1
+ [project]
2
+ name = "qx-db"
3
+ version = "0.1.0"
4
+ description = "Qx database layer: SQLAlchemy 2 async, repositories, unit of work, transactional outbox"
5
+ readme = "README.md"
6
+ requires-python = ">=3.14"
7
+ license = { text = "MIT" }
8
+ authors = [{ name = "Qx Engineering" }]
9
+ dependencies = [
10
+ "qx-core",
11
+ "qx-di",
12
+ "qx-cqrs",
13
+ "qx-observability",
14
+ "sqlalchemy[asyncio]>=2.0.30",
15
+ "asyncpg>=0.30.0",
16
+ "alembic>=1.13.0",
17
+ "pydantic>=2.8.0",
18
+ ]
19
+
20
+ [build-system]
21
+ requires = ["hatchling"]
22
+ build-backend = "hatchling.build"
23
+
24
+ [tool.hatch.build.targets.wheel]
25
+ packages = ["src/qx"]
@@ -0,0 +1,66 @@
1
+ """Qx database layer.
2
+
3
+ Public surface — import from ``qx.db``.
4
+ """
5
+
6
+ from __future__ import annotations
7
+
8
+ from qx.db.engine import (
9
+ DatabaseSettings,
10
+ SessionFactory,
11
+ create_engine,
12
+ make_session_factory,
13
+ open_session,
14
+ )
15
+ from qx.db.mapping import (
16
+ jsonb_column,
17
+ make_metadata,
18
+ make_registry,
19
+ standard_audit_columns,
20
+ uuid_column,
21
+ )
22
+ from qx.db.outbox import (
23
+ OUTBOX_TABLE_NAME,
24
+ DefaultOutboxRecorder,
25
+ OutboxRecorder,
26
+ include_outbox_table,
27
+ )
28
+ from qx.db.pagination import (
29
+ build_cursor_page,
30
+ decode_cursor,
31
+ encode_cursor,
32
+ )
33
+ from qx.db.repository import Repository
34
+ from qx.db.uow import EventDispatcher, UnitOfWork
35
+
36
+ __version__ = "0.1.0"
37
+
38
+ __all__ = [
39
+ "OUTBOX_TABLE_NAME",
40
+ # Engine / sessions
41
+ "DatabaseSettings",
42
+ "DefaultOutboxRecorder",
43
+ "EventDispatcher",
44
+ # Outbox
45
+ "OutboxRecorder",
46
+ # Repository
47
+ "Repository",
48
+ "SessionFactory",
49
+ # UoW
50
+ "UnitOfWork",
51
+ "__version__",
52
+ "build_cursor_page",
53
+ "create_engine",
54
+ "decode_cursor",
55
+ # Pagination
56
+ "encode_cursor",
57
+ "include_outbox_table",
58
+ "jsonb_column",
59
+ # Mapping
60
+ "make_metadata",
61
+ "make_registry",
62
+ "make_session_factory",
63
+ "open_session",
64
+ "standard_audit_columns",
65
+ "uuid_column",
66
+ ]
@@ -0,0 +1,122 @@
1
+ """Async engine and session factory.
2
+
3
+ Two-tier setup:
4
+
5
+ - One ``AsyncEngine`` per database, registered as a singleton.
6
+ - ``AsyncSession`` is **scoped** to a request: one session per HTTP request /
7
+ message / job, opened by the unit-of-work and closed on scope exit.
8
+
9
+ We expose a ``DatabaseSettings`` Pydantic settings class that services pull
10
+ their DB config from. Settings live in ``qx-core`` style (``QX_DB__*``
11
+ env vars).
12
+
13
+ The engine is configured with pool_pre_ping (so DNS / TCP-stale connections
14
+ get caught) and a sane default pool size. NullPool is supported for serverless
15
+ runtimes (Lambda, Fly Machines) where a connection pool fights the platform.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from contextlib import asynccontextmanager
21
+ from typing import TYPE_CHECKING, ClassVar, Literal
22
+
23
+ from pydantic import Field, SecretStr
24
+ from pydantic_settings import BaseSettings, SettingsConfigDict
25
+ from sqlalchemy.ext.asyncio import (
26
+ AsyncEngine,
27
+ AsyncSession,
28
+ async_sessionmaker,
29
+ create_async_engine,
30
+ )
31
+ from sqlalchemy.pool import AsyncAdaptedQueuePool, NullPool
32
+
33
+ if TYPE_CHECKING:
34
+ from collections.abc import AsyncIterator
35
+
36
+ __all__ = [
37
+ "DatabaseSettings",
38
+ "SessionFactory",
39
+ "create_engine",
40
+ "make_session_factory",
41
+ ]
42
+
43
+
44
+ PoolKind = Literal["queue", "null"]
45
+
46
+
47
class DatabaseSettings(BaseSettings):
    """Database configuration.

    Env vars: ``QX_DB__URL``, ``QX_DB__POOL_SIZE``, etc.
    """

    model_config: ClassVar[SettingsConfigDict] = SettingsConfigDict(
        env_prefix="QX_DB__",
        extra="ignore",
    )

    # Async DSN. SecretStr keeps the password out of reprs and logs; callers
    # unwrap via ``url.get_secret_value()``.
    url: SecretStr = Field(
        default=SecretStr("postgresql+asyncpg://postgres:postgres@localhost:5432/qx")
    )
    # Pool sizing — consumed by create_engine() only when pool_kind == "queue".
    pool_size: int = 10
    max_overflow: int = 10
    pool_timeout_seconds: float = 30.0
    pool_recycle_seconds: int = 1800  # recycle every 30 min, dodging upstream idle-cuts
    pool_pre_ping: bool = True
    # "queue" -> AsyncAdaptedQueuePool; "null" -> NullPool (serverless runtimes).
    pool_kind: PoolKind = "queue"
    echo: bool = False
    # Statement timeout applied via `SET LOCAL statement_timeout`; per-statement,
    # not connection-wide, so transactions can opt out via their own SET.
    # NOTE(review): nothing in this module applies it — presumably the UoW or
    # session setup does; confirm before relying on it.
    statement_timeout_ms: int = 10_000
71
+
72
+
73
def create_engine(settings: DatabaseSettings) -> AsyncEngine:
    """Build an async engine.

    The engine is a long-lived, thread-safe object. Treat it as a singleton.
    Disposing it (``await engine.dispose()``) closes all pooled connections —
    do that at process shutdown.
    """
    dsn = settings.url.get_secret_value()

    if settings.pool_kind == "null":
        # Serverless runtimes: no pooling, every checkout opens a fresh connection.
        return create_async_engine(
            dsn,
            poolclass=NullPool,
            echo=settings.echo,
            future=True,
        )

    # Default path: asyncio-adapted queue pool with stale-connection checks.
    return create_async_engine(
        dsn,
        poolclass=AsyncAdaptedQueuePool,
        pool_size=settings.pool_size,
        max_overflow=settings.max_overflow,
        pool_timeout=settings.pool_timeout_seconds,
        pool_recycle=settings.pool_recycle_seconds,
        pool_pre_ping=settings.pool_pre_ping,
        echo=settings.echo,
        future=True,
    )
100
+
101
+
102
SessionFactory = async_sessionmaker[AsyncSession]


def make_session_factory(engine: AsyncEngine) -> SessionFactory:
    """Build a session factory. Singleton per engine."""
    options = {
        "bind": engine,
        # Aggregates stay readable after commit without a re-fetch.
        "expire_on_commit": False,
        # Flushes are driven explicitly by the UoW, never implicitly.
        "autoflush": False,
        "class_": AsyncSession,
    }
    return async_sessionmaker(**options)
113
+
114
+
115
@asynccontextmanager
async def open_session(factory: SessionFactory) -> AsyncIterator[AsyncSession]:
    """Yield a fresh ``AsyncSession``; always closed on exit, success or error."""
    sess = factory()
    try:
        yield sess
    finally:
        # close() returns the underlying connection to the pool on every path.
        await sess.close()
@@ -0,0 +1,116 @@
1
+ """SQLAlchemy imperative mapping.
2
+
3
+ We deliberately do **not** use declarative-style ORM (no ``DeclarativeBase``,
4
+ no ``Mapped`` on entities). Reasons:
5
+
6
+ - Keeps domain entities decoupled from SQLAlchemy. They remain plain
7
+ ``@dataclass``-style objects that the framework (and tests) can construct
8
+ trivially without an active session.
9
+ - Lets us define multiple persistence representations of the same aggregate
10
+ (e.g., a write model and a denormalized read model) without inheritance
11
+ gymnastics.
12
+ - Mirrors how clean-architecture-discipline services in other ecosystems
13
+ (Java/Spring's JPA EntityManager, .NET's EF Core Fluent API) handle the
14
+ domain/infrastructure boundary.
15
+
16
+ The trade-off: a small amount of boilerplate per aggregate in ``mapping.py``
17
+ files declaring a ``Table(...)`` + ``registry.map_imperatively(Entity, table)``. Worth it.
18
+
19
+ This module supplies the framework-level ``make_registry()`` and a few
20
+ common column types so services stay consistent.
21
+ """
22
+
23
+ from __future__ import annotations
24
+
25
+ from typing import TYPE_CHECKING, Any
26
+
27
+ from sqlalchemy import (
28
+ Column,
29
+ DateTime,
30
+ Integer,
31
+ MetaData,
32
+ )
33
+ from sqlalchemy.dialects.postgresql import JSONB
34
+ from sqlalchemy.dialects.postgresql import UUID as PG_UUID
35
+ from sqlalchemy.orm import registry as sa_registry
36
+
37
+ if TYPE_CHECKING:
38
+ from uuid import UUID
39
+
40
+ __all__ = [
41
+ "jsonb_column",
42
+ "make_metadata",
43
+ "make_registry",
44
+ "standard_audit_columns",
45
+ "uuid_column",
46
+ ]
47
+
48
+
49
def make_metadata(schema: str | None = None) -> MetaData:
    """Build a metadata object with consistent naming conventions.

    Predictable index/constraint names matter for Alembic — without these,
    autogenerate produces unstable names that flip between revisions.
    """
    # One deterministic name template per constraint kind.
    conventions = {
        "ix": "ix_%(column_0_label)s",
        "uq": "uq_%(table_name)s_%(column_0_name)s",
        "ck": "ck_%(table_name)s_%(constraint_name)s",
        "fk": "fk_%(table_name)s_%(column_0_name)s_%(referred_table_name)s",
        "pk": "pk_%(table_name)s",
    }
    return MetaData(schema=schema, naming_convention=conventions)
65
+
66
+
67
def make_registry(metadata: MetaData | None = None) -> sa_registry:
    """Create an SQLAlchemy registry for imperative mapping."""
    if metadata is None:
        metadata = make_metadata()
    return sa_registry(metadata=metadata)
71
+
72
+
73
def standard_audit_columns(
    *,
    include_tenant: bool = True,
    include_deletion: bool = True,
) -> list[Column[Any]]:
    """Common audit columns mirroring the ``Entity`` base fields.

    Use in every aggregate table::

        users = Table(
            "users",
            metadata,
            Column("id", PG_UUID(as_uuid=True), primary_key=True),
            Column("email", String(255), nullable=False, unique=True),
            *standard_audit_columns(),
        )
    """
    # Creation/update audit trail plus the optimistic-concurrency counter.
    cols: list[Column[Any]] = [
        Column("created_at", DateTime(timezone=True), nullable=False),
        Column("updated_at", DateTime(timezone=True), nullable=True),
        Column("created_by", PG_UUID(as_uuid=True), nullable=True),
        Column("updated_by", PG_UUID(as_uuid=True), nullable=True),
        Column("version", Integer, nullable=False, default=1),
    ]
    if include_tenant:
        # Indexed: tenant filtering appears in nearly every query on tenanted tables.
        cols.append(Column("tenant_id", PG_UUID(as_uuid=True), nullable=True, index=True))
    if include_deletion:
        cols.extend(
            [
                # Indexed: soft-delete queries filter on ``deleted_at IS NULL`` by default.
                Column("deleted_at", DateTime(timezone=True), nullable=True, index=True),
                Column("deleted_by", PG_UUID(as_uuid=True), nullable=True),
            ]
        )
    return cols
107
+
108
+
109
def uuid_column(name: str, *, primary_key: bool = False, **kwargs: Any) -> Column[UUID]:
    """Conventional UUID column. Uses Postgres native UUID type."""
    col: Column[UUID] = Column(name, PG_UUID(as_uuid=True), primary_key=primary_key, **kwargs)
    return col


def jsonb_column(name: str, **kwargs: Any) -> Column[dict[str, Any]]:
    """Conventional JSONB column."""
    col: Column[dict[str, Any]] = Column(name, JSONB, **kwargs)
    return col
@@ -0,0 +1,104 @@
1
+ """Alembic integration helpers.
2
+
3
+ Services own their migration directory; the framework only supplies the
4
+ boilerplate to make ``alembic`` aware of the async engine and the framework's
5
+ naming conventions.
6
+
7
+ Typical service ``alembic/env.py`` calls into here::
8
+
9
+ from qx.db.migrations import run_async_migrations
10
+ from myservice.persistence import metadata
11
+
12
+ run_async_migrations(metadata)
13
+
14
+ We don't ship an opinionated ``alembic.ini`` because version locations and
15
+ script templates are service-specific. The CLI scaffold (``qx`` CLI)
16
+ generates them.
17
+ """
18
+
19
+ from __future__ import annotations
20
+
21
+ import asyncio
22
+ from typing import Any
23
+
24
+ from alembic import context
25
+ from sqlalchemy import MetaData
26
+ from sqlalchemy.engine import Connection
27
+ from sqlalchemy.ext.asyncio import async_engine_from_config
28
+
29
+ __all__ = ["run_async_migrations"]
30
+
31
+
32
def run_async_migrations(
    metadata: MetaData,
    *,
    compare_type: bool = True,
    compare_server_default: bool = True,
    include_schemas: bool = False,
) -> None:
    """Run Alembic migrations against an async engine.

    Call from ``alembic/env.py``. Picks up the connection URL from the
    ``alembic.ini`` ``sqlalchemy.url`` (which services typically set from
    ``QX_DB__URL``).

    ``compare_type`` and ``compare_server_default`` are on by default so
    autogenerate catches the largest class of schema drift.
    """
    cfg = context.config
    if context.is_offline_mode():
        # Offline ("--sql") mode: emit SQL against the configured URL without
        # opening a connection; literal_binds inlines parameters into the script.
        context.configure(
            url=cfg.get_main_option("sqlalchemy.url"),
            target_metadata=metadata,
            compare_type=compare_type,
            compare_server_default=compare_server_default,
            include_schemas=include_schemas,
            literal_binds=True,
        )
        with context.begin_transaction():
            context.run_migrations()
        return

    # Online mode: Alembic's hooks are synchronous, so bridge into asyncio here.
    asyncio.run(_run_online(cfg, metadata, compare_type, compare_server_default, include_schemas))
63
+
64
+
65
async def _run_online(
    cfg: Any,
    metadata: MetaData,
    compare_type: bool,
    compare_server_default: bool,
    include_schemas: bool,
) -> None:
    """Online migration path: build an async engine from the Alembic config
    section and drive the sync Alembic machinery through ``run_sync``."""
    section = cfg.get_section(cfg.config_ini_section) or {}
    engine = async_engine_from_config(
        section,
        prefix="sqlalchemy.",
        future=True,
    )
    try:
        async with engine.connect() as conn:
            await conn.run_sync(
                _do_migrations,
                metadata,
                compare_type,
                compare_server_default,
                include_schemas,
            )
    finally:
        # Fix: dispose even when a migration fails, so pooled connections are
        # not leaked by long-lived processes invoking migrations programmatically.
        await engine.dispose()
87
+
88
+
89
def _do_migrations(
    connection: Connection,
    metadata: MetaData,
    compare_type: bool,
    compare_server_default: bool,
    include_schemas: bool,
) -> None:
    # Runs inside AsyncConnection.run_sync, so `connection` is the sync facade
    # Alembic expects; configure the context against it and execute revisions.
    context.configure(
        connection=connection,
        target_metadata=metadata,
        compare_type=compare_type,
        compare_server_default=compare_server_default,
        include_schemas=include_schemas,
    )
    with context.begin_transaction():
        context.run_migrations()
@@ -0,0 +1,147 @@
1
+ """Transactional outbox.
2
+
3
+ The outbox is the textbook pattern for atomic state-change + event-publishing:
4
+
5
+ 1. The application writes the aggregate change and inserts a row into
6
+ ``outbox_events`` in the **same SQL transaction**. Either both succeed or
7
+ both roll back.
8
+ 2. A separate ``OutboxRelay`` worker polls the table, publishes pending rows
9
+ to the message broker (NATS JetStream), and marks them as published.
10
+
11
+ This module supplies:
12
+
13
+ - The ``qx_outbox_events`` Table definition (services include it in their
14
+ metadata via ``include_outbox_table(metadata)``).
15
+ - The ``OutboxRecorder`` which serializes ``IntegrationEvent`` instances and
16
+ inserts them in the UoW's transaction.
17
+ - The relay worker itself lives in ``qx-events`` — it depends on a
18
+ broker, and we keep the database package broker-agnostic.
19
+
20
+ Schema choice: store the full event envelope as JSONB plus a few denormalized
21
+ columns we'll want to query/index (event_name, occurred_at, tenant_id). This
22
+ lets you do `SELECT * FROM qx_outbox_events WHERE event_name = 'user.registered'
23
+ AND published_at IS NULL` without parsing JSON.
24
+ """
25
+
26
+ from __future__ import annotations
27
+
28
+ import json
29
+ from typing import TYPE_CHECKING, Protocol, runtime_checkable
30
+ from uuid import uuid4
31
+
32
+ from sqlalchemy import (
33
+ Column,
34
+ DateTime,
35
+ Index,
36
+ Integer,
37
+ MetaData,
38
+ String,
39
+ Table,
40
+ text,
41
+ )
42
+ from sqlalchemy.dialects.postgresql import JSONB
43
+ from sqlalchemy.dialects.postgresql import UUID as PG_UUID
44
+
45
+ if TYPE_CHECKING:
46
+ from collections.abc import Sequence
47
+
48
+ from qx.core import IntegrationEvent
49
+ from sqlalchemy.ext.asyncio import AsyncSession
50
+
51
+ __all__ = [
52
+ "OUTBOX_TABLE_NAME",
53
+ "DefaultOutboxRecorder",
54
+ "OutboxRecorder",
55
+ "include_outbox_table",
56
+ ]
57
+
58
+
59
+ OUTBOX_TABLE_NAME = "qx_outbox_events"
60
+
61
+
62
def include_outbox_table(metadata: MetaData) -> Table:
    """Register the outbox table in the given metadata.

    Services call this once during their mapping setup so Alembic picks up the
    table in autogenerated revisions.
    """
    columns = [
        Column("id", PG_UUID(as_uuid=True), primary_key=True),
        Column("event_name", String(255), nullable=False),
        Column("event_version", Integer, nullable=False, default=1),
        Column("payload", JSONB, nullable=False),
        Column("occurred_at", DateTime(timezone=True), nullable=False),
        Column("correlation_id", PG_UUID(as_uuid=True), nullable=True),
        Column("causation_id", PG_UUID(as_uuid=True), nullable=True),
        Column("tenant_id", PG_UUID(as_uuid=True), nullable=True),
        Column("published_at", DateTime(timezone=True), nullable=True),
        Column("attempts", Integer, nullable=False, default=0),
        Column("last_error", String(2048), nullable=True),
    ]
    indexes = [
        # Relay poll path: WHERE published_at IS NULL ORDER BY occurred_at.
        Index(f"ix_{OUTBOX_TABLE_NAME}_pending", "published_at", "occurred_at"),
        Index(f"ix_{OUTBOX_TABLE_NAME}_event_name", "event_name"),
    ]
    return Table(OUTBOX_TABLE_NAME, metadata, *columns, *indexes)
85
+
86
+
87
@runtime_checkable
class OutboxRecorder(Protocol):
    """Protocol the UoW uses to write integration events to the outbox.

    Kept abstract so test cases and alternative storage (e.g., outbox-as-Kafka-
    log for very high throughput) can swap in.
    """

    async def record(
        self,
        session: AsyncSession,
        events: Sequence[IntegrationEvent],
    ) -> None:
        """Persist *events* within the transaction owned by *session*."""
        ...
100
+
101
+
102
class DefaultOutboxRecorder:
    """Inserts rows into the PostgreSQL outbox table inside the current transaction.

    The INSERT runs on the caller's session, so outbox rows commit or roll
    back atomically with the aggregate write.
    """

    def __init__(self, table_name: str = OUTBOX_TABLE_NAME) -> None:
        """Remember the target table.

        Raises:
            ValueError: if *table_name* is not a plain identifier. The name is
                interpolated into the INSERT statement (values are bound, the
                table name cannot be), so anything else would be an
                SQL-injection vector.
        """
        if not table_name.isidentifier():
            raise ValueError(f"invalid outbox table name: {table_name!r}")
        self._table = table_name

    async def record(
        self,
        session: AsyncSession,
        events: Sequence[IntegrationEvent],
    ) -> None:
        """Serialize *events* and insert one outbox row per event, in-transaction."""
        if not events:
            return
        rows = [
            {
                "id": str(uuid4()),
                "event_name": ev.event_name,
                "event_version": ev.event_version,
                "payload": json.dumps(ev.envelope()),
                "occurred_at": ev.occurred_at,
                "correlation_id": str(ev.correlation_id) if ev.correlation_id else None,
                "causation_id": str(ev.causation_id) if ev.causation_id else None,
                "tenant_id": str(ev.tenant_id) if ev.tenant_id else None,
                "published_at": None,
                "attempts": 0,
                "last_error": None,
            }
            for ev in events
        ]
        # We use an inline INSERT to keep this module free of a Table dep on
        # the live MetaData (the service owns the metadata; we just write).
        # The table name is validated in __init__, so the f-string below cannot
        # inject SQL; all row values go through bound parameters.
        await session.execute(
            text(
                f"""
                INSERT INTO {self._table}
                    (id, event_name, event_version, payload, occurred_at,
                     correlation_id, causation_id, tenant_id, published_at,
                     attempts, last_error)
                VALUES
                    (:id, :event_name, :event_version, CAST(:payload AS JSONB),
                     :occurred_at, :correlation_id, :causation_id, :tenant_id,
                     :published_at, :attempts, :last_error)
                """
            ),
            rows,
        )
@@ -0,0 +1,66 @@
1
+ """Cursor pagination helpers.
2
+
3
+ We use **keyset pagination**: the cursor encodes the sort key + tiebreaker
4
+ (id) of the last row in the previous page. The next query becomes ``WHERE
5
+ (sort_key, id) > (last_sort_key, last_id) ORDER BY sort_key, id LIMIT N``.
6
+
7
+ Cursors are base64'd JSON internally. Clients treat them as opaque tokens —
8
+ they should never parse them. This lets us evolve the cursor format (add
9
+ fields, change sort key) without breaking clients.
10
+ """
11
+
12
+ from __future__ import annotations
13
+
14
+ import base64
15
+ import json
16
+ from datetime import datetime
17
+ from typing import TYPE_CHECKING, Any
18
+
19
+ from qx.core import CursorPage
20
+
21
+ if TYPE_CHECKING:
22
+ from collections.abc import Sequence
23
+
24
+ from qx.core.types.pagination import Sort
25
+
26
+ __all__ = ["build_cursor_page", "decode_cursor", "encode_cursor"]
27
+
28
+
29
def encode_cursor(values: dict[str, Any]) -> str:
    """Encode *values* as an opaque, padding-free URL-safe base64 token."""
    payload = json.dumps(values, default=_serialize, separators=(",", ":"))
    token = base64.urlsafe_b64encode(payload.encode())
    # Strip '=' padding: it is redundant and ugly in query strings.
    return token.decode().rstrip("=")


def decode_cursor(cursor: str) -> dict[str, Any]:
    """Decode a token produced by :func:`encode_cursor` back into a dict."""
    # Restore the '=' padding stripped at encode time (length must be % 4 == 0).
    padded = cursor + "=" * (-len(cursor) % 4)
    return json.loads(base64.urlsafe_b64decode(padded).decode())  # type: ignore[no-any-return]


def _serialize(o: Any) -> Any:
    # datetimes -> ISO 8601; everything else falls back to str().
    return o.isoformat() if isinstance(o, datetime) else str(o)
44
+
45
+
46
def build_cursor_page(
    items: Sequence[Any],
    *,
    limit: int,
    sort: Sequence[Sort],
    cursor_fields: Sequence[str],
) -> CursorPage[Any]:
    """Build a ``CursorPage`` from query results.

    The caller fetches ``limit + 1`` rows; if ``len(items) > limit`` we trim
    and emit a cursor pointing at the last yielded row. This is the textbook
    "fetch one extra to know if there's a next page" trick.
    """
    more = len(items) > limit
    page_items = list(items[:limit])
    token: str | None = None
    if more and page_items:
        tail = page_items[-1]
        token = encode_cursor({name: getattr(tail, name, None) for name in cursor_fields})
    return CursorPage(items=page_items, next_cursor=token, has_next=more)
File without changes
@@ -0,0 +1,281 @@
1
+ """Generic Repository base class.
2
+
3
+ Repositories are the boundary between domain code and persistence. They:
4
+
5
+ - Accept and return domain entities/aggregates (never raw rows).
6
+ - Translate filter/sort/pagination shapes into SQL.
7
+ - Honor tenant isolation (when the entity is tenanted).
8
+ - Honor soft-delete by default (callers can opt in to including deleted).
9
+ - Pull domain events out of aggregates on save and hand them to the unit of work
10
+ (which routes them to in-process handlers and/or the outbox).
11
+
12
+ This base implements the boring 80%. Aggregate-specific repositories subclass
13
+ and add domain-flavored finders (``find_by_email``, ``find_active_admins``,
14
+ etc.). Avoid generic ``find(where=...)`` APIs leaking up — they couple HTTP
15
+ controllers to SQL syntax and prevent the repo from validating queries.
16
+ """
17
+
18
+ from __future__ import annotations
19
+
20
+ from typing import TYPE_CHECKING, Any, ClassVar, TypeVar
21
+
22
+ from qx.core import (
23
+ ConflictError,
24
+ Entity,
25
+ Filter,
26
+ NotFoundError,
27
+ OffsetPage,
28
+ OffsetPagination,
29
+ Result,
30
+ current_context,
31
+ utcnow,
32
+ )
33
+ from sqlalchemy import Column, Table, delete, func, select, update
34
+
35
+ if TYPE_CHECKING:
36
+ from collections.abc import Sequence
37
+ from uuid import UUID
38
+
39
+ from qx.core.types.pagination import FilterOp, Sort
40
+ from sqlalchemy.ext.asyncio import AsyncSession
41
+ from sqlalchemy.sql import ColumnElement, Select
42
+
43
+ __all__ = ["Repository"]
44
+
45
+
46
+ TEntity = TypeVar("TEntity", bound=Entity[Any])
47
+
48
+
49
class Repository[TEntity: Entity[Any]]:
    """Base repository with typed CRUD against a single aggregate table.

    Subclass and supply the entity type and table at class-definition time::

        class UserRepository(Repository[User]):
            entity_cls = User
            table = users_table
            # Allow-list of filterable fields. Reject anything else.
            filterable_fields = {"email", "name", "is_admin"}
            sortable_fields = {"created_at", "email"}
    """

    # Aggregate type rows are hydrated into (see _row_to_entity).
    entity_cls: type[TEntity]
    # SQLAlchemy Core table backing the aggregate.
    table: Table
    filterable_fields: ClassVar[set[str]] = set()
    sortable_fields: ClassVar[set[str]] = {"created_at"}

    # Subclasses can override to disable tenant filtering for global-scoped tables.
    tenanted: bool = True

    def __init__(self, session: AsyncSession) -> None:
        self._session = session

    # ============================================================
    # Reads
    # ============================================================

    async def get(
        self,
        id: UUID,
        *,
        include_deleted: bool = False,
    ) -> Result[TEntity]:
        """Fetch one aggregate by primary key.

        Applies soft-delete and tenant scoping first; a miss is reported as a
        failed ``Result`` carrying ``NotFoundError`` rather than an exception.
        """
        stmt = select(self.table).where(self.table.c.id == id)
        stmt = self._scope_query(stmt, include_deleted=include_deleted)
        row = (await self._session.execute(stmt)).mappings().first()
        return Result.from_optional(
            self._row_to_entity(row) if row else None,
            on_none=NotFoundError(
                code=f"{self.entity_cls.__name__.lower()}.not_found",
                message=f"{self.entity_cls.__name__} {id} not found",
                details={"id": str(id)},
            ),
        )

    async def exists(self, id: UUID) -> bool:
        """Return True when a non-deleted, tenant-visible row with ``id`` exists."""
        stmt = select(func.count()).select_from(self.table).where(self.table.c.id == id)
        stmt = self._scope_query(stmt, include_deleted=False)
        return ((await self._session.execute(stmt)).scalar() or 0) > 0

    async def list(
        self,
        pagination: OffsetPagination = OffsetPagination(),  # noqa: B008
        *,
        include_deleted: bool = False,
    ) -> OffsetPage[TEntity]:
        """Offset-paginated listing with allow-listed filters and sorts.

        Runs two queries: a COUNT over the filtered subquery for ``total``,
        then the page itself with LIMIT/OFFSET applied.
        """
        stmt = select(self.table)
        stmt = self._scope_query(stmt, include_deleted=include_deleted)
        stmt = self._apply_filters(stmt, pagination.filters)
        stmt = self._apply_sort(stmt, pagination.sort)

        count_stmt = select(func.count()).select_from(stmt.subquery())
        total = (await self._session.execute(count_stmt)).scalar() or 0

        stmt = stmt.limit(pagination.page_size).offset(pagination.offset)
        rows = (await self._session.execute(stmt)).mappings().all()
        items = [self._row_to_entity(r) for r in rows]
        return OffsetPage(
            items=items,
            page=pagination.page,
            page_size=pagination.page_size,
            total=int(total),
        )

    # ============================================================
    # Writes
    # ============================================================

    async def add(self, entity: TEntity) -> Result[TEntity]:
        """Insert a new aggregate. Sets created_at/created_by from context."""
        ctx = current_context()
        values = self._entity_to_dict(entity)
        values.setdefault("created_at", utcnow())
        values.setdefault("created_by", ctx.user_id)
        if self.tenanted and "tenant_id" not in values:
            values["tenant_id"] = ctx.tenant_id
        values.setdefault("version", 1)
        await self._session.execute(self.table.insert().values(**values))
        return Result.success(entity)

    async def save(self, entity: TEntity) -> Result[TEntity]:
        """Update an existing aggregate with optimistic concurrency on ``version``."""
        ctx = current_context()
        current_version = entity.version
        values = self._entity_to_dict(entity)
        values["version"] = current_version + 1
        values["updated_at"] = utcnow()
        values["updated_by"] = ctx.user_id
        # The version predicate is the optimistic lock: a concurrent writer
        # bumps the version, so our UPDATE matches zero rows.
        stmt = (
            update(self.table)
            .where(self.table.c.id == values["id"], self.table.c.version == current_version)
            .values(**values)
        )
        result = await self._session.execute(stmt)
        if result.rowcount == 0:  # type: ignore[attr-defined]
            return Result.failure(
                ConflictError(
                    code=f"{self.entity_cls.__name__.lower()}.version_conflict",
                    message=(
                        f"{self.entity_cls.__name__} {entity.id} was modified concurrently "
                        f"(expected version {current_version})."
                    ),
                    details={"id": str(entity.id), "version": current_version},
                )
            )
        # Keep the in-memory aggregate in sync with the row we just wrote.
        entity.version = current_version + 1
        return Result.success(entity)

    async def soft_delete(self, id: UUID) -> Result[None]:
        """Mark the row deleted (deleted_at/deleted_by); idempotence is a failure.

        A second delete matches zero rows (the ``deleted_at IS NULL`` guard)
        and reports ``NotFoundError`` rather than silently succeeding.
        """
        ctx = current_context()
        stmt = (
            update(self.table)
            .where(
                self.table.c.id == id,
                self.table.c.deleted_at.is_(None),
            )
            .values(deleted_at=utcnow(), deleted_by=ctx.user_id)
        )
        result = await self._session.execute(stmt)
        if result.rowcount == 0:  # type: ignore[attr-defined]
            return Result.failure(
                NotFoundError(
                    code=f"{self.entity_cls.__name__.lower()}.not_found",
                    message=f"{self.entity_cls.__name__} {id} not found or already deleted",
                )
            )
        return Result.success(None)

    async def hard_delete(self, id: UUID) -> Result[None]:
        """Physical delete. Avoid except for GDPR / right-to-be-forgotten paths."""
        stmt = delete(self.table).where(self.table.c.id == id)
        result = await self._session.execute(stmt)
        if result.rowcount == 0:  # type: ignore[attr-defined]
            return Result.failure(
                NotFoundError(
                    code=f"{self.entity_cls.__name__.lower()}.not_found",
                    message=f"{self.entity_cls.__name__} {id} not found",
                )
            )
        return Result.success(None)

    # ============================================================
    # Internals — subclasses can override
    # ============================================================

    def _row_to_entity(self, row: Any) -> TEntity:
        """Default: pass-through to the entity constructor.

        Subclasses override when the table schema doesn't match the entity
        shape (renamed columns, JSON-packed value objects, etc.).
        """
        return self.entity_cls(**dict(row))

    def _entity_to_dict(self, entity: TEntity) -> dict[str, Any]:
        """Default: shallow vars(). Override for non-trivial mappings."""
        result = {k: v for k, v in vars(entity).items() if not k.startswith("_")}
        return result

    def _scope_query(self, stmt: Select[Any], *, include_deleted: bool) -> Select[Any]:
        """Apply soft-delete and tenant predicates common to every read."""
        if not include_deleted and "deleted_at" in self.table.c:
            stmt = stmt.where(self.table.c.deleted_at.is_(None))
        if self.tenanted and "tenant_id" in self.table.c:
            tid = current_context().tenant_id
            # No tenant in context means service-internal call; don't filter.
            # Production deployments should enforce a tenant via middleware.
            if tid is not None:
                stmt = stmt.where(self.table.c.tenant_id == tid)
        return stmt

    def _apply_filters(self, stmt: Select[Any], filters: Sequence[Filter]) -> Select[Any]:
        """AND together the client-supplied filters, enforcing the allow-list."""
        for f in filters:
            if f.field not in self.filterable_fields:
                # Silently dropping a filter is a security hazard. Refuse.
                raise ValueError(
                    f"Field {f.field!r} is not filterable on "
                    f"{self.entity_cls.__name__} (allow-list: {sorted(self.filterable_fields)})"
                )
            col: Column[Any] = self.table.c[f.field]
            stmt = stmt.where(_apply_op(col, f.op, f.value))
        return stmt

    def _apply_sort(self, stmt: Select[Any], sorts: Sequence[Sort]) -> Select[Any]:
        """Apply client sorts (allow-listed) plus a stable ``id`` tiebreaker."""
        sortable = self.sortable_fields | {"created_at"}
        order_cols: list[Any] = []
        for s in sorts:
            if s.field not in sortable:
                raise ValueError(f"Field {s.field!r} is not sortable on {self.entity_cls.__name__}")
            col = self.table.c[s.field]
            order_cols.append(col.desc() if s.direction == "desc" else col.asc())
        # Always include id as a stable tiebreaker for deterministic pagination.
        order_cols.append(self.table.c.id.asc())
        return stmt.order_by(*order_cols)
252
+
253
+
254
+ def _apply_op(col: ColumnElement[Any], op: FilterOp, value: Any) -> ColumnElement[bool]: # noqa: PLR0911, PLR0912
255
+ if op == "eq":
256
+ return col == value # type: ignore[no-any-return]
257
+ if op == "neq":
258
+ return col != value # type: ignore[no-any-return]
259
+ if op == "in":
260
+ return col.in_(value)
261
+ if op == "not_in":
262
+ return ~col.in_(value)
263
+ if op == "gt":
264
+ return col > value # type: ignore[no-any-return]
265
+ if op == "gte":
266
+ return col >= value # type: ignore[no-any-return]
267
+ if op == "lt":
268
+ return col < value # type: ignore[no-any-return]
269
+ if op == "lte":
270
+ return col <= value # type: ignore[no-any-return]
271
+ if op == "contains":
272
+ return col.ilike(f"%{value}%")
273
+ if op == "starts_with":
274
+ return col.ilike(f"{value}%")
275
+ if op == "ends_with":
276
+ return col.ilike(f"%{value}")
277
+ if op == "is_null":
278
+ return col.is_(None)
279
+ if op == "is_not_null":
280
+ return col.is_not(None)
281
+ raise ValueError(f"Unsupported filter op: {op}")
@@ -0,0 +1,154 @@
1
+ """Unit of Work.
2
+
3
+ Drives the transactional boundary around a command. Responsibilities:
4
+
5
+ 1. Open an ``AsyncSession`` from the factory.
6
+ 2. Begin a transaction.
7
+ 3. Track aggregates loaded/added so their pending domain events can be drained.
8
+ 4. On commit: drain events, dispatch in-process handlers inside the transaction,
9
+ write integration events to the outbox table (atomic with state change).
10
+ 5. Commit. On any failure, roll back; pending events are discarded.
11
+
12
+ Usage::
13
+
14
+ async with uow_factory() as uow:
15
+ repo = UserRepository(uow.session)
16
+ user = User.create(...)
17
+ await repo.add(user)
18
+ await uow.commit()
19
+ # events have now been dispatched/outboxed; transaction committed.
20
+
21
+ The dispatcher abstraction (``EventDispatcher``) is a protocol so the
22
+ unit-of-work doesn't pull a hard dependency on the mediator. The wiring layer
23
+ (usually ``qx.bootstrap``) provides the concrete implementation.
24
+ """
25
+
26
+ from __future__ import annotations
27
+
28
+ from typing import TYPE_CHECKING, Any, Protocol, runtime_checkable
29
+
30
+ from qx.core import AggregateRoot, DomainEvent, IntegrationEvent, current_context
31
+
32
+ if TYPE_CHECKING:
33
+ from qx.db.engine import SessionFactory
34
+ from qx.db.outbox import OutboxRecorder
35
+ from sqlalchemy.ext.asyncio import AsyncSession
36
+
37
+ __all__ = ["EventDispatcher", "UnitOfWork"]
38
+
39
+
40
+ @runtime_checkable
41
+ class EventDispatcher(Protocol):
42
+ """Protocol the UoW uses to dispatch in-process domain events.
43
+
44
+ Concretely implemented over the mediator's ``publish`` in service bootstrap.
45
+ Kept as a protocol here to keep ``qx-db`` independent of
46
+ ``qx-cqrs`` at the type level (it's still in deps for testing
47
+ convenience).
48
+ """
49
+
50
+ async def publish(self, event: DomainEvent) -> None: ...
51
+
52
+
53
+ class UnitOfWork:
54
+ """Transactional unit-of-work scoped to one logical operation."""
55
+
56
+ def __init__(
57
+ self,
58
+ session_factory: SessionFactory,
59
+ dispatcher: EventDispatcher | None,
60
+ outbox: OutboxRecorder | None = None,
61
+ ) -> None:
62
+ self._factory = session_factory
63
+ self._dispatcher = dispatcher
64
+ self._outbox = outbox
65
+ self._session: AsyncSession | None = None
66
+ self._tracked: list[AggregateRoot[Any]] = []
67
+ self._committed = False
68
+ self._rolled_back = False
69
+
70
+ @property
71
+ def session(self) -> AsyncSession:
72
+ if self._session is None:
73
+ raise RuntimeError("UnitOfWork session accessed outside of `async with` block")
74
+ return self._session
75
+
76
+ def track(self, aggregate: AggregateRoot[Any]) -> None:
77
+ """Register an aggregate so its domain events get drained on commit.
78
+
79
+ Repositories that follow ``qx-db`` conventions call this from
80
+ ``add`` and ``save`` automatically. Aggregates loaded for read can also
81
+ be tracked if their events need to be dispatched (rare).
82
+ """
83
+ if aggregate not in self._tracked:
84
+ self._tracked.append(aggregate)
85
+
86
+ async def commit(self) -> None:
87
+ """Drain events, route to in-process + outbox, then commit the SQL transaction.
88
+
89
+ All event work happens **inside** the open transaction. If the dispatcher
90
+ fails (an in-process handler raised), or the outbox insert fails, the
91
+ commit doesn't happen — the whole unit-of-work rolls back. This is the
92
+ outbox pattern's atomic guarantee.
93
+ """
94
+ if self._session is None:
95
+ raise RuntimeError("UnitOfWork.commit called outside async with block")
96
+ if self._committed or self._rolled_back:
97
+ return
98
+
99
+ # Drain events from every tracked aggregate.
100
+ domain_events: list[DomainEvent] = []
101
+ integration_events: list[IntegrationEvent] = []
102
+ for agg in self._tracked:
103
+ for ev in agg.pull_events():
104
+ # Stamp correlation/tenant from the active request context so
105
+ # downstream consumers can correlate. Aggregates don't see the
106
+ # request context directly — UoW is the right place to enrich.
107
+ ctx = current_context()
108
+ enriched = ev.model_copy(
109
+ update={
110
+ "correlation_id": ev.correlation_id or ctx.correlation_id,
111
+ "tenant_id": ev.tenant_id or ctx.tenant_id,
112
+ "actor_id": ev.actor_id or ctx.user_id,
113
+ }
114
+ )
115
+ if isinstance(enriched, IntegrationEvent):
116
+ integration_events.append(enriched)
117
+ elif isinstance(enriched, DomainEvent):
118
+ domain_events.append(enriched)
119
+
120
+ # Dispatch in-process domain events first (still inside txn).
121
+ if self._dispatcher is not None:
122
+ for ev in domain_events:
123
+ await self._dispatcher.publish(ev)
124
+
125
+ # Write integration events to outbox (still inside txn).
126
+ if self._outbox is not None and integration_events:
127
+ await self._outbox.record(self._session, integration_events)
128
+
129
+ await self._session.commit()
130
+ self._committed = True
131
+
132
+ async def rollback(self) -> None:
133
+ if self._session is None or self._rolled_back or self._committed:
134
+ return
135
+ await self._session.rollback()
136
+ self._rolled_back = True
137
+
138
+ async def __aenter__(self) -> UnitOfWork:
139
+ self._session = self._factory()
140
+ await self._session.begin()
141
+ return self
142
+
143
+ async def __aexit__(self, exc_type: Any, exc: Any, tb: Any) -> None:
144
+ try:
145
+ if exc is not None:
146
+ await self.rollback()
147
+ elif not self._committed and not self._rolled_back:
148
+ # No explicit commit — implicit rollback. Forces callers to be
149
+ # deliberate; silent commits on `async with` exit hide bugs.
150
+ await self.rollback()
151
+ finally:
152
+ if self._session is not None:
153
+ await self._session.close()
154
+ self._session = None
@@ -0,0 +1,77 @@
1
+ """Tests for qx-db pure logic (cursor encode/decode, outbox table shape).
2
+
3
+ The repository / UoW / outbox-relay integration tests live under
4
+ ``tests/integration`` and require Docker (testcontainers). Those run in CI
5
+ against a real Postgres; here we keep the unit tests fast.
6
+ """
7
+
8
+ from __future__ import annotations
9
+
10
+ from datetime import UTC, datetime
11
+ from uuid import uuid4
12
+
13
+ from qx.core.types.pagination import Sort
14
+ from qx.db.outbox import OUTBOX_TABLE_NAME, include_outbox_table
15
+ from qx.db.pagination import (
16
+ build_cursor_page,
17
+ decode_cursor,
18
+ encode_cursor,
19
+ )
20
+ from sqlalchemy import MetaData
21
+
22
+
23
def test_cursor_roundtrip_strings() -> None:
    """encode -> decode is lossless for a plain-string payload."""
    original = {"id": str(uuid4()), "created_at": "2024-01-01T00:00:00+00:00"}
    assert decode_cursor(encode_cursor(original)) == original
28
+
29
+
30
def test_cursor_handles_datetime() -> None:
    """Datetimes survive encoding as ISO-8601 strings."""
    token = encode_cursor({"created_at": datetime(2024, 1, 1, tzinfo=UTC)})
    round_tripped = decode_cursor(token)
    assert round_tripped["created_at"].startswith("2024-01-01")
35
+
36
+
37
def test_cursor_is_opaque_text() -> None:
    """Tokens are URL-embeddable: base64url alphabet, padding stripped."""
    token = encode_cursor({"id": "x"})
    assert "=" not in token
    assert "/" not in token
42
+
43
+
44
def test_build_cursor_page_returns_trimmed_items_and_cursor() -> None:
    """Six rows at limit=5 -> five items, has_next set, cursor emitted."""
    from types import SimpleNamespace  # noqa: PLC0415

    rows = [
        SimpleNamespace(id=str(n), created_at=datetime(2024, 1, n + 1, tzinfo=UTC))
        for n in range(6)
    ]
    page = build_cursor_page(
        rows,
        limit=5,
        sort=(Sort(field="created_at", direction="asc"),),
        cursor_fields=("created_at", "id"),
    )
    assert len(page.items) == 5
    assert page.has_next is True
    assert page.next_cursor is not None
60
+
61
+
62
def test_build_cursor_page_no_next_when_under_limit() -> None:
    """Fewer rows than the limit means no next page and no cursor."""
    from types import SimpleNamespace  # noqa: PLC0415

    page = build_cursor_page(
        [SimpleNamespace(id=str(n)) for n in range(3)],
        limit=5,
        sort=(),
        cursor_fields=("id",),
    )
    assert page.has_next is False
    assert page.next_cursor is None
68
+ assert page.next_cursor is None
69
+
70
+
71
def test_outbox_table_registers_in_metadata() -> None:
    """include_outbox_table wires the outbox schema into caller-owned metadata."""
    metadata = MetaData()
    table = include_outbox_table(metadata)
    assert OUTBOX_TABLE_NAME in metadata.tables
    for column_name in ("event_name", "payload", "published_at"):
        assert column_name in table.c
+ assert "published_at" in table.c