faststream-outbox 0.1.0__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- faststream_outbox-0.1.0/PKG-INFO +132 -0
- faststream_outbox-0.1.0/README.md +118 -0
- faststream_outbox-0.1.0/faststream_outbox/__init__.py +25 -0
- faststream_outbox-0.1.0/faststream_outbox/broker.py +237 -0
- faststream_outbox-0.1.0/faststream_outbox/client.py +230 -0
- faststream_outbox-0.1.0/faststream_outbox/configs.py +71 -0
- faststream_outbox-0.1.0/faststream_outbox/envelope.py +27 -0
- faststream_outbox-0.1.0/faststream_outbox/message.py +150 -0
- faststream_outbox-0.1.0/faststream_outbox/parser/__init__.py +0 -0
- faststream_outbox-0.1.0/faststream_outbox/parser/parser.py +21 -0
- faststream_outbox-0.1.0/faststream_outbox/py.typed +0 -0
- faststream_outbox-0.1.0/faststream_outbox/registrator.py +82 -0
- faststream_outbox-0.1.0/faststream_outbox/retry.py +101 -0
- faststream_outbox-0.1.0/faststream_outbox/router.py +90 -0
- faststream_outbox-0.1.0/faststream_outbox/schema.py +87 -0
- faststream_outbox-0.1.0/faststream_outbox/subscriber/__init__.py +0 -0
- faststream_outbox-0.1.0/faststream_outbox/subscriber/config.py +47 -0
- faststream_outbox-0.1.0/faststream_outbox/subscriber/factory.py +58 -0
- faststream_outbox-0.1.0/faststream_outbox/subscriber/usecase.py +239 -0
- faststream_outbox-0.1.0/faststream_outbox/testing.py +231 -0
- faststream_outbox-0.1.0/pyproject.toml +75 -0
|
@@ -0,0 +1,132 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: faststream-outbox
|
|
3
|
+
Version: 0.1.0
|
|
4
|
+
Summary: FastStream broker integration for the transactional outbox pattern: a Postgres table is the queue
|
|
5
|
+
Author: Artur Shiriev
|
|
6
|
+
Author-email: Artur Shiriev <me@shiriev.ru>
|
|
7
|
+
License-Expression: MIT
|
|
8
|
+
Requires-Dist: faststream~=0.6
|
|
9
|
+
Requires-Dist: sqlalchemy[asyncio]>=2.0
|
|
10
|
+
Requires-Dist: asyncpg>=0.29 ; extra == 'asyncpg'
|
|
11
|
+
Requires-Python: >=3.13, <4
|
|
12
|
+
Provides-Extra: asyncpg
|
|
13
|
+
Description-Content-Type: text/markdown
|
|
14
|
+
|
|
15
|
+
faststream-outbox
|
|
16
|
+
==
|
|
17
|
+
|
|
18
|
+
[](https://pypi.python.org/pypi/faststream-outbox)
|
|
19
|
+
[](https://pypistats.org/packages/faststream-outbox)
|
|
20
|
+
|
|
21
|
+
`faststream-outbox` is a [FastStream](https://faststream.airt.ai) broker integration for the **transactional outbox pattern** — a Postgres table is the message queue.
|
|
22
|
+
|
|
23
|
+
A producer writes a domain entity and an outbox row in the *same* SQLAlchemy transaction by calling `broker.publish(body, queue=..., session=session)`. A subscriber polls the table directly with `FOR UPDATE SKIP LOCKED`, runs the handler, and deletes the row on success. No downstream broker, no separate relay process — the table *is* the queue.
|
|
24
|
+
|
|
25
|
+
```python
|
|
26
|
+
from sqlalchemy import MetaData
|
|
27
|
+
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
|
|
28
|
+
from faststream import FastStream
|
|
29
|
+
from faststream_outbox import OutboxBroker, make_outbox_table
|
|
30
|
+
|
|
31
|
+
metadata = MetaData()
|
|
32
|
+
outbox_table = make_outbox_table(metadata, table_name="outbox")
|
|
33
|
+
|
|
34
|
+
engine = create_async_engine("postgresql+asyncpg://localhost/app")
|
|
35
|
+
broker = OutboxBroker(engine, outbox_table=outbox_table)
|
|
36
|
+
app = FastStream(broker)
|
|
37
|
+
|
|
38
|
+
@broker.subscriber("orders", max_workers=4)
|
|
39
|
+
async def handle(order_id: int) -> None:
|
|
40
|
+
print(f"order {order_id}")
|
|
41
|
+
|
|
42
|
+
# Producer side — share the caller's open transaction:
|
|
43
|
+
session_factory = async_sessionmaker(engine, expire_on_commit=False)
|
|
44
|
+
async with session_factory() as session, session.begin():
|
|
45
|
+
session.add(Order(id=1))
|
|
46
|
+
await broker.publish(1, queue="orders", session=session)
|
|
47
|
+
```
|
|
48
|
+
|
|
49
|
+
## How it works
|
|
50
|
+
|
|
51
|
+
`make_outbox_table(metadata, table_name="outbox")` returns a `sqlalchemy.Table` that you attach to your own `MetaData` and migrate via Alembic. The package does **not** own your schema; it only describes the columns it needs.
|
|
52
|
+
|
|
53
|
+
`broker.publish(body, *, queue, session, headers=None, correlation_id=None)` inserts one outbox row through the caller's `AsyncSession`. It does not flush, commit, or open its own transaction — the whole point is that the row commits atomically with the caller's domain writes. Use it inside an `async with session.begin():` block.
|
|
54
|
+
|
|
55
|
+
`broker.publish_batch(*bodies, queue, session, headers=None)` inserts many rows in a single round-trip with the same transactional contract.
|
|
56
|
+
|
|
57
|
+
A subscriber owns three async loops:
|
|
58
|
+
|
|
59
|
+
1. **fetch** — claims due rows via `SELECT … FOR UPDATE SKIP LOCKED → UPDATE state='processing', acquired_token=:uuid RETURNING *` in a single CTE.
|
|
60
|
+
2. **workers** (× `max_workers`) — dispatch to the handler. On success, `DELETE WHERE id=:id AND acquired_token=:token`. On failure, the retry strategy decides: schedule another attempt, or terminal `DELETE`.
|
|
61
|
+
3. **release-stuck** — periodically flips `processing` rows back to `pending` if their lease is older than `release_stuck_timeout`. Wrapped in a Postgres advisory lock so multiple processes don't compete.
|
|
62
|
+
|
|
63
|
+
The `acquired_token` is critical: a slow handler whose lease expired and was re-claimed by another worker will find its terminal `DELETE`/`UPDATE` to be a no-op (the token no longer matches), preventing it from clobbering the new lease holder's row.
|
|
64
|
+
|
|
65
|
+
## Recommended index
|
|
66
|
+
|
|
67
|
+
Add this to your Alembic migration alongside the table:
|
|
68
|
+
|
|
69
|
+
```sql
|
|
70
|
+
CREATE INDEX outbox_pending_idx ON outbox (queue, next_attempt_at)
|
|
71
|
+
WHERE state = 'pending';
|
|
72
|
+
```
|
|
73
|
+
|
|
74
|
+
## Schema validation
|
|
75
|
+
|
|
76
|
+
Schema validation is opt-in:
|
|
77
|
+
|
|
78
|
+
```python
|
|
79
|
+
await broker.validate_schema() # raises if user's table drifts from expected columns
|
|
80
|
+
```
|
|
81
|
+
|
|
82
|
+
Call it from a `/health` endpoint or startup hook — not at `broker.start()`, so Alembic can run migrations against the same DB without a startup loop.
|
|
83
|
+
|
|
84
|
+
## Retry strategies
|
|
85
|
+
|
|
86
|
+
```python
|
|
87
|
+
from faststream_outbox import ExponentialRetry, ConstantRetry, LinearRetry, NoRetry
|
|
88
|
+
|
|
89
|
+
@broker.subscriber(
|
|
90
|
+
"orders",
|
|
91
|
+
retry_strategy=ExponentialRetry(
|
|
92
|
+
initial_delay_seconds=1.0,
|
|
93
|
+
max_delay_seconds=300.0,
|
|
94
|
+
max_attempts=5,
|
|
95
|
+
jitter_factor=0.5,
|
|
96
|
+
),
|
|
97
|
+
)
|
|
98
|
+
async def handle(order_id: int) -> None: ...
|
|
99
|
+
```
|
|
100
|
+
|
|
101
|
+
Strategies receive the raised `exception` so users may subclass for "retry only on transient errors":
|
|
102
|
+
|
|
103
|
+
```python
|
|
104
|
+
class TransientOnly(ExponentialRetry):
|
|
105
|
+
def get_next_attempt_at(self, *, exception=None, **kw):
|
|
106
|
+
if exception and not isinstance(exception, TransientError):
|
|
107
|
+
return None
|
|
108
|
+
return super().get_next_attempt_at(exception=exception, **kw)
|
|
109
|
+
```
|
|
110
|
+
|
|
111
|
+
## Failure modes
|
|
112
|
+
|
|
113
|
+
- **Handlers must be idempotent.** Crash between commit-of-handler-side-effects and the broker's `DELETE` re-delivers the message.
|
|
114
|
+
- **Best-effort ordering only.** `FOR UPDATE SKIP LOCKED` does not preserve strict order under concurrent workers. If you need strict per-aggregate ordering, route to a single subscriber and run a single worker.
|
|
115
|
+
- **No DLQ / archive.** Terminal failures `DELETE` the row. Hook `on_terminal_failure(row)` to capture them in your own table or alerting.
|
|
116
|
+
|
|
117
|
+
## Connection ownership
|
|
118
|
+
|
|
119
|
+
`OutboxBroker` does **not** close the `AsyncEngine` you pass in — the caller owns its lifecycle.
|
|
120
|
+
|
|
121
|
+
## Tuning
|
|
122
|
+
|
|
123
|
+
Per-subscriber knobs (passed to `@broker.subscriber("…", …)`):
|
|
124
|
+
|
|
125
|
+
- `max_workers` (default `1`) — concurrent handlers per subscriber.
|
|
126
|
+
- `fetch_batch_size` (default `10`) — rows claimed per fetch cycle.
|
|
127
|
+
- `min_fetch_interval` / `max_fetch_interval` (default `1.0` / `10.0` s) — base + ceiling for the adaptive idle backoff with jitter.
|
|
128
|
+
- `release_stuck_timeout` (default `300.0` s) — how long a `processing` row may live before being released back to `pending`.
|
|
129
|
+
- `release_stuck_interval` (default `release_stuck_timeout / 2`).
|
|
130
|
+
- `max_deliveries` (default `None` — unbounded) — total claims (including stuck-recovery re-claims) after which the row is dropped without invoking the handler. Defends against handlers that consistently wedge.
|
|
131
|
+
|
|
132
|
+
## 📝 [License](LICENSE)
|
|
@@ -0,0 +1,118 @@
|
|
|
1
|
+
faststream-outbox
|
|
2
|
+
==
|
|
3
|
+
|
|
4
|
+
[](https://pypi.python.org/pypi/faststream-outbox)
|
|
5
|
+
[](https://pypistats.org/packages/faststream-outbox)
|
|
6
|
+
|
|
7
|
+
`faststream-outbox` is a [FastStream](https://faststream.airt.ai) broker integration for the **transactional outbox pattern** — a Postgres table is the message queue.
|
|
8
|
+
|
|
9
|
+
A producer writes a domain entity and an outbox row in the *same* SQLAlchemy transaction by calling `broker.publish(body, queue=..., session=session)`. A subscriber polls the table directly with `FOR UPDATE SKIP LOCKED`, runs the handler, and deletes the row on success. No downstream broker, no separate relay process — the table *is* the queue.
|
|
10
|
+
|
|
11
|
+
```python
|
|
12
|
+
from sqlalchemy import MetaData
|
|
13
|
+
from sqlalchemy.ext.asyncio import async_sessionmaker, create_async_engine
|
|
14
|
+
from faststream import FastStream
|
|
15
|
+
from faststream_outbox import OutboxBroker, make_outbox_table
|
|
16
|
+
|
|
17
|
+
metadata = MetaData()
|
|
18
|
+
outbox_table = make_outbox_table(metadata, table_name="outbox")
|
|
19
|
+
|
|
20
|
+
engine = create_async_engine("postgresql+asyncpg://localhost/app")
|
|
21
|
+
broker = OutboxBroker(engine, outbox_table=outbox_table)
|
|
22
|
+
app = FastStream(broker)
|
|
23
|
+
|
|
24
|
+
@broker.subscriber("orders", max_workers=4)
|
|
25
|
+
async def handle(order_id: int) -> None:
|
|
26
|
+
print(f"order {order_id}")
|
|
27
|
+
|
|
28
|
+
# Producer side — share the caller's open transaction:
|
|
29
|
+
session_factory = async_sessionmaker(engine, expire_on_commit=False)
|
|
30
|
+
async with session_factory() as session, session.begin():
|
|
31
|
+
session.add(Order(id=1))
|
|
32
|
+
await broker.publish(1, queue="orders", session=session)
|
|
33
|
+
```
|
|
34
|
+
|
|
35
|
+
## How it works
|
|
36
|
+
|
|
37
|
+
`make_outbox_table(metadata, table_name="outbox")` returns a `sqlalchemy.Table` that you attach to your own `MetaData` and migrate via Alembic. The package does **not** own your schema; it only describes the columns it needs.
|
|
38
|
+
|
|
39
|
+
`broker.publish(body, *, queue, session, headers=None, correlation_id=None)` inserts one outbox row through the caller's `AsyncSession`. It does not flush, commit, or open its own transaction — the whole point is that the row commits atomically with the caller's domain writes. Use it inside an `async with session.begin():` block.
|
|
40
|
+
|
|
41
|
+
`broker.publish_batch(*bodies, queue, session, headers=None)` inserts many rows in a single round-trip with the same transactional contract.
|
|
42
|
+
|
|
43
|
+
A subscriber owns three async loops:
|
|
44
|
+
|
|
45
|
+
1. **fetch** — claims due rows via `SELECT … FOR UPDATE SKIP LOCKED → UPDATE state='processing', acquired_token=:uuid RETURNING *` in a single CTE.
|
|
46
|
+
2. **workers** (× `max_workers`) — dispatch to the handler. On success, `DELETE WHERE id=:id AND acquired_token=:token`. On failure, the retry strategy decides: schedule another attempt, or terminal `DELETE`.
|
|
47
|
+
3. **release-stuck** — periodically flips `processing` rows back to `pending` if their lease is older than `release_stuck_timeout`. Wrapped in a Postgres advisory lock so multiple processes don't compete.
|
|
48
|
+
|
|
49
|
+
The `acquired_token` is critical: a slow handler whose lease expired and was re-claimed by another worker will find its terminal `DELETE`/`UPDATE` to be a no-op (the token no longer matches), preventing it from clobbering the new lease holder's row.
|
|
50
|
+
|
|
51
|
+
## Recommended index
|
|
52
|
+
|
|
53
|
+
Add this to your Alembic migration alongside the table:
|
|
54
|
+
|
|
55
|
+
```sql
|
|
56
|
+
CREATE INDEX outbox_pending_idx ON outbox (queue, next_attempt_at)
|
|
57
|
+
WHERE state = 'pending';
|
|
58
|
+
```
|
|
59
|
+
|
|
60
|
+
## Schema validation
|
|
61
|
+
|
|
62
|
+
Schema validation is opt-in:
|
|
63
|
+
|
|
64
|
+
```python
|
|
65
|
+
await broker.validate_schema() # raises if user's table drifts from expected columns
|
|
66
|
+
```
|
|
67
|
+
|
|
68
|
+
Call it from a `/health` endpoint or startup hook — not at `broker.start()`, so Alembic can run migrations against the same DB without a startup loop.
|
|
69
|
+
|
|
70
|
+
## Retry strategies
|
|
71
|
+
|
|
72
|
+
```python
|
|
73
|
+
from faststream_outbox import ExponentialRetry, ConstantRetry, LinearRetry, NoRetry
|
|
74
|
+
|
|
75
|
+
@broker.subscriber(
|
|
76
|
+
"orders",
|
|
77
|
+
retry_strategy=ExponentialRetry(
|
|
78
|
+
initial_delay_seconds=1.0,
|
|
79
|
+
max_delay_seconds=300.0,
|
|
80
|
+
max_attempts=5,
|
|
81
|
+
jitter_factor=0.5,
|
|
82
|
+
),
|
|
83
|
+
)
|
|
84
|
+
async def handle(order_id: int) -> None: ...
|
|
85
|
+
```
|
|
86
|
+
|
|
87
|
+
Strategies receive the raised `exception` so users may subclass for "retry only on transient errors":
|
|
88
|
+
|
|
89
|
+
```python
|
|
90
|
+
class TransientOnly(ExponentialRetry):
|
|
91
|
+
def get_next_attempt_at(self, *, exception=None, **kw):
|
|
92
|
+
if exception and not isinstance(exception, TransientError):
|
|
93
|
+
return None
|
|
94
|
+
return super().get_next_attempt_at(exception=exception, **kw)
|
|
95
|
+
```
|
|
96
|
+
|
|
97
|
+
## Failure modes
|
|
98
|
+
|
|
99
|
+
- **Handlers must be idempotent.** Crash between commit-of-handler-side-effects and the broker's `DELETE` re-delivers the message.
|
|
100
|
+
- **Best-effort ordering only.** `FOR UPDATE SKIP LOCKED` does not preserve strict order under concurrent workers. If you need strict per-aggregate ordering, route to a single subscriber and run a single worker.
|
|
101
|
+
- **No DLQ / archive.** Terminal failures `DELETE` the row. Hook `on_terminal_failure(row)` to capture them in your own table or alerting.
|
|
102
|
+
|
|
103
|
+
## Connection ownership
|
|
104
|
+
|
|
105
|
+
`OutboxBroker` does **not** close the `AsyncEngine` you pass in — the caller owns its lifecycle.
|
|
106
|
+
|
|
107
|
+
## Tuning
|
|
108
|
+
|
|
109
|
+
Per-subscriber knobs (passed to `@broker.subscriber("…", …)`):
|
|
110
|
+
|
|
111
|
+
- `max_workers` (default `1`) — concurrent handlers per subscriber.
|
|
112
|
+
- `fetch_batch_size` (default `10`) — rows claimed per fetch cycle.
|
|
113
|
+
- `min_fetch_interval` / `max_fetch_interval` (default `1.0` / `10.0` s) — base + ceiling for the adaptive idle backoff with jitter.
|
|
114
|
+
- `release_stuck_timeout` (default `300.0` s) — how long a `processing` row may live before being released back to `pending`.
|
|
115
|
+
- `release_stuck_interval` (default `release_stuck_timeout / 2`).
|
|
116
|
+
- `max_deliveries` (default `None` — unbounded) — total claims (including stuck-recovery re-claims) after which the row is dropped without invoking the handler. Defends against handlers that consistently wedge.
|
|
117
|
+
|
|
118
|
+
## 📝 [License](LICENSE)
|
|
@@ -0,0 +1,25 @@
|
|
|
1
|
+
from faststream_outbox.broker import OutboxBroker
|
|
2
|
+
from faststream_outbox.retry import (
|
|
3
|
+
ConstantRetry,
|
|
4
|
+
ExponentialRetry,
|
|
5
|
+
LinearRetry,
|
|
6
|
+
NoRetry,
|
|
7
|
+
RetryStrategyProto,
|
|
8
|
+
)
|
|
9
|
+
from faststream_outbox.router import OutboxRouter
|
|
10
|
+
from faststream_outbox.schema import OutboxState, make_outbox_table
|
|
11
|
+
from faststream_outbox.testing import TestOutboxBroker
|
|
12
|
+
|
|
13
|
+
|
|
14
|
+
# Public API of faststream_outbox; kept alphabetically sorted.
__all__ = [
    "ConstantRetry",
    "ExponentialRetry",
    "LinearRetry",
    "NoRetry",
    "OutboxBroker",
    "OutboxRouter",
    "OutboxState",
    "RetryStrategyProto",
    "TestOutboxBroker",
    "make_outbox_table",
]
|
|
@@ -0,0 +1,237 @@
|
|
|
1
|
+
"""
|
|
2
|
+
OutboxBroker — a FastStream broker whose queue is a Postgres table.
|
|
3
|
+
|
|
4
|
+
Producers call ``broker.publish(body, queue=..., session=session)`` inside their
|
|
5
|
+
own SQLAlchemy transaction; the row commits with their domain writes. The broker
|
|
6
|
+
owns subscribers on the consumer side.
|
|
7
|
+
"""
|
|
8
|
+
|
|
9
|
+
import logging
|
|
10
|
+
import typing
|
|
11
|
+
from collections.abc import Iterable, Sequence
|
|
12
|
+
|
|
13
|
+
from faststream import BaseMiddleware
|
|
14
|
+
from faststream._internal.basic_types import LoggerProto
|
|
15
|
+
from faststream._internal.broker import BrokerUsecase
|
|
16
|
+
from faststream._internal.broker.registrator import Registrator
|
|
17
|
+
from faststream._internal.constants import EMPTY
|
|
18
|
+
from faststream._internal.di import FastDependsConfig
|
|
19
|
+
from faststream._internal.logger import DefaultLoggerStorage, make_logger_state
|
|
20
|
+
from faststream._internal.logger.logging import get_broker_logger
|
|
21
|
+
from faststream._internal.types import BrokerMiddleware, CustomCallable
|
|
22
|
+
from faststream.specification.schema import BrokerSpec
|
|
23
|
+
from faststream.specification.schema.extra import Tag, TagDict
|
|
24
|
+
from sqlalchemy import insert
|
|
25
|
+
from sqlalchemy.ext.asyncio import AsyncSession
|
|
26
|
+
|
|
27
|
+
from faststream_outbox.client import OutboxClient
|
|
28
|
+
from faststream_outbox.configs import EngineState, OutboxBrokerConfig
|
|
29
|
+
from faststream_outbox.envelope import _encode_payload
|
|
30
|
+
from faststream_outbox.message import OutboxInnerMessage
|
|
31
|
+
from faststream_outbox.registrator import OutboxRegistrator
|
|
32
|
+
|
|
33
|
+
|
|
34
|
+
if typing.TYPE_CHECKING:
|
|
35
|
+
from fast_depends.dependencies import Dependant
|
|
36
|
+
from faststream._internal.context.repository import ContextRepo
|
|
37
|
+
from sqlalchemy import Table
|
|
38
|
+
from sqlalchemy.ext.asyncio import AsyncEngine
|
|
39
|
+
|
|
40
|
+
from faststream_outbox.subscriber.usecase import OutboxSubscriber
|
|
41
|
+
|
|
42
|
+
|
|
43
|
+
class OutboxParamsStorage(DefaultLoggerStorage):
    """Logger storage supplying the outbox-flavored broker logger.

    Builds (once) a logger whose record format carries the queue name and
    message id alongside the usual timestamp/level/message fields.
    """

    # Column widths used in the record format; -1 means "no padding".
    _max_msg_id_ln = -1
    _max_queue_name = 7

    def get_logger(self, *, context: "ContextRepo") -> LoggerProto:
        """Return the cached broker logger, creating it on first access."""
        cached = self._get_logger_ref()
        if cached:
            return cached

        record_fmt = (
            "%(asctime)s %(levelname)-8s - "
            + f"%(queue)-{self._max_queue_name}s | "
            + f"%(message_id)-{self._max_msg_id_ln}s "
            + "- %(message)s"
        )
        created = get_broker_logger(
            name="outbox",
            default_context={"queue": "", "message_id": ""},
            message_id_ln=self._max_msg_id_ln,
            fmt=record_fmt,
            context=context,
            log_level=self.logger_log_level,
        )
        # Remember the instance so subsequent calls reuse it.
        self._logger_ref.add(created)
        return created
|
|
65
|
+
|
|
66
|
+
|
|
67
|
+
class OutboxBroker(
    OutboxRegistrator,
    BrokerUsecase[OutboxInnerMessage, "AsyncEngine", OutboxBrokerConfig],
):
    """FastStream broker backed by a Postgres outbox table."""

    # Populated by the FastStream registrator machinery as handlers are declared.
    _subscribers: list["OutboxSubscriber"]

    def __init__(  # noqa: PLR0913
        self,
        engine: "AsyncEngine | None" = None,
        *,
        outbox_table: "Table",
        decoder: CustomCallable | None = None,
        parser: CustomCallable | None = None,
        dependencies: Iterable["Dependant"] = (),
        middlewares: Sequence[type[BaseMiddleware] | BrokerMiddleware[OutboxInnerMessage]] = (),
        graceful_timeout: float | None = 15.0,
        routers: Sequence[Registrator[OutboxInnerMessage]] = (),
        # Logging
        logger: LoggerProto | None = EMPTY,
        log_level: int = logging.INFO,
        # FastDepends
        apply_types: bool = True,
        # AsyncAPI
        description: str | None = None,
        tags: Iterable[Tag | TagDict] = (),
    ) -> None:
        """Build the broker around the caller-owned *engine* and *outbox_table*.

        ``engine`` may be ``None`` (no client is created then; ``client`` will
        raise and ``ping`` returns False until one is provided — presumably for
        testing setups, TODO confirm). The engine's lifecycle stays with the
        caller; this class never disposes it.
        """
        self._outbox_table = outbox_table
        engine_state = EngineState(engine)
        # No engine -> no client: publish/validate_schema paths guard on this.
        client = OutboxClient(engine, outbox_table) if engine is not None else None
        fd_config = FastDependsConfig(use_fastdepends=apply_types)
        broker_config = OutboxBrokerConfig(
            engine_state=engine_state,
            outbox_table=outbox_table,
            client=client,
            broker_middlewares=middlewares,
            broker_parser=parser,
            broker_decoder=decoder,
            logger=make_logger_state(
                logger=logger,
                log_level=log_level,
                default_storage_cls=OutboxParamsStorage,
            ),
            fd_config=fd_config,
            broker_dependencies=dependencies,
            graceful_timeout=graceful_timeout,
            extra_context={"broker": self},
            # FastStream's config requires a producer object; the outbox has
            # no real one, so a raising stub fills the slot.
            producer=_NoProducer(),  # ty: ignore[invalid-argument-type]
        )
        specification = BrokerSpec(
            url=[],
            protocol="postgresql",
            protocol_version=None,
            description=description,
            tags=tags,
            security=None,
        )
        super().__init__(config=broker_config, specification=specification, routers=routers)  # ty: ignore[unknown-argument]

    @property
    def client(self) -> OutboxClient:
        """The connected :class:`OutboxClient`; raises if no engine was given."""
        client = self.config.broker_config.client
        if client is None:
            msg = "OutboxBroker is not connected; pass an AsyncEngine to the constructor."
            raise RuntimeError(msg)
        return client

    @typing.override
    async def _connect(self) -> "AsyncEngine":
        # The engine was handed to us already constructed; nothing to open here.
        return self.config.broker_config.engine_state.engine

    @typing.override
    async def __aenter__(self) -> typing.Self:
        await self.start()
        return self

    @typing.override
    async def start(self) -> None:
        """Connect, then start all registered subscribers (via the base class)."""
        await self.connect()
        await super().start()

    @typing.override
    async def ping(self, timeout: float | None = None) -> bool:
        """Liveness check: DB reachable and no subscriber loop has died.

        NOTE(review): *timeout* is accepted for interface compatibility but not
        applied to the underlying ``client.ping()`` call.
        """
        client = self.config.broker_config.client
        if client is None:
            return False
        if not await client.ping():
            return False
        # A finished task means a fetch/worker/release loop crashed or exited.
        for subscriber in self._subscribers:
            for task in subscriber.tasks:
                if task.done():
                    return False
        return True

    async def validate_schema(self) -> None:
        """Validate the user's table matches what the package expects. Opt-in."""
        await self.client.validate_schema()

    async def publish(  # ty: ignore[invalid-method-override]
        self,
        body: typing.Any,
        *,
        queue: str,
        session: AsyncSession,
        headers: dict[str, str] | None = None,
        correlation_id: str | None = None,
    ) -> None:
        """
        Insert one outbox row using *session*'s open transaction.

        Must be called inside a transaction the caller owns (typically inside an
        ``async with session.begin():`` block). ``publish`` does not flush, commit,
        or open its own transaction — that is the whole point of the transactional
        outbox pattern: the row commits atomically with the caller's domain writes.

        Raises:
            TypeError: if *session* is not an ``AsyncSession``.
        """
        if not isinstance(session, AsyncSession):
            msg = "broker.publish requires an sqlalchemy.ext.asyncio.AsyncSession"
            raise TypeError(msg)
        payload, hdrs = _encode_payload(body, headers=headers, correlation_id=correlation_id)
        await session.execute(insert(self._outbox_table).values(queue=queue, payload=payload, headers=hdrs))

    async def publish_batch(  # ty: ignore[invalid-method-override]
        self,
        *bodies: typing.Any,
        queue: str,
        session: AsyncSession,
        headers: dict[str, str] | None = None,
    ) -> None:
        """
        Insert multiple outbox rows via *session*. Same transactional contract as ``publish``.

        Each row gets its own auto-generated ``correlation_id``; pass *headers* to
        share static headers across all rows.

        Raises:
            TypeError: if *session* is not an ``AsyncSession``.
        """
        if not isinstance(session, AsyncSession):
            msg = "broker.publish_batch requires an sqlalchemy.ext.asyncio.AsyncSession"
            raise TypeError(msg)
        if not bodies:
            # Nothing to insert; avoid an empty executemany.
            return
        rows = []
        for body in bodies:
            payload, hdrs = _encode_payload(body, headers=headers)
            rows.append({"queue": queue, "payload": payload, "headers": hdrs})
        # Single executemany round-trip for the whole batch.
        await session.execute(insert(self._outbox_table), rows)

    async def request(self, *args: typing.Any, **kwargs: typing.Any) -> typing.NoReturn:
        """Request-reply is not meaningful over an outbox table."""
        msg = "OutboxBroker does not support request-reply"
        raise NotImplementedError(msg)
|
216
|
+
|
|
217
|
+
|
|
218
|
+
class _NoProducer:
    """Raising placeholder for FastStream's mandatory producer slot.

    The outbox broker writes rows through SQLAlchemy sessions, so every
    produce-style operation here is unsupported by design.
    """

    def connect(self, *_args: typing.Any, **_kwargs: typing.Any) -> None:
        """No-op: there is nothing to connect."""
        return None

    def disconnect(self) -> None:
        """No-op: there is nothing to disconnect."""
        return None

    async def publish(self, *_args: typing.Any, **_kwargs: typing.Any) -> typing.NoReturn:
        raise NotImplementedError("OutboxBroker has no producer")

    async def publish_batch(self, *_args: typing.Any, **_kwargs: typing.Any) -> typing.NoReturn:
        raise NotImplementedError("OutboxBroker has no producer")

    async def request(self, *_args: typing.Any, **_kwargs: typing.Any) -> typing.NoReturn:
        raise NotImplementedError("OutboxBroker has no producer")
|