nlbone 0.6.20__py3-none-any.whl → 0.7.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nlbone/adapters/db/postgres/__init__.py +1 -1
- nlbone/adapters/db/postgres/base.py +2 -1
- nlbone/adapters/db/postgres/repository.py +254 -29
- nlbone/adapters/db/postgres/uow.py +36 -1
- nlbone/adapters/messaging/__init__.py +1 -1
- nlbone/adapters/messaging/event_bus.py +97 -17
- nlbone/adapters/messaging/rabbitmq.py +45 -0
- nlbone/adapters/outbox/__init__.py +1 -0
- nlbone/adapters/outbox/outbox_consumer.py +112 -0
- nlbone/adapters/outbox/outbox_repo.py +191 -0
- nlbone/adapters/ticketing/client.py +39 -0
- nlbone/config/settings.py +9 -5
- nlbone/container.py +1 -8
- nlbone/core/application/bus.py +7 -7
- nlbone/core/application/di.py +43 -14
- nlbone/core/application/registry.py +12 -6
- nlbone/core/domain/base.py +30 -9
- nlbone/core/domain/models.py +46 -3
- nlbone/core/ports/__init__.py +0 -2
- nlbone/core/ports/event_bus.py +23 -6
- nlbone/core/ports/outbox.py +73 -0
- nlbone/core/ports/repository.py +10 -9
- nlbone/core/ports/uow.py +20 -1
- nlbone/interfaces/api/additional_filed/field_registry.py +2 -0
- nlbone/interfaces/cli/init_db.py +39 -2
- nlbone/interfaces/cli/main.py +2 -0
- nlbone/interfaces/cli/ticket.py +29 -0
- nlbone/interfaces/jobs/dispatch_outbox.py +2 -2
- nlbone/utils/crypto.py +7 -4
- {nlbone-0.6.20.dist-info → nlbone-0.7.0.dist-info}/METADATA +3 -2
- {nlbone-0.6.20.dist-info → nlbone-0.7.0.dist-info}/RECORD +35 -34
- nlbone/adapters/repositories/outbox_repo.py +0 -18
- nlbone/core/application/events.py +0 -20
- nlbone/core/application/services.py +0 -0
- nlbone/core/domain/events.py +0 -0
- nlbone/core/ports/messaging.py +0 -0
- nlbone/core/ports/repo.py +0 -19
- /nlbone/adapters/{messaging/redis.py → ticketing/__init__.py} +0 -0
- {nlbone-0.6.20.dist-info → nlbone-0.7.0.dist-info}/WHEEL +0 -0
- {nlbone-0.6.20.dist-info → nlbone-0.7.0.dist-info}/entry_points.txt +0 -0
- {nlbone-0.6.20.dist-info → nlbone-0.7.0.dist-info}/licenses/LICENSE +0 -0
nlbone/adapters/db/postgres/__init__.py

```diff
@@ -1,4 +1,4 @@
 from .engine import async_ping, async_session, init_async_engine, init_sync_engine, sync_ping, sync_session
 from .query_builder import apply_pagination, get_paginated_response
-from .repository import
+from .repository import SQLAlchemyAsyncRepository, SQLAlchemyRepository
 from .uow import AsyncSqlAlchemyUnitOfWork, SqlAlchemyUnitOfWork
```
nlbone/adapters/db/postgres/repository.py

```diff
@@ -1,54 +1,279 @@
 from __future__ import annotations
 
-from
+from abc import ABC
+from typing import Any, Callable, Iterable, List, Optional, Sequence
 
-from sqlalchemy import
+from sqlalchemy import delete as sqla_delete
+from sqlalchemy import desc as sa_desc
+from sqlalchemy import func, select
 from sqlalchemy.ext.asyncio import AsyncSession
 from sqlalchemy.orm import Session
 
-from nlbone.core.ports.
+from nlbone.core.ports.repository import ID, AsyncRepository, Repository, T
+from nlbone.interfaces.api.exceptions import NotFoundException
 
-T = TypeVar("T")
 
+# -----------------------------
+# Helper utilities
+# -----------------------------
+def _apply_python_filters(
+    items: Sequence[T],
+    *,
+    where: Optional[Callable[[T], bool]] = None,
+    order_by: Optional[Callable[[T], object]] = None,
+    reverse: bool = False,
+    offset: int = 0,
+    limit: Optional[int] = None,
+) -> List[T]:
+    data = list(items)
+    if where:
+        data = [x for x in data if where(x)]
+    if order_by:
+        data.sort(key=order_by, reverse=reverse)
+    else:
+        if reverse:
+            data.reverse()
+    if offset:
+        data = data[offset:]
+    if limit is not None:
+        data = data[:limit]
+    return data
 
-
-
+
+def _has_attr_id(entity: Any) -> bool:
+    return hasattr(entity, "id")
+
+
+# -----------------------------
+# SQLAlchemy (sync)
+# -----------------------------
+class SQLAlchemyRepository(Repository, ABC):
+    """
+    Concrete Repository[T, ID] backed by SQLAlchemy Session (sync).
+    Assumes entities have an `id` attribute and are mapped.
+    """
+
+    def __init__(self, session: Session, *, autocommit: bool = False):
         self.session = session
-        self.
+        self.autocommit = autocommit
 
-    def get(self, id) -> Optional[T]:
+    def get(self, id: ID) -> Optional[T]:
         return self.session.get(self.model, id)
 
-    def
-        self.
+    def get_or_raise(self, id: ID) -> T:
+        entity = self.get(id)
+        if entity is None:
+            raise NotFoundException(f"Entity with id={id!r} not found")
+        return entity
+
+    def list(
+        self,
+        *,
+        offset: int = 0,
+        limit: Optional[int] = None,
+        where: Optional[Callable[[T], bool]] = None,
+        order_by: Optional[Callable[[T], object]] = None,
+        reverse: bool = False,
+    ) -> List[T]:
+        # If where/order_by look like SQLAlchemy expressions (not callables), push down to DB.
+        if where is None and (order_by is None or callable(order_by)):
+            stmt = select(self.model)
+        elif callable(where) or (order_by is not None and callable(order_by)):
+            # Fallback to Python-side filtering
+            stmt = select(self.model)
+        else:
+            stmt = select(self.model).where(where)  # type: ignore[arg-type]
+            if order_by is not None:
+                stmt = stmt.order_by(sa_desc(order_by) if reverse else order_by)  # type: ignore[arg-type]
+        if where is None and (order_by is None or not callable(order_by)):
+            if offset:
+                stmt = stmt.offset(offset)
+            if limit is not None:
+                stmt = stmt.limit(limit)
+            result = self.session.execute(stmt)
+            rows = result.scalars().all()
+            # If order_by was a Python callable, apply now
+            if order_by is not None and callable(order_by):
+                return _apply_python_filters(rows, order_by=order_by, reverse=reverse, offset=0, limit=None)
+            return rows
+        # Python-side filtering path
+        rows = self.session.execute(select(self.model)).scalars().all()
+        return _apply_python_filters(rows, where=where, order_by=order_by, reverse=reverse, offset=offset, limit=limit)
+
+    def count(self, *, where: Optional[Callable[[T], bool]] = None) -> int:
+        if where is None:
+            stmt = select(func.count()).select_from(self.model)
+            return self.session.execute(stmt).scalar_one()
+        # Python-side when `where` is a callable
+        rows = self.session.execute(select(self.model)).scalars().all()
+        return sum(1 for x in rows if where(x))
+
+    def exists(self, id: ID) -> bool:
+        return self.get(id) is not None
+
+    # --- Write ---
+    def add(self, entity: T) -> T:
+        if not _has_attr_id(entity):
+            raise ValueError("Entity must have an `id` attribute.")
+        if self.exists(getattr(entity, "id")):
+            raise ValueError(f"Entity with id={getattr(entity, 'id')!r} already exists")
+        self.session.add(entity)
+        self.session.flush()
+        if self.autocommit:
+            self.session.commit()
+        return entity
+
+    def add_many(self, entities: Iterable[T]) -> List[T]:
+        data = list(entities)
+        for e in data:
+            if not _has_attr_id(e):
+                raise ValueError("All entities must have an `id` attribute.")
+        # Basic duplicate check in memory (best-effort)
+        ids = [getattr(e, "id") for e in data]
+        if len(ids) != len(set(ids)):
+            raise ValueError("Duplicate IDs in input batch.")
+        self.session.add_all(data)
+        if self.autocommit:
+            self.session.commit()
+        return data
 
-    def
+    def update(self, entity: T) -> T:
+        if not _has_attr_id(entity):
+            raise ValueError("Entity must have an `id` attribute.")
+        id_value = getattr(entity, "id")
+        if not self.exists(id_value):
+            raise NotFoundException(f"Entity with id={id_value!r} not found")
+        merged = self.session.merge(entity)
+        if self.autocommit:
+            self.session.commit()
+        return merged
+
+    def delete(self, id: ID) -> bool:
+        obj = self.get(id)
+        if not obj:
+            return False
         self.session.delete(obj)
+        if self.autocommit:
+            self.session.commit()
+        return True
+
+    def clear(self) -> None:
+        self.session.execute(sqla_delete(self.model))
+        if self.autocommit:
+            self.session.commit()
 
-    def list(self, *, limit: int | None = None, offset: int = 0) -> Iterable[T]:
-        q = self.session.query(self.model).offset(offset)
-        if limit is not None:
-            q = q.limit(limit)
-        return q.all()
 
+# -----------------------------
+# SQLAlchemy (async)
+# -----------------------------
+class SQLAlchemyAsyncRepository(AsyncRepository, ABC):
+    """
+    Concrete AsyncRepository[T, ID] backed by SQLAlchemy AsyncSession.
+    Assumes entities have an `id` attribute and are mapped.
+    """
 
-
-    def __init__(self, session: AsyncSession, model: Type[T]) -> None:
+    def __init__(self, session: AsyncSession, *, autocommit: bool = True):
         self.session = session
-        self.
+        self.autocommit = autocommit
 
-
+    # --- Read ---
+    async def get(self, id: ID) -> Optional[T]:
         return await self.session.get(self.model, id)
 
-    def
-        self.
+    async def get_or_raise(self, id: ID) -> T:
+        entity = await self.get(id)
+        if entity is None:
+            raise NotFoundException(f"Entity with id={id!r} not found")
+        return entity
+
+    async def list(
+        self,
+        *,
+        offset: int = 0,
+        limit: Optional[int] = None,
+        where: Optional[Callable[[T], bool]] = None,
+        order_by: Optional[Callable[[T], object]] = None,
+        reverse: bool = False,
+    ) -> List[T]:
+        if where is None and (order_by is None or callable(order_by)):
+            stmt = select(self.model)
+        elif callable(where) or (order_by is not None and callable(order_by)):
+            stmt = select(self.model)
+        else:
+            stmt = select(self.model).where(where)  # type: ignore[arg-type]
+            if order_by is not None:
+                stmt = stmt.order_by(sa_desc(order_by) if reverse else order_by)  # type: ignore[arg-type]
+        if where is None and (order_by is None or not callable(order_by)):
+            if offset:
+                stmt = stmt.offset(offset)
+            if limit is not None:
+                stmt = stmt.limit(limit)
+            result = await self.session.execute(stmt)
+            rows = result.scalars().all()
+            if order_by is not None and callable(order_by):
+                return _apply_python_filters(rows, order_by=order_by, reverse=reverse, offset=0, limit=None)
+            return rows
+        result = await self.session.execute(select(self.model))
+        rows = result.scalars().all()
+        return _apply_python_filters(rows, where=where, order_by=order_by, reverse=reverse, offset=offset, limit=limit)
+
+    async def count(self, *, where: Optional[Callable[[T], bool]] = None) -> int:
+        if where is None:
+            stmt = select(func.count()).select_from(self.model)
+            result = await self.session.execute(stmt)
+            return result.scalar_one()
+        result = await self.session.execute(select(self.model))
+        rows = result.scalars().all()
+        return sum(1 for x in rows if where(x))
+
+    async def exists(self, id: ID) -> bool:
+        return (await self.get(id)) is not None
+
+    # --- Write ---
+    async def add(self, entity: T) -> T:
+        if not _has_attr_id(entity):
+            raise ValueError("Entity must have an `id` attribute.")
+        if await self.exists(getattr(entity, "id")):
+            raise ValueError(f"Entity with id={getattr(entity, 'id')!r} already exists")
+        self.session.add(entity)
+        if self.autocommit:
+            await self.session.commit()
+        return entity
+
+    async def add_many(self, entities: Iterable[T]) -> List[T]:
+        data = list(entities)
+        for e in data:
+            if not _has_attr_id(e):
+                raise ValueError("All entities must have an `id` attribute.")
+        ids = [getattr(e, "id") for e in data]
+        if len(ids) != len(set(ids)):
+            raise ValueError("Duplicate IDs in input batch.")
+        self.session.add_all(data)
+        if self.autocommit:
+            await self.session.commit()
+        return data
+
+    async def update(self, entity: T) -> T:
+        if not _has_attr_id(entity):
+            raise ValueError("Entity must have an `id` attribute.")
+        id_value = getattr(entity, "id")
+        if not await self.exists(id_value):
+            raise NotFoundException(f"Entity with id={id_value!r} not found")
+        merged = await self.session.merge(entity)
+        if self.autocommit:
+            await self.session.commit()
+        return merged
 
-    async def
+    async def delete(self, id: ID) -> bool:
+        obj = await self.get(id)
+        if not obj:
+            return False
         await self.session.delete(obj)
+        if self.autocommit:
+            await self.session.commit()
+        return True
 
-    async def
-
-    if
-
-        res = await self.session.execute(stmt)
-        return list(res.scalars().all())
+    async def clear(self) -> None:
+        await self.session.execute(sqla_delete(self.model))
+        if self.autocommit:
+            await self.session.commit()
```
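As a usage illustration (not part of the diff): the new `SQLAlchemyRepository` never assigns `model` itself, so a concrete subclass is expected to supply it, and a callable `where` falls back to Python-side filtering while SQLAlchemy expressions are pushed down to SQL. A minimal sketch follows; everything except the imported nlbone name is hypothetical.

```python
from sqlalchemy import Integer, String, create_engine
from sqlalchemy.orm import DeclarativeBase, Mapped, mapped_column, sessionmaker

from nlbone.adapters.db.postgres.repository import SQLAlchemyRepository


class Base(DeclarativeBase):
    pass


class User(Base):  # hypothetical entity; the repository only requires an `id` attribute
    __tablename__ = "users"
    id: Mapped[int] = mapped_column(Integer, primary_key=True)
    name: Mapped[str] = mapped_column(String(50))


class UserRepository(SQLAlchemyRepository):
    model = User  # the class attribute the base repository reads


engine = create_engine("sqlite://")
Base.metadata.create_all(engine)

repo = UserRepository(sessionmaker(bind=engine)(), autocommit=True)
repo.add(User(id=1, name="Ada"))
print(repo.count())                                # 1, pushed down as SELECT count(*)
print(repo.list(where=lambda u: u.name == "Ada"))  # callable -> filtered in Python
```

Note the trade-off visible in `list()`/`count()`: any callable predicate loads the whole table before filtering, so expression-style filters remain the scalable path.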
nlbone/adapters/db/postgres/uow.py

```diff
@@ -1,10 +1,13 @@
 from __future__ import annotations
 
-from typing import Optional
+from typing import AsyncIterator, Iterator, Optional
 
 from sqlalchemy.ext.asyncio import AsyncSession, async_sessionmaker
 from sqlalchemy.orm import Session, sessionmaker
 
+from nlbone.adapters.outbox.outbox_repo import SQLAlchemyAsyncOutboxRepository, SQLAlchemyOutboxRepository
+from nlbone.core.domain.base import DomainEvent
+from nlbone.core.ports.repository import AsyncRepository, Repository
 from nlbone.core.ports.uow import AsyncUnitOfWork as AsyncUnitOfWorkPort
 from nlbone.core.ports.uow import UnitOfWork
 
@@ -18,6 +21,7 @@ class SqlAlchemyUnitOfWork(UnitOfWork):
 
     def __enter__(self) -> "SqlAlchemyUnitOfWork":
         self.session = self._session_factory()
+        self.outbox_repo = SQLAlchemyOutboxRepository(self.session)
         return self
 
     def __exit__(self, exc_type, exc, tb) -> None:
@@ -34,11 +38,26 @@ class SqlAlchemyUnitOfWork(UnitOfWork):
     def commit(self) -> None:
         if self.session:
             self.session.commit()
+        # if self.event_bus:
+        #     for obj in self.session:
+        #         events = getattr(obj, "events", None)
+        #         if events:
+        #             for evt in list(events):
+        #                 self.event_bus.publish(evt)
+        #             obj.clear_events()
 
     def rollback(self) -> None:
         if self.session:
             self.session.rollback()
 
+    def collect_new_events(self) -> Iterator[DomainEvent]:
+        for name, type_ in self.__annotations__.items():
+            if isinstance(type_, type) and issubclass(type_, Repository):
+                repo = getattr(self, name)
+                for entity in repo.seen:
+                    for event in entity.events:
+                        yield event
+
 
 class AsyncSqlAlchemyUnitOfWork(AsyncUnitOfWorkPort):
     """Transactional boundary for async SQLAlchemy."""
@@ -49,6 +68,7 @@ class AsyncSqlAlchemyUnitOfWork(AsyncUnitOfWorkPort):
 
     async def __aenter__(self) -> "AsyncSqlAlchemyUnitOfWork":
         self.session = self._sf()
+        self.outbox_repo = SQLAlchemyAsyncOutboxRepository(self.session)
         return self
 
     async def __aexit__(self, exc_type, exc, tb) -> None:
@@ -65,7 +85,22 @@ class AsyncSqlAlchemyUnitOfWork(AsyncUnitOfWorkPort):
     async def commit(self) -> None:
         if self.session:
             await self.session.commit()
+            if self.event_bus:
+                for obj in self.session:
+                    events = getattr(obj, "events", None)
+                    if events:
+                        for evt in list(events):
+                            self.event_bus.publish(evt)
+                        obj.clear_events()
 
     async def rollback(self) -> None:
         if self.session:
             await self.session.rollback()
+
+    async def collect_new_events(self) -> AsyncIterator[DomainEvent]:
+        for name, type_ in self.__annotations__.items():
+            if isinstance(type_, type) and issubclass(type_, AsyncRepository):
+                repo = getattr(self, name)
+                for entity in repo.seen:
+                    for event in entity.events:
+                        yield event
```
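A hypothetical wiring sketch building on the `UserRepository` above: `collect_new_events()` discovers repositories through the UoW class's annotations and drains `entity.events` from each repository's `seen` collection. Both `seen` and the constructor signature are assumptions inferred from the surrounding code, not shown in this diff.

```python
from nlbone.adapters.db.postgres.uow import SqlAlchemyUnitOfWork


class UserUnitOfWork(SqlAlchemyUnitOfWork):
    # collect_new_events() calls issubclass() on class annotations, so this must stay a
    # real class annotation; a module-level `from __future__ import annotations` would
    # turn it into a string and the repository would be silently skipped.
    users: UserRepository

    def __enter__(self) -> "UserUnitOfWork":
        super().__enter__()  # 0.7.0 also attaches self.outbox_repo here
        self.users = UserRepository(self.session)
        return self


with UserUnitOfWork(session_factory) as uow:  # session_factory: assumed sessionmaker
    uow.users.add(User(id=2, name="Grace"))
    uow.commit()
    new_events = list(uow.collect_new_events())  # drains repo.seen -> entity.events
```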
nlbone/adapters/messaging/__init__.py

```diff
@@ -1 +1 @@
-from .event_bus import
+from .event_bus import InProcessEventBus
```
nlbone/adapters/messaging/event_bus.py

```diff
@@ -1,23 +1,103 @@
-
+import asyncio
+import json
+import time
+from typing import Callable, Dict, Iterable, List, Type
 
-
-from typing import Callable, Dict, Iterable, List
+import redis
 
 from nlbone.core.domain.base import DomainEvent
-from nlbone.core.
+from nlbone.core.domain.models import Outbox
+from nlbone.core.ports.event_bus import EventBus, EventHandler
 
 
-class
+class InProcessEventBus(EventBus):
     def __init__(self) -> None:
-        self._handlers: Dict[
-
-    def
-
-
-
-
-
-
-
-
-
+        self._handlers: Dict[Type[DomainEvent], List[EventHandler]] = {}
+
+    def subscribe(self, event_type: Type[DomainEvent], handler: EventHandler) -> None:
+        self._handlers.setdefault(event_type, []).append(handler)
+
+    def publish(self, event: DomainEvent) -> None:
+        handlers = list(self._handlers.get(type(event), []))
+        loop = None
+        for h in handlers:
+            res = h(event)
+            if asyncio.iscoroutine(res):
+                loop = loop or asyncio.get_event_loop()
+                loop.create_task(res)
+
+
+class OutboxDispatcher:
+    def __init__(self, session_factory, event_bus: EventBus, batch_size: int = 100):
+        self._sf = session_factory
+        self._bus = event_bus
+        self._batch = batch_size
+
+    def run_once(self) -> int:
+        sent = 0
+        with self._sf() as s:  # type: Session
+            rows: Iterable[Outbox] = (
+                s.query(Outbox).filter_by(published=False).order_by(Outbox.occurred_at).limit(self._batch).all()
+            )
+            for r in rows:
+                self._bus.publish(type("OutboxEvent", (), r.payload))
+                r.published = True
+                sent += 1
+            s.commit()
+        return sent
+
+
+class RedisStreamsEventBus(EventBus):
+    """Topic = stream name. routing_key = event.type"""
+
+    def __init__(self, client: redis.Redis, stream: str = "nlb:domain:events"):
+        self.client = client
+        self.stream = stream
+        self._local_handlers: dict[type[DomainEvent], list[EventHandler]] = {}
+
+    def subscribe(self, event_type: type[DomainEvent], handler: EventHandler) -> None:
+        self._local_handlers.setdefault(event_type, []).append(handler)
+
+    def publish(self, event: DomainEvent) -> None:
+        self.client.xadd(
+            self.stream,
+            {
+                "type": event.type,
+                "payload": json.dumps(event.__dict__, default=str),
+            },
+            maxlen=10_000,
+            approximate=True,
+        )
+        # optional: local handlers in the same process (hybrid choreography)
+        for h in self._local_handlers.get(type(event), []):
+            h(event)
+
+
+class RedisStreamsConsumer:
+    def __init__(self, client: redis.Redis, stream: str, group: str, consumer: str, dlq: str | None = None):
+        self.client = client
+        self.stream = stream
+        self.group = group
+        self.consumer = consumer
+        self.dlq = dlq or f"{stream}:dlq"
+
+        try:
+            self.client.xgroup_create(name=self.stream, groupname=self.group, id="$", mkstream=True)
+        except redis.ResponseError:
+            pass  # group exists
+
+    def consume_forever(self, handler: Callable[[dict], None], block_ms: int = 2000, count: int = 32):
+        while True:
+            resp = self.client.xreadgroup(self.group, self.consumer, {self.stream: ">"}, count=count, block=block_ms)
+            if not resp:
+                continue
+            for _stream, messages in resp:
+                for msg_id, fields in messages:
+                    try:
+                        payload = json.loads(fields[b"payload"].decode())
+                        handler(payload)
+                        self.client.xack(self.stream, self.group, msg_id)
+                    except Exception:
+                        self.client.xack(self.stream, self.group, msg_id)
+                        self.client.xadd(self.dlq, fields)
+            time.sleep(0.05)
```
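A minimal in-process sketch of the new bus, with a hypothetical event type; it assumes `DomainEvent` tolerates dataclass-style subclassing and that `EventHandler` is a plain callable, neither of which is shown in this diff.

```python
from dataclasses import dataclass

from nlbone.adapters.messaging.event_bus import InProcessEventBus
from nlbone.core.domain.base import DomainEvent


@dataclass
class TicketCreated(DomainEvent):  # hypothetical event type
    ticket_id: int = 0


def on_created(event: TicketCreated) -> None:
    print(f"ticket {event.ticket_id} created")


bus = InProcessEventBus()
bus.subscribe(TicketCreated, on_created)
bus.publish(TicketCreated(ticket_id=7))  # sync handlers run inline; coroutines get scheduled
```

One caveat worth noting: `OutboxDispatcher.run_once()` publishes `type("OutboxEvent", (), r.payload)`, a dynamically built class rather than an instance, so type-keyed subscriptions like the one above appear unable to match those messages; the Redis and RabbitMQ buses look like the intended cross-process paths.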
nlbone/adapters/messaging/rabbitmq.py (new file)

```diff
@@ -0,0 +1,45 @@
+import json
+from typing import Mapping, Any, Optional
+
+import aio_pika
+from aio_pika import ExchangeType, Message
+
+
+import json
+from typing import Mapping, Any, Optional
+import aio_pika
+from aio_pika import ExchangeType, Message
+from nlbone.core.ports.event_bus import EventBus
+
+class RabbitMQEventBus(EventBus):
+    def __init__(self, amqp_url: str, declare_passive: bool = True, exchange_type: ExchangeType = ExchangeType.DIRECT):
+        self._amqp_url = amqp_url
+        self._declare_passive = declare_passive
+        self._exchange_type = exchange_type
+        self._connection: Optional[aio_pika.RobustConnection] = None
+        self._channel: Optional[aio_pika.Channel] = None
+        self._exchange_cache: dict[str, aio_pika.Exchange] = {}
+
+    async def _ensure_channel(self) -> aio_pika.Channel:
+        if not self._connection or self._connection.is_closed:
+            self._connection = await aio_pika.connect_robust(self._amqp_url)
+        if not self._channel or self._channel.is_closed:
+            self._channel = await self._connection.channel(publisher_confirms=True)
+        return self._channel
+
+    async def _get_exchange(self, name: str) -> aio_pika.Exchange:
+        if name in self._exchange_cache:
+            return self._exchange_cache[name]
+        ch = await self._ensure_channel()
+        if self._declare_passive:
+            ex = await ch.declare_exchange(name, self._exchange_type, durable=True, passive=True)
+        else:
+            ex = await ch.declare_exchange(name, self._exchange_type, durable=True, passive=False)
+        self._exchange_cache[name] = ex
+        return ex
+
+    async def publish(self, *, exchange: str, routing_key: str, payload: Mapping[str, Any]) -> None:
+        ex = await self._get_exchange(exchange)
+        body = json.dumps(payload, ensure_ascii=False).encode("utf-8")
+        msg = Message(body=body, content_type="application/json", delivery_mode=aio_pika.DeliveryMode.PERSISTENT)
+        await ex.publish(msg, routing_key=routing_key)
```

(The import block is duplicated verbatim in the shipped file.)
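A hypothetical publish sketch against the signature shown above; the AMQP URL, exchange name, and routing key are placeholders.

```python
import asyncio

from nlbone.adapters.messaging.rabbitmq import RabbitMQEventBus


async def main() -> None:
    # declare_passive=False lets the bus create the exchange if it does not exist yet;
    # the default (passive=True) raises if the exchange was never declared.
    bus = RabbitMQEventBus("amqp://guest:guest@localhost/", declare_passive=False)
    await bus.publish(
        exchange="nlb.events",          # placeholder exchange
        routing_key="ticket.created",   # placeholder routing key
        payload={"ticket_id": 1, "status": "open"},
    )


asyncio.run(main())
```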
nlbone/adapters/outbox/__init__.py (new file)

```diff
@@ -0,0 +1 @@
+from .outbox_consumer import outbox_stream, outbox_stream_sync, process_batch, process_message, process_message_sync
```
nlbone/adapters/outbox/outbox_consumer.py (new file)

```diff
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+import asyncio
+import time
+from contextlib import asynccontextmanager, contextmanager
+from datetime import timedelta
+from typing import AsyncIterator, Iterable, Iterator, Optional
+
+from nlbone.adapters.outbox.outbox_repo import AsyncOutboxRepository, OutboxRepository
+from nlbone.core.domain.models import Outbox
+from nlbone.core.ports import UnitOfWork
+
+
+async def outbox_stream(
+    repo: AsyncOutboxRepository,
+    *,
+    batch_size: int = 100,
+    idle_sleep: float = 1.0,
+    stop_event: Optional[asyncio.Event] = None,
+) -> AsyncIterator[Outbox]:
+    """
+    Yields Outbox one-by-one. If none available, waits (idle_sleep) and tries again.
+    Designed to run forever until stop_event is set.
+    """
+    while True:
+        if stop_event and stop_event.is_set():
+            return
+        batch: list[Outbox] = await repo.claim_batch(limit=batch_size)
+        if not batch:
+            await asyncio.sleep(idle_sleep)
+            continue
+        for msg in batch:
+            yield msg
+
+
+@asynccontextmanager
+async def process_message(
+    repo: AsyncOutboxRepository,
+    msg: Outbox,
+    *,
+    backoff: timedelta = timedelta(seconds=30),
+):
+    """
+    Usage:
+        async with process_message(repo, msg):
+            ... do work ...
+    On success -> mark_published
+    On exception -> mark_failed with backoff
+    """
+    try:
+        yield msg
+    except Exception as e:  # noqa: BLE001
+        await repo.mark_failed(msg.id, str(e), backoff=backoff)
+        raise
+    else:
+        await repo.mark_published([msg.id])
+
+
+async def process_batch(
+    repo: AsyncOutboxRepository,
+    messages: Iterable[Outbox],
+    *,
+    backoff: timedelta = timedelta(seconds=30),
+    concurrency: int = 1,
+    handler=None,
+):
+    """
+    Optional helper: run a handler concurrently on a batch.
+    handler: async callable(msg) -> None/… ; ack/nack handled via context manager.
+    """
+    sem = asyncio.Semaphore(concurrency)
+
+    async def _run(m: Outbox):
+        async with sem:
+            async with process_message(repo, m, backoff=backoff):
+                if handler:
+                    await handler(m)
+
+    await asyncio.gather(*(_run(m) for m in messages))
+
+
+def outbox_stream_sync(
+    repo: OutboxRepository,
+    *,
+    topics: list[str] = None,
+    batch_size: int = 100,
+    idle_sleep: float = 1.0,
+    stop_flag: Optional[callable] = None,
+) -> Iterator[Outbox]:
+    while True:
+        if stop_flag and stop_flag():
+            return
+        batch = repo.claim_batch(limit=batch_size, topics=topics)
+        if not batch:
+            time.sleep(idle_sleep)
+            continue
+        for msg in batch:
+            yield msg
+
+
+@contextmanager
+def process_message_sync(uow: UnitOfWork, msg: Outbox, *, backoff: timedelta = timedelta(seconds=30)):
+    try:
+        yield msg
+    except Exception as e:
+        uow.rollback()
+        msg.mark_failed(str(e), backoff=backoff)
+        uow.commit()
+        raise
+    else:
+        msg.mark_published()
+        uow.commit()
```
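A hypothetical consumer loop wiring `outbox_stream` to `process_message`; `repo` is assumed to be one of the async outbox repositories from the new `outbox_repo` module, and `publish` is a user-supplied coroutine.

```python
import asyncio

from nlbone.adapters.outbox.outbox_consumer import outbox_stream, process_message


async def run_consumer(repo, publish) -> None:
    """repo: assumed AsyncOutboxRepository; publish: async callable(msg) -> None."""
    stop = asyncio.Event()  # set() this from a signal handler to drain and exit
    async for msg in outbox_stream(repo, batch_size=50, idle_sleep=0.5, stop_event=stop):
        try:
            # success -> mark_published; exception -> mark_failed with backoff, then re-raise
            async with process_message(repo, msg):
                await publish(msg)
        except Exception:
            continue  # failure already recorded; keep the loop alive for the next message
```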