aury-boot 0.0.37__py3-none-any.whl → 0.0.39__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- aury/boot/_version.py +2 -2
- aury/boot/application/migrations/setup.py +14 -2
- aury/boot/infrastructure/cache/backends.py +4 -422
- aury/boot/infrastructure/cache/base.py +38 -0
- aury/boot/infrastructure/cache/manager.py +151 -1
- aury/boot/infrastructure/cache/memory.py +306 -0
- aury/boot/infrastructure/cache/redis.py +259 -0
- aury/boot/infrastructure/mq/backends/redis.py +1 -1
- aury/boot/infrastructure/mq/backends/redis_stream.py +9 -2
- {aury_boot-0.0.37.dist-info → aury_boot-0.0.39.dist-info}/METADATA +1 -1
- {aury_boot-0.0.37.dist-info → aury_boot-0.0.39.dist-info}/RECORD +13 -11
- {aury_boot-0.0.37.dist-info → aury_boot-0.0.39.dist-info}/WHEEL +0 -0
- {aury_boot-0.0.37.dist-info → aury_boot-0.0.39.dist-info}/entry_points.txt +0 -0
aury/boot/_version.py
CHANGED
|
@@ -28,7 +28,7 @@ version_tuple: VERSION_TUPLE
|
|
|
28
28
|
commit_id: COMMIT_ID
|
|
29
29
|
__commit_id__: COMMIT_ID
|
|
30
30
|
|
|
31
|
-
__version__ = version = '0.0.
|
|
32
|
-
__version_tuple__ = version_tuple = (0, 0,
|
|
31
|
+
__version__ = version = '0.0.39'
|
|
32
|
+
__version_tuple__ = version_tuple = (0, 0, 39)
|
|
33
33
|
|
|
34
34
|
__commit_id__ = commit_id = None
|
|
@@ -102,6 +102,18 @@ load_all_models(_model_modules)
|
|
|
102
102
|
target_metadata = Base.metadata
|
|
103
103
|
|
|
104
104
|
|
|
105
|
+
# === 兼容性处理 ===
|
|
106
|
+
# 过滤 PostgreSQL 15+ 特有的约束参数,确保生成的 migration 兼容旧版本 PG
|
|
107
|
+
_PG15_CONSTRAINT_KWARGS = {{"postgresql_nulls_not_distinct", "postgresql_include"}}
|
|
108
|
+
|
|
109
|
+
def _render_item(type_, obj, autogen_context):
|
|
110
|
+
"""自定义渲染,过滤不兼容的约束参数。"""
|
|
111
|
+
if type_ == "unique_constraint" and hasattr(obj, "kwargs"):
|
|
112
|
+
for key in _PG15_CONSTRAINT_KWARGS:
|
|
113
|
+
obj.kwargs.pop(key, None)
|
|
114
|
+
return False # 使用默认渲染
|
|
115
|
+
|
|
116
|
+
|
|
105
117
|
def get_url() -> str:
|
|
106
118
|
"""获取数据库 URL(优先环境变量,其次 alembic.ini)。"""
|
|
107
119
|
return os.environ.get("DATABASE_URL") or config.get_main_option("sqlalchemy.url", "")
|
|
@@ -117,8 +129,8 @@ def run_migrations_offline() -> None:
|
|
|
117
129
|
dialect_opts={{"paramstyle": "named"}},
|
|
118
130
|
compare_type=True,
|
|
119
131
|
compare_server_default=True,
|
|
120
|
-
# 启用 batch 模式以更好地支持 SQLite 等不完整 DDL 的后端
|
|
121
132
|
render_as_batch=True,
|
|
133
|
+
render_item=_render_item,
|
|
122
134
|
)
|
|
123
135
|
with context.begin_transaction():
|
|
124
136
|
context.run_migrations()
|
|
@@ -131,8 +143,8 @@ def _do_run_migrations(connection) -> None:
|
|
|
131
143
|
target_metadata=target_metadata,
|
|
132
144
|
compare_type=True,
|
|
133
145
|
compare_server_default=True,
|
|
134
|
-
# 启用 batch 模式以更好地支持 SQLite 等不完整 DDL 的后端
|
|
135
146
|
render_as_batch=True,
|
|
147
|
+
render_item=_render_item,
|
|
136
148
|
)
|
|
137
149
|
with context.begin_transaction():
|
|
138
150
|
context.run_migrations()
|
|
@@ -1,428 +1,10 @@
|
|
|
1
|
-
"""
|
|
1
|
+
"""缓存后端实现(兼容层)。
|
|
2
2
|
|
|
3
|
-
|
|
3
|
+
实际实现在 redis.py 和 memory.py 中。
|
|
4
4
|
"""
|
|
5
5
|
|
|
6
|
-
from
|
|
7
|
-
|
|
8
|
-
import asyncio
|
|
9
|
-
from collections.abc import Callable
|
|
10
|
-
from datetime import timedelta
|
|
11
|
-
import json
|
|
12
|
-
import pickle
|
|
13
|
-
from typing import TYPE_CHECKING, Any
|
|
14
|
-
|
|
15
|
-
from redis.asyncio import Redis
|
|
16
|
-
|
|
17
|
-
from aury.boot.common.logging import logger
|
|
18
|
-
|
|
19
|
-
from .base import ICache
|
|
20
|
-
|
|
21
|
-
if TYPE_CHECKING:
|
|
22
|
-
from aury.boot.infrastructure.clients.redis import RedisClient
|
|
23
|
-
|
|
24
|
-
|
|
25
|
-
class RedisCache(ICache):
|
|
26
|
-
"""Redis缓存实现。
|
|
27
|
-
|
|
28
|
-
支持两种初始化方式:
|
|
29
|
-
1. 传入 URL 自行创建连接
|
|
30
|
-
2. 传入 RedisClient 实例(推荐)
|
|
31
|
-
"""
|
|
32
|
-
|
|
33
|
-
def __init__(
|
|
34
|
-
self,
|
|
35
|
-
url: str | None = None,
|
|
36
|
-
*,
|
|
37
|
-
redis_client: RedisClient | None = None,
|
|
38
|
-
serializer: str = "json",
|
|
39
|
-
):
|
|
40
|
-
"""初始化Redis缓存。
|
|
41
|
-
|
|
42
|
-
Args:
|
|
43
|
-
url: Redis连接URL
|
|
44
|
-
redis_client: RedisClient 实例(推荐)
|
|
45
|
-
serializer: 序列化方式(json/pickle)
|
|
46
|
-
"""
|
|
47
|
-
self._url = url
|
|
48
|
-
self._redis_client = redis_client
|
|
49
|
-
self._serializer = serializer
|
|
50
|
-
self._redis: Redis | None = None
|
|
51
|
-
self._owns_connection = False # 是否自己拥有连接(需要自己关闭)
|
|
52
|
-
|
|
53
|
-
async def initialize(self) -> None:
|
|
54
|
-
"""初始化连接。"""
|
|
55
|
-
# 优先使用 RedisClient
|
|
56
|
-
if self._redis_client is not None:
|
|
57
|
-
self._redis = self._redis_client.connection
|
|
58
|
-
self._owns_connection = False
|
|
59
|
-
logger.info("Redis缓存初始化成功(使用 RedisClient)")
|
|
60
|
-
return
|
|
61
|
-
|
|
62
|
-
# 使用 URL 创建连接
|
|
63
|
-
if self._url:
|
|
64
|
-
try:
|
|
65
|
-
self._redis = Redis.from_url(
|
|
66
|
-
self._url,
|
|
67
|
-
encoding="utf-8",
|
|
68
|
-
decode_responses=False,
|
|
69
|
-
socket_connect_timeout=5,
|
|
70
|
-
socket_timeout=5,
|
|
71
|
-
)
|
|
72
|
-
await self._redis.ping()
|
|
73
|
-
self._owns_connection = True
|
|
74
|
-
logger.info("Redis缓存初始化成功")
|
|
75
|
-
except Exception as exc:
|
|
76
|
-
logger.error(f"Redis连接失败: {exc}")
|
|
77
|
-
raise
|
|
78
|
-
else:
|
|
79
|
-
raise ValueError("Redis缓存需要提供 url 或 redis_client 参数")
|
|
80
|
-
|
|
81
|
-
async def get(self, key: str, default: Any = None) -> Any:
|
|
82
|
-
"""获取缓存。"""
|
|
83
|
-
if not self._redis:
|
|
84
|
-
return default
|
|
85
|
-
|
|
86
|
-
try:
|
|
87
|
-
data = await self._redis.get(key)
|
|
88
|
-
if data is None:
|
|
89
|
-
return default
|
|
90
|
-
|
|
91
|
-
# 使用函数式编程处理序列化器
|
|
92
|
-
deserializers: dict[str, Callable[[bytes], Any]] = {
|
|
93
|
-
"json": lambda d: json.loads(d.decode()),
|
|
94
|
-
"pickle": pickle.loads,
|
|
95
|
-
}
|
|
96
|
-
|
|
97
|
-
deserializer = deserializers.get(self._serializer)
|
|
98
|
-
if deserializer:
|
|
99
|
-
return deserializer(data)
|
|
100
|
-
return data.decode()
|
|
101
|
-
except Exception as exc:
|
|
102
|
-
logger.error(f"Redis获取失败: {key}, {exc}")
|
|
103
|
-
return default
|
|
104
|
-
|
|
105
|
-
async def set(
|
|
106
|
-
self,
|
|
107
|
-
key: str,
|
|
108
|
-
value: Any,
|
|
109
|
-
expire: int | timedelta | None = None,
|
|
110
|
-
) -> bool:
|
|
111
|
-
"""设置缓存。"""
|
|
112
|
-
if not self._redis:
|
|
113
|
-
return False
|
|
114
|
-
|
|
115
|
-
try:
|
|
116
|
-
# 使用函数式编程处理序列化器
|
|
117
|
-
serializers: dict[str, Callable[[Any], bytes]] = {
|
|
118
|
-
"json": lambda v: json.dumps(v).encode(),
|
|
119
|
-
"pickle": pickle.dumps,
|
|
120
|
-
}
|
|
121
|
-
|
|
122
|
-
serializer = serializers.get(self._serializer)
|
|
123
|
-
if serializer:
|
|
124
|
-
data = serializer(value)
|
|
125
|
-
else:
|
|
126
|
-
data = str(value).encode()
|
|
127
|
-
|
|
128
|
-
# 转换过期时间
|
|
129
|
-
if isinstance(expire, timedelta):
|
|
130
|
-
expire = int(expire.total_seconds())
|
|
131
|
-
|
|
132
|
-
await self._redis.set(key, data, ex=expire)
|
|
133
|
-
return True
|
|
134
|
-
except Exception as exc:
|
|
135
|
-
logger.error(f"Redis设置失败: {key}, {exc}")
|
|
136
|
-
return False
|
|
137
|
-
|
|
138
|
-
async def delete(self, *keys: str) -> int:
|
|
139
|
-
"""删除缓存。"""
|
|
140
|
-
if not self._redis or not keys:
|
|
141
|
-
return 0
|
|
142
|
-
|
|
143
|
-
try:
|
|
144
|
-
return await self._redis.delete(*keys)
|
|
145
|
-
except Exception as exc:
|
|
146
|
-
logger.error(f"Redis删除失败: {keys}, {exc}")
|
|
147
|
-
return 0
|
|
148
|
-
|
|
149
|
-
async def exists(self, *keys: str) -> int:
|
|
150
|
-
"""检查缓存是否存在。"""
|
|
151
|
-
if not self._redis or not keys:
|
|
152
|
-
return 0
|
|
153
|
-
|
|
154
|
-
try:
|
|
155
|
-
return await self._redis.exists(*keys)
|
|
156
|
-
except Exception as exc:
|
|
157
|
-
logger.error(f"Redis检查失败: {keys}, {exc}")
|
|
158
|
-
return 0
|
|
159
|
-
|
|
160
|
-
async def clear(self) -> None:
|
|
161
|
-
"""清空所有缓存。"""
|
|
162
|
-
if self._redis:
|
|
163
|
-
await self._redis.flushdb()
|
|
164
|
-
logger.info("Redis缓存已清空")
|
|
165
|
-
|
|
166
|
-
async def delete_pattern(self, pattern: str) -> int:
|
|
167
|
-
"""按模式删除缓存。
|
|
168
|
-
|
|
169
|
-
Args:
|
|
170
|
-
pattern: 通配符模式,如 "todo:*"
|
|
171
|
-
|
|
172
|
-
Returns:
|
|
173
|
-
int: 删除的键数量
|
|
174
|
-
"""
|
|
175
|
-
if not self._redis:
|
|
176
|
-
return 0
|
|
177
|
-
|
|
178
|
-
try:
|
|
179
|
-
# 使用 SCAN 遍历匹配的键(比 KEYS 更安全,不会阻塞)
|
|
180
|
-
count = 0
|
|
181
|
-
cursor = 0
|
|
182
|
-
while True:
|
|
183
|
-
cursor, keys = await self._redis.scan(cursor, match=pattern, count=100)
|
|
184
|
-
if keys:
|
|
185
|
-
count += await self._redis.delete(*keys)
|
|
186
|
-
if cursor == 0:
|
|
187
|
-
break
|
|
188
|
-
logger.debug(f"按模式删除缓存: {pattern}, 删除 {count} 个键")
|
|
189
|
-
return count
|
|
190
|
-
except Exception as exc:
|
|
191
|
-
logger.error(f"Redis模式删除失败: {pattern}, {exc}")
|
|
192
|
-
return 0
|
|
193
|
-
|
|
194
|
-
async def close(self) -> None:
|
|
195
|
-
"""关闭连接(仅当自己拥有连接时)。"""
|
|
196
|
-
if self._redis and self._owns_connection:
|
|
197
|
-
await self._redis.close()
|
|
198
|
-
logger.info("Redis连接已关闭")
|
|
199
|
-
self._redis = None
|
|
200
|
-
|
|
201
|
-
@property
|
|
202
|
-
def redis(self) -> Redis | None:
|
|
203
|
-
"""获取Redis客户端。"""
|
|
204
|
-
return self._redis
|
|
205
|
-
|
|
206
|
-
|
|
207
|
-
class MemoryCache(ICache):
|
|
208
|
-
"""内存缓存实现。"""
|
|
209
|
-
|
|
210
|
-
def __init__(self, max_size: int = 1000):
|
|
211
|
-
"""初始化内存缓存。
|
|
212
|
-
|
|
213
|
-
Args:
|
|
214
|
-
max_size: 最大缓存项数
|
|
215
|
-
"""
|
|
216
|
-
self._max_size = max_size
|
|
217
|
-
self._cache: dict[str, tuple[Any, float | None]] = {}
|
|
218
|
-
self._lock = asyncio.Lock()
|
|
219
|
-
|
|
220
|
-
async def get(self, key: str, default: Any = None) -> Any:
|
|
221
|
-
"""获取缓存。"""
|
|
222
|
-
async with self._lock:
|
|
223
|
-
if key not in self._cache:
|
|
224
|
-
return default
|
|
225
|
-
|
|
226
|
-
value, expire_at = self._cache[key]
|
|
227
|
-
|
|
228
|
-
# 检查过期
|
|
229
|
-
if expire_at is not None and asyncio.get_event_loop().time() > expire_at:
|
|
230
|
-
del self._cache[key]
|
|
231
|
-
return default
|
|
232
|
-
|
|
233
|
-
return value
|
|
234
|
-
|
|
235
|
-
async def set(
|
|
236
|
-
self,
|
|
237
|
-
key: str,
|
|
238
|
-
value: Any,
|
|
239
|
-
expire: int | timedelta | None = None,
|
|
240
|
-
) -> bool:
|
|
241
|
-
"""设置缓存。"""
|
|
242
|
-
async with self._lock:
|
|
243
|
-
# 转换过期时间
|
|
244
|
-
expire_at = None
|
|
245
|
-
if expire:
|
|
246
|
-
if isinstance(expire, timedelta):
|
|
247
|
-
expire_seconds = expire.total_seconds()
|
|
248
|
-
else:
|
|
249
|
-
expire_seconds = expire
|
|
250
|
-
expire_at = asyncio.get_event_loop().time() + expire_seconds
|
|
251
|
-
|
|
252
|
-
# 如果超出容量,删除最旧的
|
|
253
|
-
if len(self._cache) >= self._max_size and key not in self._cache:
|
|
254
|
-
# 简单策略:删除第一个
|
|
255
|
-
first_key = next(iter(self._cache))
|
|
256
|
-
del self._cache[first_key]
|
|
257
|
-
|
|
258
|
-
self._cache[key] = (value, expire_at)
|
|
259
|
-
return True
|
|
260
|
-
|
|
261
|
-
async def delete(self, *keys: str) -> int:
|
|
262
|
-
"""删除缓存。"""
|
|
263
|
-
async with self._lock:
|
|
264
|
-
count = 0
|
|
265
|
-
for key in keys:
|
|
266
|
-
if key in self._cache:
|
|
267
|
-
del self._cache[key]
|
|
268
|
-
count += 1
|
|
269
|
-
return count
|
|
270
|
-
|
|
271
|
-
async def exists(self, *keys: str) -> int:
|
|
272
|
-
"""检查缓存是否存在。"""
|
|
273
|
-
async with self._lock:
|
|
274
|
-
count = 0
|
|
275
|
-
for key in keys:
|
|
276
|
-
if key in self._cache:
|
|
277
|
-
_value, expire_at = self._cache[key]
|
|
278
|
-
# 检查是否过期
|
|
279
|
-
if expire_at is None or asyncio.get_event_loop().time() <= expire_at:
|
|
280
|
-
count += 1
|
|
281
|
-
return count
|
|
282
|
-
|
|
283
|
-
async def clear(self) -> None:
|
|
284
|
-
"""清空所有缓存。"""
|
|
285
|
-
async with self._lock:
|
|
286
|
-
self._cache.clear()
|
|
287
|
-
logger.info("内存缓存已清空")
|
|
288
|
-
|
|
289
|
-
async def delete_pattern(self, pattern: str) -> int:
|
|
290
|
-
"""按模式删除缓存。
|
|
291
|
-
|
|
292
|
-
Args:
|
|
293
|
-
pattern: 通配符模式,支持 * 和 ?
|
|
294
|
-
|
|
295
|
-
Returns:
|
|
296
|
-
int: 删除的键数量
|
|
297
|
-
"""
|
|
298
|
-
import fnmatch
|
|
299
|
-
|
|
300
|
-
async with self._lock:
|
|
301
|
-
keys_to_delete = [
|
|
302
|
-
key for key in self._cache
|
|
303
|
-
if fnmatch.fnmatch(key, pattern)
|
|
304
|
-
]
|
|
305
|
-
for key in keys_to_delete:
|
|
306
|
-
del self._cache[key]
|
|
307
|
-
logger.debug(f"按模式删除缓存: {pattern}, 删除 {len(keys_to_delete)} 个键")
|
|
308
|
-
return len(keys_to_delete)
|
|
309
|
-
|
|
310
|
-
async def close(self) -> None:
|
|
311
|
-
"""关闭连接(内存缓存无需关闭)。"""
|
|
312
|
-
await self.clear()
|
|
313
|
-
|
|
314
|
-
async def size(self) -> int:
|
|
315
|
-
"""获取缓存大小。"""
|
|
316
|
-
return len(self._cache)
|
|
317
|
-
|
|
318
|
-
|
|
319
|
-
class MemcachedCache(ICache):
|
|
320
|
-
"""Memcached缓存实现(可选)。"""
|
|
321
|
-
|
|
322
|
-
def __init__(self, servers: list[str]):
|
|
323
|
-
"""初始化Memcached缓存。
|
|
324
|
-
|
|
325
|
-
Args:
|
|
326
|
-
servers: Memcached服务器列表,如 ["127.0.0.1:11211"]
|
|
327
|
-
"""
|
|
328
|
-
self._servers = servers
|
|
329
|
-
self._client = None
|
|
330
|
-
|
|
331
|
-
async def initialize(self) -> None:
|
|
332
|
-
"""初始化连接。"""
|
|
333
|
-
try:
|
|
334
|
-
# 需要安装 python-memcached 或 aiomcache
|
|
335
|
-
try:
|
|
336
|
-
import aiomcache
|
|
337
|
-
self._client = aiomcache.Client(
|
|
338
|
-
self._servers[0].split(":")[0],
|
|
339
|
-
int(self._servers[0].split(":")[1]) if ":" in self._servers[0] else 11211,
|
|
340
|
-
)
|
|
341
|
-
logger.info("Memcached缓存初始化成功")
|
|
342
|
-
except ImportError:
|
|
343
|
-
logger.error("请安装 aiomcache: pip install aiomcache")
|
|
344
|
-
raise
|
|
345
|
-
except Exception as exc:
|
|
346
|
-
logger.error(f"Memcached连接失败: {exc}")
|
|
347
|
-
raise
|
|
348
|
-
|
|
349
|
-
async def get(self, key: str, default: Any = None) -> Any:
|
|
350
|
-
"""获取缓存。"""
|
|
351
|
-
if not self._client:
|
|
352
|
-
return default
|
|
353
|
-
|
|
354
|
-
try:
|
|
355
|
-
data = await self._client.get(key.encode())
|
|
356
|
-
if data is None:
|
|
357
|
-
return default
|
|
358
|
-
return json.loads(data.decode())
|
|
359
|
-
except Exception as exc:
|
|
360
|
-
logger.error(f"Memcached获取失败: {key}, {exc}")
|
|
361
|
-
return default
|
|
362
|
-
|
|
363
|
-
async def set(
|
|
364
|
-
self,
|
|
365
|
-
key: str,
|
|
366
|
-
value: Any,
|
|
367
|
-
expire: int | timedelta | None = None,
|
|
368
|
-
) -> bool:
|
|
369
|
-
"""设置缓存。"""
|
|
370
|
-
if not self._client:
|
|
371
|
-
return False
|
|
372
|
-
|
|
373
|
-
try:
|
|
374
|
-
if isinstance(expire, timedelta):
|
|
375
|
-
expire = int(expire.total_seconds())
|
|
376
|
-
|
|
377
|
-
data = json.dumps(value).encode()
|
|
378
|
-
return await self._client.set(key.encode(), data, exptime=expire or 0)
|
|
379
|
-
except Exception as exc:
|
|
380
|
-
logger.error(f"Memcached设置失败: {key}, {exc}")
|
|
381
|
-
return False
|
|
382
|
-
|
|
383
|
-
async def delete(self, *keys: str) -> int:
|
|
384
|
-
"""删除缓存。"""
|
|
385
|
-
if not self._client or not keys:
|
|
386
|
-
return 0
|
|
387
|
-
|
|
388
|
-
count = 0
|
|
389
|
-
for key in keys:
|
|
390
|
-
try:
|
|
391
|
-
if await self._client.delete(key.encode()):
|
|
392
|
-
count += 1
|
|
393
|
-
except Exception as exc:
|
|
394
|
-
logger.error(f"Memcached删除失败: {key}, {exc}")
|
|
395
|
-
return count
|
|
396
|
-
|
|
397
|
-
async def exists(self, *keys: str) -> int:
|
|
398
|
-
"""检查缓存是否存在。"""
|
|
399
|
-
if not self._client or not keys:
|
|
400
|
-
return 0
|
|
401
|
-
|
|
402
|
-
count = 0
|
|
403
|
-
for key in keys:
|
|
404
|
-
try:
|
|
405
|
-
if await self._client.get(key.encode()) is not None:
|
|
406
|
-
count += 1
|
|
407
|
-
except Exception:
|
|
408
|
-
pass
|
|
409
|
-
return count
|
|
410
|
-
|
|
411
|
-
async def clear(self) -> None:
|
|
412
|
-
"""清空所有缓存(Memcached不支持)。"""
|
|
413
|
-
logger.warning("Memcached不支持清空所有缓存")
|
|
414
|
-
|
|
415
|
-
async def delete_pattern(self, pattern: str) -> int:
|
|
416
|
-
"""按模式删除缓存(Memcached 不支持)。"""
|
|
417
|
-
logger.warning("Memcached 不支持模式删除,请使用 Redis 或 Memory 后端")
|
|
418
|
-
return 0
|
|
419
|
-
|
|
420
|
-
async def close(self) -> None:
|
|
421
|
-
"""关闭连接。"""
|
|
422
|
-
if self._client:
|
|
423
|
-
self._client.close()
|
|
424
|
-
logger.info("Memcached连接已关闭")
|
|
425
|
-
|
|
6
|
+
from .memory import MemcachedCache, MemoryCache
|
|
7
|
+
from .redis import RedisCache
|
|
426
8
|
|
|
427
9
|
__all__ = [
|
|
428
10
|
"MemcachedCache",
|
|
@@ -71,6 +71,44 @@ class ICache(ABC):
|
|
|
71
71
|
async def close(self) -> None:
|
|
72
72
|
"""关闭连接。"""
|
|
73
73
|
pass
|
|
74
|
+
|
|
75
|
+
# ==================== 分布式锁 ====================
|
|
76
|
+
|
|
77
|
+
@abstractmethod
|
|
78
|
+
async def acquire_lock(
|
|
79
|
+
self,
|
|
80
|
+
key: str,
|
|
81
|
+
token: str,
|
|
82
|
+
timeout: int,
|
|
83
|
+
blocking: bool,
|
|
84
|
+
blocking_timeout: float | None,
|
|
85
|
+
) -> bool:
|
|
86
|
+
"""获取分布式锁。
|
|
87
|
+
|
|
88
|
+
Args:
|
|
89
|
+
key: 锁的键名(已加 lock: 前缀)
|
|
90
|
+
token: 锁的 token
|
|
91
|
+
timeout: 锁的超时时间(秒)
|
|
92
|
+
blocking: 是否阻塞等待
|
|
93
|
+
blocking_timeout: 阻塞等待的最大时间(秒)
|
|
94
|
+
|
|
95
|
+
Returns:
|
|
96
|
+
bool: 是否获取成功
|
|
97
|
+
"""
|
|
98
|
+
pass
|
|
99
|
+
|
|
100
|
+
@abstractmethod
|
|
101
|
+
async def release_lock(self, key: str, token: str) -> bool:
|
|
102
|
+
"""释放分布式锁。
|
|
103
|
+
|
|
104
|
+
Args:
|
|
105
|
+
key: 锁的键名(已加 lock: 前缀)
|
|
106
|
+
token: 获取锁时的 token
|
|
107
|
+
|
|
108
|
+
Returns:
|
|
109
|
+
bool: 是否成功释放
|
|
110
|
+
"""
|
|
111
|
+
pass
|
|
74
112
|
|
|
75
113
|
|
|
76
114
|
__all__ = [
|
|
@@ -5,11 +5,15 @@
|
|
|
5
5
|
|
|
6
6
|
from __future__ import annotations
|
|
7
7
|
|
|
8
|
+
import asyncio
|
|
8
9
|
from collections.abc import Callable
|
|
10
|
+
from contextlib import asynccontextmanager
|
|
9
11
|
from datetime import timedelta
|
|
10
12
|
from functools import wraps
|
|
11
13
|
import hashlib
|
|
12
|
-
|
|
14
|
+
import time
|
|
15
|
+
from typing import Any, AsyncIterator, TypeVar
|
|
16
|
+
import uuid
|
|
13
17
|
|
|
14
18
|
from aury.boot.common.logging import logger
|
|
15
19
|
|
|
@@ -328,6 +332,152 @@ class CacheManager:
|
|
|
328
332
|
self._backend = None
|
|
329
333
|
logger.info("缓存管理器已清理")
|
|
330
334
|
|
|
335
|
+
# ==================== 分布式锁 ====================
|
|
336
|
+
|
|
337
|
+
async def acquire_lock(
|
|
338
|
+
self,
|
|
339
|
+
key: str,
|
|
340
|
+
*,
|
|
341
|
+
timeout: int = 30,
|
|
342
|
+
blocking: bool = True,
|
|
343
|
+
blocking_timeout: float | None = None,
|
|
344
|
+
) -> str | None:
|
|
345
|
+
"""获取分布式锁。
|
|
346
|
+
|
|
347
|
+
Args:
|
|
348
|
+
key: 锁的键名
|
|
349
|
+
timeout: 锁的超时时间(秒),防止死锁
|
|
350
|
+
blocking: 是否阻塞等待
|
|
351
|
+
blocking_timeout: 阻塞等待的最大时间(秒)
|
|
352
|
+
|
|
353
|
+
Returns:
|
|
354
|
+
str | None: 锁的 token(用于释放),获取失败返回 None
|
|
355
|
+
"""
|
|
356
|
+
lock_key = f"lock:{key}"
|
|
357
|
+
token = str(uuid.uuid4())
|
|
358
|
+
|
|
359
|
+
acquired = await self.backend.acquire_lock(
|
|
360
|
+
lock_key, token, timeout, blocking, blocking_timeout
|
|
361
|
+
)
|
|
362
|
+
return token if acquired else None
|
|
363
|
+
|
|
364
|
+
async def release_lock(self, key: str, token: str) -> bool:
|
|
365
|
+
"""释放分布式锁。
|
|
366
|
+
|
|
367
|
+
Args:
|
|
368
|
+
key: 锁的键名
|
|
369
|
+
token: acquire_lock 返回的 token
|
|
370
|
+
|
|
371
|
+
Returns:
|
|
372
|
+
bool: 是否成功释放
|
|
373
|
+
"""
|
|
374
|
+
lock_key = f"lock:{key}"
|
|
375
|
+
return await self.backend.release_lock(lock_key, token)
|
|
376
|
+
|
|
377
|
+
@asynccontextmanager
|
|
378
|
+
async def lock(
|
|
379
|
+
self,
|
|
380
|
+
key: str,
|
|
381
|
+
*,
|
|
382
|
+
timeout: int = 30,
|
|
383
|
+
blocking: bool = True,
|
|
384
|
+
blocking_timeout: float | None = None,
|
|
385
|
+
) -> AsyncIterator[bool]:
|
|
386
|
+
"""分布式锁上下文管理器。
|
|
387
|
+
|
|
388
|
+
Args:
|
|
389
|
+
key: 锁的键名
|
|
390
|
+
timeout: 锁的超时时间(秒)
|
|
391
|
+
blocking: 是否阻塞等待
|
|
392
|
+
blocking_timeout: 阻塞等待的最大时间(秒)
|
|
393
|
+
|
|
394
|
+
Yields:
|
|
395
|
+
bool: 是否成功获取锁
|
|
396
|
+
|
|
397
|
+
示例:
|
|
398
|
+
async with cache.lock("my_resource") as acquired:
|
|
399
|
+
if acquired:
|
|
400
|
+
# 执行需要互斥的操作
|
|
401
|
+
pass
|
|
402
|
+
"""
|
|
403
|
+
token = await self.acquire_lock(
|
|
404
|
+
key,
|
|
405
|
+
timeout=timeout,
|
|
406
|
+
blocking=blocking,
|
|
407
|
+
blocking_timeout=blocking_timeout,
|
|
408
|
+
)
|
|
409
|
+
try:
|
|
410
|
+
yield token is not None
|
|
411
|
+
finally:
|
|
412
|
+
if token:
|
|
413
|
+
await self.release_lock(key, token)
|
|
414
|
+
|
|
415
|
+
@asynccontextmanager
|
|
416
|
+
async def semaphore(
|
|
417
|
+
self,
|
|
418
|
+
key: str,
|
|
419
|
+
max_concurrency: int,
|
|
420
|
+
*,
|
|
421
|
+
timeout: int = 300,
|
|
422
|
+
blocking: bool = True,
|
|
423
|
+
blocking_timeout: float | None = None,
|
|
424
|
+
) -> AsyncIterator[bool]:
|
|
425
|
+
"""分布式信号量(限制并发数)。
|
|
426
|
+
|
|
427
|
+
Args:
|
|
428
|
+
key: 信号量的键名
|
|
429
|
+
max_concurrency: 最大并发数
|
|
430
|
+
timeout: 单个槽位的超时时间(秒)
|
|
431
|
+
blocking: 是否阻塞等待
|
|
432
|
+
blocking_timeout: 阻塞等待的最大时间(秒)
|
|
433
|
+
|
|
434
|
+
Yields:
|
|
435
|
+
bool: 是否成功获取槽位
|
|
436
|
+
|
|
437
|
+
示例:
|
|
438
|
+
async with cache.semaphore("pdf_ocr", max_concurrency=2) as acquired:
|
|
439
|
+
if acquired:
|
|
440
|
+
# 执行受并发限制的操作
|
|
441
|
+
pass
|
|
442
|
+
"""
|
|
443
|
+
slot_token: str | None = None
|
|
444
|
+
acquired_slot: int | None = None
|
|
445
|
+
start_time = time.monotonic()
|
|
446
|
+
|
|
447
|
+
try:
|
|
448
|
+
while True:
|
|
449
|
+
# 尝试获取任意一个槽位
|
|
450
|
+
for slot in range(max_concurrency):
|
|
451
|
+
slot_key = f"{key}:slot:{slot}"
|
|
452
|
+
token = await self.acquire_lock(
|
|
453
|
+
slot_key,
|
|
454
|
+
timeout=timeout,
|
|
455
|
+
blocking=False,
|
|
456
|
+
)
|
|
457
|
+
if token:
|
|
458
|
+
slot_token = token
|
|
459
|
+
acquired_slot = slot
|
|
460
|
+
yield True
|
|
461
|
+
return
|
|
462
|
+
|
|
463
|
+
if not blocking:
|
|
464
|
+
yield False
|
|
465
|
+
return
|
|
466
|
+
|
|
467
|
+
# 检查是否超时
|
|
468
|
+
if blocking_timeout is not None:
|
|
469
|
+
elapsed = time.monotonic() - start_time
|
|
470
|
+
if elapsed >= blocking_timeout:
|
|
471
|
+
yield False
|
|
472
|
+
return
|
|
473
|
+
|
|
474
|
+
# 等待后重试
|
|
475
|
+
await asyncio.sleep(0.1)
|
|
476
|
+
finally:
|
|
477
|
+
if slot_token and acquired_slot is not None:
|
|
478
|
+
slot_key = f"{key}:slot:{acquired_slot}"
|
|
479
|
+
await self.release_lock(slot_key, slot_token)
|
|
480
|
+
|
|
331
481
|
def __repr__(self) -> str:
|
|
332
482
|
"""字符串表示。"""
|
|
333
483
|
backend_name = self.backend_type if self._backend else "未初始化"
|
|
@@ -0,0 +1,306 @@
|
|
|
1
|
+
"""内存和 Memcached 缓存后端实现。"""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import fnmatch
|
|
7
|
+
import json
|
|
8
|
+
import time
|
|
9
|
+
from datetime import timedelta
|
|
10
|
+
from typing import Any
|
|
11
|
+
|
|
12
|
+
from aury.boot.common.logging import logger
|
|
13
|
+
|
|
14
|
+
from .base import ICache
|
|
15
|
+
|
|
16
|
+
|
|
17
|
+
class MemoryCache(ICache):
|
|
18
|
+
"""内存缓存实现。"""
|
|
19
|
+
|
|
20
|
+
def __init__(self, max_size: int = 1000):
|
|
21
|
+
"""初始化内存缓存。
|
|
22
|
+
|
|
23
|
+
Args:
|
|
24
|
+
max_size: 最大缓存项数
|
|
25
|
+
"""
|
|
26
|
+
self._max_size = max_size
|
|
27
|
+
self._cache: dict[str, tuple[Any, float | None]] = {}
|
|
28
|
+
self._lock = asyncio.Lock()
|
|
29
|
+
|
|
30
|
+
async def get(self, key: str, default: Any = None) -> Any:
|
|
31
|
+
"""获取缓存。"""
|
|
32
|
+
async with self._lock:
|
|
33
|
+
if key not in self._cache:
|
|
34
|
+
return default
|
|
35
|
+
|
|
36
|
+
value, expire_at = self._cache[key]
|
|
37
|
+
|
|
38
|
+
# 检查过期
|
|
39
|
+
if expire_at is not None and asyncio.get_event_loop().time() > expire_at:
|
|
40
|
+
del self._cache[key]
|
|
41
|
+
return default
|
|
42
|
+
|
|
43
|
+
return value
|
|
44
|
+
|
|
45
|
+
async def set(
|
|
46
|
+
self,
|
|
47
|
+
key: str,
|
|
48
|
+
value: Any,
|
|
49
|
+
expire: int | timedelta | None = None,
|
|
50
|
+
) -> bool:
|
|
51
|
+
"""设置缓存。"""
|
|
52
|
+
async with self._lock:
|
|
53
|
+
# 转换过期时间
|
|
54
|
+
expire_at = None
|
|
55
|
+
if expire:
|
|
56
|
+
if isinstance(expire, timedelta):
|
|
57
|
+
expire_seconds = expire.total_seconds()
|
|
58
|
+
else:
|
|
59
|
+
expire_seconds = expire
|
|
60
|
+
expire_at = asyncio.get_event_loop().time() + expire_seconds
|
|
61
|
+
|
|
62
|
+
# 如果超出容量,删除最旧的
|
|
63
|
+
if len(self._cache) >= self._max_size and key not in self._cache:
|
|
64
|
+
# 简单策略:删除第一个
|
|
65
|
+
first_key = next(iter(self._cache))
|
|
66
|
+
del self._cache[first_key]
|
|
67
|
+
|
|
68
|
+
self._cache[key] = (value, expire_at)
|
|
69
|
+
return True
|
|
70
|
+
|
|
71
|
+
async def delete(self, *keys: str) -> int:
|
|
72
|
+
"""删除缓存。"""
|
|
73
|
+
async with self._lock:
|
|
74
|
+
count = 0
|
|
75
|
+
for key in keys:
|
|
76
|
+
if key in self._cache:
|
|
77
|
+
del self._cache[key]
|
|
78
|
+
count += 1
|
|
79
|
+
return count
|
|
80
|
+
|
|
81
|
+
async def exists(self, *keys: str) -> int:
|
|
82
|
+
"""检查缓存是否存在。"""
|
|
83
|
+
async with self._lock:
|
|
84
|
+
count = 0
|
|
85
|
+
for key in keys:
|
|
86
|
+
if key in self._cache:
|
|
87
|
+
_value, expire_at = self._cache[key]
|
|
88
|
+
# 检查是否过期
|
|
89
|
+
if expire_at is None or asyncio.get_event_loop().time() <= expire_at:
|
|
90
|
+
count += 1
|
|
91
|
+
return count
|
|
92
|
+
|
|
93
|
+
async def clear(self) -> None:
|
|
94
|
+
"""清空所有缓存。"""
|
|
95
|
+
async with self._lock:
|
|
96
|
+
self._cache.clear()
|
|
97
|
+
logger.info("内存缓存已清空")
|
|
98
|
+
|
|
99
|
+
async def delete_pattern(self, pattern: str) -> int:
|
|
100
|
+
"""按模式删除缓存。
|
|
101
|
+
|
|
102
|
+
Args:
|
|
103
|
+
pattern: 通配符模式,支持 * 和 ?
|
|
104
|
+
|
|
105
|
+
Returns:
|
|
106
|
+
int: 删除的键数量
|
|
107
|
+
"""
|
|
108
|
+
async with self._lock:
|
|
109
|
+
keys_to_delete = [
|
|
110
|
+
key for key in self._cache
|
|
111
|
+
if fnmatch.fnmatch(key, pattern)
|
|
112
|
+
]
|
|
113
|
+
for key in keys_to_delete:
|
|
114
|
+
del self._cache[key]
|
|
115
|
+
logger.debug(f"按模式删除缓存: {pattern}, 删除 {len(keys_to_delete)} 个键")
|
|
116
|
+
return len(keys_to_delete)
|
|
117
|
+
|
|
118
|
+
async def close(self) -> None:
|
|
119
|
+
"""关闭连接(内存缓存无需关闭)。"""
|
|
120
|
+
await self.clear()
|
|
121
|
+
|
|
122
|
+
async def size(self) -> int:
|
|
123
|
+
"""获取缓存大小。"""
|
|
124
|
+
return len(self._cache)
|
|
125
|
+
|
|
126
|
+
# ==================== 内存锁 ====================
|
|
127
|
+
|
|
128
|
+
async def acquire_lock(
|
|
129
|
+
self,
|
|
130
|
+
key: str,
|
|
131
|
+
token: str,
|
|
132
|
+
timeout: int,
|
|
133
|
+
blocking: bool,
|
|
134
|
+
blocking_timeout: float | None,
|
|
135
|
+
) -> bool:
|
|
136
|
+
"""获取内存锁(单进程)。"""
|
|
137
|
+
start_time = time.monotonic()
|
|
138
|
+
|
|
139
|
+
while True:
|
|
140
|
+
async with self._lock:
|
|
141
|
+
# 检查锁是否存在
|
|
142
|
+
if key not in self._cache:
|
|
143
|
+
# 设置锁
|
|
144
|
+
expire_at = asyncio.get_event_loop().time() + timeout
|
|
145
|
+
self._cache[key] = (token, expire_at)
|
|
146
|
+
return True
|
|
147
|
+
|
|
148
|
+
# 检查锁是否过期
|
|
149
|
+
existing_token, expire_at = self._cache[key]
|
|
150
|
+
if expire_at is not None and asyncio.get_event_loop().time() > expire_at:
|
|
151
|
+
# 锁已过期,重新获取
|
|
152
|
+
new_expire_at = asyncio.get_event_loop().time() + timeout
|
|
153
|
+
self._cache[key] = (token, new_expire_at)
|
|
154
|
+
return True
|
|
155
|
+
|
|
156
|
+
if not blocking:
|
|
157
|
+
return False
|
|
158
|
+
|
|
159
|
+
# 检查是否超时
|
|
160
|
+
if blocking_timeout is not None:
|
|
161
|
+
elapsed = time.monotonic() - start_time
|
|
162
|
+
if elapsed >= blocking_timeout:
|
|
163
|
+
return False
|
|
164
|
+
|
|
165
|
+
# 短暂等待后重试
|
|
166
|
+
await asyncio.sleep(0.05)
|
|
167
|
+
|
|
168
|
+
async def release_lock(self, key: str, token: str) -> bool:
|
|
169
|
+
"""释放内存锁。"""
|
|
170
|
+
async with self._lock:
|
|
171
|
+
if key not in self._cache:
|
|
172
|
+
return False
|
|
173
|
+
|
|
174
|
+
existing_token, _ = self._cache[key]
|
|
175
|
+
if existing_token == token:
|
|
176
|
+
del self._cache[key]
|
|
177
|
+
return True
|
|
178
|
+
return False
|
|
179
|
+
|
|
180
|
+
|
|
181
|
+
class MemcachedCache(ICache):
|
|
182
|
+
"""Memcached缓存实现(可选)。"""
|
|
183
|
+
|
|
184
|
+
def __init__(self, servers: list[str]):
|
|
185
|
+
"""初始化Memcached缓存。
|
|
186
|
+
|
|
187
|
+
Args:
|
|
188
|
+
servers: Memcached服务器列表,如 ["*********:11211"]
|
|
189
|
+
"""
|
|
190
|
+
self._servers = servers
|
|
191
|
+
self._client = None
|
|
192
|
+
|
|
193
|
+
async def initialize(self) -> None:
|
|
194
|
+
"""初始化连接。"""
|
|
195
|
+
try:
|
|
196
|
+
# 需要安装 python-memcached 或 aiomcache
|
|
197
|
+
try:
|
|
198
|
+
import aiomcache
|
|
199
|
+
self._client = aiomcache.Client(
|
|
200
|
+
self._servers[0].split(":")[0],
|
|
201
|
+
int(self._servers[0].split(":")[1]) if ":" in self._servers[0] else 11211,
|
|
202
|
+
)
|
|
203
|
+
logger.info("Memcached缓存初始化成功")
|
|
204
|
+
except ImportError:
|
|
205
|
+
logger.error("请安装 aiomcache: pip install aiomcache")
|
|
206
|
+
raise
|
|
207
|
+
except Exception as exc:
|
|
208
|
+
logger.error(f"Memcached连接失败: {exc}")
|
|
209
|
+
raise
|
|
210
|
+
|
|
211
|
+
async def get(self, key: str, default: Any = None) -> Any:
|
|
212
|
+
"""获取缓存。"""
|
|
213
|
+
if not self._client:
|
|
214
|
+
return default
|
|
215
|
+
|
|
216
|
+
try:
|
|
217
|
+
data = await self._client.get(key.encode())
|
|
218
|
+
if data is None:
|
|
219
|
+
return default
|
|
220
|
+
return json.loads(data.decode())
|
|
221
|
+
except Exception as exc:
|
|
222
|
+
logger.error(f"Memcached获取失败: {key}, {exc}")
|
|
223
|
+
return default
|
|
224
|
+
|
|
225
|
+
async def set(
|
|
226
|
+
self,
|
|
227
|
+
key: str,
|
|
228
|
+
value: Any,
|
|
229
|
+
expire: int | timedelta | None = None,
|
|
230
|
+
) -> bool:
|
|
231
|
+
"""设置缓存。"""
|
|
232
|
+
if not self._client:
|
|
233
|
+
return False
|
|
234
|
+
|
|
235
|
+
try:
|
|
236
|
+
if isinstance(expire, timedelta):
|
|
237
|
+
expire = int(expire.total_seconds())
|
|
238
|
+
|
|
239
|
+
data = json.dumps(value).encode()
|
|
240
|
+
return await self._client.set(key.encode(), data, exptime=expire or 0)
|
|
241
|
+
except Exception as exc:
|
|
242
|
+
logger.error(f"Memcached设置失败: {key}, {exc}")
|
|
243
|
+
return False
|
|
244
|
+
|
|
245
|
+
async def delete(self, *keys: str) -> int:
|
|
246
|
+
"""删除缓存。"""
|
|
247
|
+
if not self._client or not keys:
|
|
248
|
+
return 0
|
|
249
|
+
|
|
250
|
+
count = 0
|
|
251
|
+
for key in keys:
|
|
252
|
+
try:
|
|
253
|
+
if await self._client.delete(key.encode()):
|
|
254
|
+
count += 1
|
|
255
|
+
except Exception as exc:
|
|
256
|
+
logger.error(f"Memcached删除失败: {key}, {exc}")
|
|
257
|
+
return count
|
|
258
|
+
|
|
259
|
+
async def exists(self, *keys: str) -> int:
|
|
260
|
+
"""检查缓存是否存在。"""
|
|
261
|
+
if not self._client or not keys:
|
|
262
|
+
return 0
|
|
263
|
+
|
|
264
|
+
count = 0
|
|
265
|
+
for key in keys:
|
|
266
|
+
try:
|
|
267
|
+
if await self._client.get(key.encode()) is not None:
|
|
268
|
+
count += 1
|
|
269
|
+
except Exception:
|
|
270
|
+
pass
|
|
271
|
+
return count
|
|
272
|
+
|
|
273
|
+
async def clear(self) -> None:
|
|
274
|
+
"""清空所有缓存(Memcached不支持)。"""
|
|
275
|
+
logger.warning("Memcached不支持清空所有缓存")
|
|
276
|
+
|
|
277
|
+
async def delete_pattern(self, pattern: str) -> int:
|
|
278
|
+
"""按模式删除缓存(Memcached 不支持)。"""
|
|
279
|
+
logger.warning("Memcached 不支持模式删除,请使用 Redis 或 Memory 后端")
|
|
280
|
+
return 0
|
|
281
|
+
|
|
282
|
+
async def close(self) -> None:
|
|
283
|
+
"""关闭连接。"""
|
|
284
|
+
if self._client:
|
|
285
|
+
self._client.close()
|
|
286
|
+
logger.info("Memcached连接已关闭")
|
|
287
|
+
|
|
288
|
+
# Memcached 不支持分布式锁
|
|
289
|
+
async def acquire_lock(
|
|
290
|
+
self,
|
|
291
|
+
key: str,
|
|
292
|
+
token: str,
|
|
293
|
+
timeout: int,
|
|
294
|
+
blocking: bool,
|
|
295
|
+
blocking_timeout: float | None,
|
|
296
|
+
) -> bool:
|
|
297
|
+
"""获取锁(Memcached 不支持)。"""
|
|
298
|
+
logger.warning("Memcached 不支持分布式锁,请使用 Redis 或 Memory 后端")
|
|
299
|
+
return False
|
|
300
|
+
|
|
301
|
+
async def release_lock(self, key: str, token: str) -> bool:
|
|
302
|
+
"""释放锁(Memcached 不支持)。"""
|
|
303
|
+
return False
|
|
304
|
+
|
|
305
|
+
|
|
306
|
+
# Public API of this module.
__all__ = ["MemcachedCache", "MemoryCache"]
|
|
@@ -0,0 +1,259 @@
|
|
|
1
|
+
"""Redis 缓存后端实现。"""
|
|
2
|
+
|
|
3
|
+
from __future__ import annotations
|
|
4
|
+
|
|
5
|
+
import asyncio
|
|
6
|
+
import json
|
|
7
|
+
import pickle
|
|
8
|
+
import time
|
|
9
|
+
from collections.abc import Callable
|
|
10
|
+
from datetime import timedelta
|
|
11
|
+
from typing import TYPE_CHECKING, Any
|
|
12
|
+
|
|
13
|
+
from redis.asyncio import Redis
|
|
14
|
+
|
|
15
|
+
from aury.boot.common.logging import logger
|
|
16
|
+
|
|
17
|
+
from .base import ICache
|
|
18
|
+
|
|
19
|
+
if TYPE_CHECKING:
|
|
20
|
+
from aury.boot.infrastructure.clients.redis import RedisClient
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
class RedisCache(ICache):
    """Redis-backed cache implementation.

    Two initialization modes are supported:
    1. A connection URL — the cache creates (and owns) its own connection.
    2. An existing ``RedisClient`` instance (recommended) — the connection
       is shared and is NOT closed by this cache.
    """

    # (De)serializer dispatch tables, hoisted to class level so that
    # get()/set() do not rebuild a dict on every call. Unknown serializer
    # names fall back to plain str()/bytes.decode() handling, matching the
    # original behavior.
    _SERIALIZERS: dict[str, Callable[[Any], bytes]] = {
        "json": lambda v: json.dumps(v).encode(),
        "pickle": pickle.dumps,
    }
    _DESERIALIZERS: dict[str, Callable[[bytes], Any]] = {
        "json": lambda d: json.loads(d.decode()),
        "pickle": pickle.loads,
    }

    def __init__(
        self,
        url: str | None = None,
        *,
        redis_client: RedisClient | None = None,
        serializer: str = "json",
    ):
        """Initialize the Redis cache.

        Args:
            url: Redis connection URL (used only when no client is injected).
            redis_client: Shared ``RedisClient`` instance (recommended).
            serializer: Serialization format, ``"json"`` or ``"pickle"``.
        """
        self._url = url
        self._redis_client = redis_client
        self._serializer = serializer
        self._redis: Redis | None = None
        self._owns_connection = False  # True only when we created the connection

    async def initialize(self) -> None:
        """Establish the Redis connection.

        Prefers the injected ``RedisClient``; otherwise connects via URL
        and verifies the connection with a PING.

        Raises:
            ValueError: neither ``url`` nor ``redis_client`` was provided.
            Exception: propagated connection failure when using a URL.
        """
        # Prefer the shared RedisClient when available.
        if self._redis_client is not None:
            self._redis = self._redis_client.connection
            self._owns_connection = False
            logger.info("Redis缓存初始化成功(使用 RedisClient)")
            return

        if not self._url:
            raise ValueError("Redis缓存需要提供 url 或 redis_client 参数")

        try:
            self._redis = Redis.from_url(
                self._url,
                encoding="utf-8",
                decode_responses=False,
                socket_connect_timeout=5,
                socket_timeout=5,
            )
            await self._redis.ping()  # fail fast if the server is unreachable
            self._owns_connection = True
            logger.info("Redis缓存初始化成功")
        except Exception as exc:
            logger.error(f"Redis连接失败: {exc}")
            raise

    async def get(self, key: str, default: Any = None) -> Any:
        """Get a cached value, returning ``default`` on miss or error."""
        if not self._redis:
            return default

        try:
            data = await self._redis.get(key)
            if data is None:
                return default

            deserializer = self._DESERIALIZERS.get(self._serializer)
            if deserializer:
                return deserializer(data)
            # Unknown serializer: return the raw payload decoded as text.
            return data.decode()
        except Exception as exc:
            logger.error(f"Redis获取失败: {key}, {exc}")
            return default

    async def set(
        self,
        key: str,
        value: Any,
        expire: int | timedelta | None = None,
    ) -> bool:
        """Set a cached value with an optional TTL.

        Args:
            key: Cache key.
            value: Value to store (serialized per the configured serializer).
            expire: TTL in seconds or as a ``timedelta``; None means no expiry.

        Returns:
            bool: True on success, False when uninitialized or on error.
        """
        if not self._redis:
            return False

        try:
            serializer = self._SERIALIZERS.get(self._serializer)
            # Unknown serializer: fall back to the value's string form.
            data = serializer(value) if serializer else str(value).encode()

            # Normalize timedelta TTLs to whole seconds.
            if isinstance(expire, timedelta):
                expire = int(expire.total_seconds())

            await self._redis.set(key, data, ex=expire)
            return True
        except Exception as exc:
            logger.error(f"Redis设置失败: {key}, {exc}")
            return False

    async def delete(self, *keys: str) -> int:
        """Delete the given keys; returns the number actually removed."""
        if not self._redis or not keys:
            return 0

        try:
            return await self._redis.delete(*keys)
        except Exception as exc:
            logger.error(f"Redis删除失败: {keys}, {exc}")
            return 0

    async def exists(self, *keys: str) -> int:
        """Return how many of the given keys exist."""
        if not self._redis or not keys:
            return 0

        try:
            return await self._redis.exists(*keys)
        except Exception as exc:
            logger.error(f"Redis检查失败: {keys}, {exc}")
            return 0

    async def clear(self) -> None:
        """Flush the current Redis database (removes ALL keys in the DB)."""
        if self._redis:
            await self._redis.flushdb()
            logger.info("Redis缓存已清空")

    async def delete_pattern(self, pattern: str) -> int:
        """Delete all keys matching a glob pattern.

        Args:
            pattern: Glob-style pattern, e.g. ``"todo:*"``.

        Returns:
            int: number of keys deleted (0 when uninitialized or on error).
        """
        if not self._redis:
            return 0

        try:
            # SCAN instead of KEYS: iterates incrementally and does not
            # block the server on large keyspaces.
            count = 0
            cursor = 0
            while True:
                cursor, keys = await self._redis.scan(cursor, match=pattern, count=100)
                if keys:
                    count += await self._redis.delete(*keys)
                if cursor == 0:
                    break
            logger.debug(f"按模式删除缓存: {pattern}, 删除 {count} 个键")
            return count
        except Exception as exc:
            logger.error(f"Redis模式删除失败: {pattern}, {exc}")
            return 0

    async def close(self) -> None:
        """Close the connection, but only when this cache owns it.

        A shared RedisClient connection is left open; in all cases the
        local reference is dropped.
        """
        if self._redis and self._owns_connection:
            await self._redis.close()
            logger.info("Redis连接已关闭")
        self._redis = None

    @property
    def redis(self) -> Redis | None:
        """The underlying Redis client, or None before initialization."""
        return self._redis

    # ==================== Distributed lock ====================
    # TODO: possible future enhancements:
    # - Watchdog auto-renewal, so a lock is not lost when the critical
    #   section outlives the lock timeout
    # - Reentrant locks
    # - Redlock across multiple Redis instances

    async def acquire_lock(
        self,
        key: str,
        token: str,
        timeout: int,
        blocking: bool,
        blocking_timeout: float | None,
    ) -> bool:
        """Acquire a Redis distributed lock.

        Args:
            key: Lock key.
            token: Caller-unique token proving ownership.
            timeout: Lock TTL in seconds.
            blocking: Whether to keep retrying until acquired.
            blocking_timeout: Max seconds to keep retrying (None = forever).

        Returns:
            bool: True when the lock was acquired.
        """
        if not self._redis:
            return False

        start_time = time.monotonic()

        while True:
            # SET NX EX is a single atomic operation.
            acquired = await self._redis.set(key, token, nx=True, ex=timeout)
            if acquired:
                return True

            if not blocking:
                return False

            # Give up once the blocking timeout is exhausted.
            if blocking_timeout is not None:
                elapsed = time.monotonic() - start_time
                if elapsed >= blocking_timeout:
                    return False

            # Back off briefly before retrying.
            await asyncio.sleep(0.05)

    async def release_lock(self, key: str, token: str) -> bool:
        """Release a Redis lock (Lua script guarantees atomicity).

        The key is deleted only when its stored token matches the caller's.
        Errors are logged and reported as False, consistent with the other
        best-effort methods of this class.
        """
        if not self._redis:
            return False

        # Lua script: delete only when the token matches.
        script = """
        if redis.call("get", KEYS[1]) == ARGV[1] then
            return redis.call("del", KEYS[1])
        else
            return 0
        end
        """
        try:
            result = await self._redis.eval(script, 1, key, token)
            return bool(result)
        except Exception as exc:
            logger.error(f"Redis释放锁失败: {key}, {exc}")
            return False
|
|
257
|
+
|
|
258
|
+
|
|
259
|
+
# Public API of this module.
__all__ = ["RedisCache"]
|
|
@@ -65,6 +65,10 @@ class RedisStreamMQ(IMQ):
|
|
|
65
65
|
self._max_len = max_len
|
|
66
66
|
self._consuming = False
|
|
67
67
|
self._owns_client = False
|
|
68
|
+
self._log_sample_counter = 0 # 日志采样计数器
|
|
69
|
+
|
|
70
|
+
# 日志采样率:每 N 个 send 打印 1 次
|
|
71
|
+
LOG_SAMPLE_RATE = 100
|
|
68
72
|
|
|
69
73
|
async def _ensure_client(self) -> None:
|
|
70
74
|
"""确保 Redis 客户端已初始化。"""
|
|
@@ -122,7 +126,10 @@ class RedisStreamMQ(IMQ):
|
|
|
122
126
|
else:
|
|
123
127
|
msg_id = await self._client.connection.xadd(stream_key, data)
|
|
124
128
|
|
|
125
|
-
|
|
129
|
+
# 采样日志:每 N 个消息打印 1 次
|
|
130
|
+
self._log_sample_counter += 1
|
|
131
|
+
if self._log_sample_counter % self.LOG_SAMPLE_RATE == 1:
|
|
132
|
+
logger.debug(f"发送消息到 Stream: {stream_key}, id={msg_id}, count={self._log_sample_counter}")
|
|
126
133
|
return message.id
|
|
127
134
|
|
|
128
135
|
async def receive(
|
|
@@ -420,7 +427,7 @@ class RedisStreamMQ(IMQ):
|
|
|
420
427
|
"""关闭连接。"""
|
|
421
428
|
self._consuming = False
|
|
422
429
|
if self._owns_client and self._client:
|
|
423
|
-
await self._client.
|
|
430
|
+
await self._client.cleanup()
|
|
424
431
|
self._client = None
|
|
425
432
|
logger.debug("Redis Stream 消息队列已关闭")
|
|
426
433
|
|
|
@@ -1,5 +1,5 @@
|
|
|
1
1
|
aury/boot/__init__.py,sha256=pCno-EInnpIBa1OtxNYF-JWf9j95Cd2h6vmu0xqa_-4,1791
|
|
2
|
-
aury/boot/_version.py,sha256=
|
|
2
|
+
aury/boot/_version.py,sha256=4ZEdlNp6MHArYK_GfVRp7IC9mBDzzgBih3DH2hlHeCo,706
|
|
3
3
|
aury/boot/application/__init__.py,sha256=I2KqNVdYg2q5nlOXr0TtFGyHmhj4oWdaR6ZB73Mwg7Y,3041
|
|
4
4
|
aury/boot/application/adapter/__init__.py,sha256=e1bcSb1bxUMfofTwiCuHBZJk5-STkMCWPF2EJXHQ7UU,3976
|
|
5
5
|
aury/boot/application/adapter/base.py,sha256=Ar_66fiHPDEmV-1DKnqXKwc53p3pozG31bgTJTEUriY,15763
|
|
@@ -32,7 +32,7 @@ aury/boot/application/middleware/__init__.py,sha256=T01fmbcdO0Sm6JE74g23uuDyebBG
|
|
|
32
32
|
aury/boot/application/middleware/logging.py,sha256=kStfLskA_srNvWIuNMs0ptZ4Wr9-ke6hYl8kESxbhxc,13509
|
|
33
33
|
aury/boot/application/migrations/__init__.py,sha256=Z5Gizx7f3AImRcl3cooiIDAZcNi5W-6GvB7mK5w1TNA,204
|
|
34
34
|
aury/boot/application/migrations/manager.py,sha256=G7mzkNA3MFjyQmM2UwY0ZFNgGGVS4W5GoG2Sbj5AUXk,23685
|
|
35
|
-
aury/boot/application/migrations/setup.py,sha256=
|
|
35
|
+
aury/boot/application/migrations/setup.py,sha256=mA7q8JEkVVDW55PdSE6KUhhmDNMMWmbQxqfwOts4vIw,6889
|
|
36
36
|
aury/boot/application/rpc/__init__.py,sha256=0mVyksLbTOLYMN4OtYrdf9naBNVQnAU9pt2kS2w_9ZY,2064
|
|
37
37
|
aury/boot/application/rpc/base.py,sha256=KqdWupF2PTizr--jE0KgJUDCfBap72ZWk9FtU5FM9_8,2618
|
|
38
38
|
aury/boot/application/rpc/client.py,sha256=ApW4h_DrwnnkAh921TVUd4fvdWP-rVIse3VW1_1TLPk,9113
|
|
@@ -136,11 +136,13 @@ aury/boot/domain/service/base.py,sha256=6sN0nf8r5yUZsE6AcZOiOXFCqzb61oCxTfrWlqjI
|
|
|
136
136
|
aury/boot/domain/transaction/__init__.py,sha256=EKnjJ235SYjMCvGIuLVlTdYRzU35RxNMejRGUExYqqE,15488
|
|
137
137
|
aury/boot/infrastructure/__init__.py,sha256=DDEr_BIL5OyMJjNlI05jGIUrSHn6MPdnW9xnCS4eHfg,2949
|
|
138
138
|
aury/boot/infrastructure/cache/__init__.py,sha256=G40uCkpJ1jSs2fc_CBDem73iQQzCcp-4GG1WpDJzwaA,658
|
|
139
|
-
aury/boot/infrastructure/cache/backends.py,sha256=
|
|
140
|
-
aury/boot/infrastructure/cache/base.py,sha256=
|
|
139
|
+
aury/boot/infrastructure/cache/backends.py,sha256=wh9U2LjlXV22dpV87ID8DL9Rmgztw3yMUrDjJdGgTQw,245
|
|
140
|
+
aury/boot/infrastructure/cache/base.py,sha256=3XA6v_zwsgMSUDN8aG7eu_2-zCnGWF9eeEyVZxxezBU,2621
|
|
141
141
|
aury/boot/infrastructure/cache/exceptions.py,sha256=KZsFIHXW3_kOh_KB93EVZJKbiDvDw8aloAefJ3kasP8,622
|
|
142
142
|
aury/boot/infrastructure/cache/factory.py,sha256=aF74JoiiSKFgctqqh2Z8OtGRS2Am_ou-I40GyygLzC0,2489
|
|
143
|
-
aury/boot/infrastructure/cache/manager.py,sha256=
|
|
143
|
+
aury/boot/infrastructure/cache/manager.py,sha256=2jlshbO4NqpPxH-8DBiMFNAvWuZUI3atUCsw9GGlzc8,16807
|
|
144
|
+
aury/boot/infrastructure/cache/memory.py,sha256=qGhLKKjGsEUHjVRFMV6A33MB_1iPaKCEEkT6VFrLkQY,9832
|
|
145
|
+
aury/boot/infrastructure/cache/redis.py,sha256=MxLwqnBrPWmkS_AGDq4hKfPlBrF358cq_nKbUlP-JYE,8085
|
|
144
146
|
aury/boot/infrastructure/channel/__init__.py,sha256=NmjddenZPz1Dcl0glwIF1Xn9gxBzvGvlOlzhV3eEnEQ,664
|
|
145
147
|
aury/boot/infrastructure/channel/base.py,sha256=TDiP7pXyd2ixiOM3cbxqCSOluGLTkmLCa8pv-KyQ0jo,2941
|
|
146
148
|
aury/boot/infrastructure/channel/manager.py,sha256=GT6eG6PglduKAr23i1PSmjjTQsALvGGoLjYiQ33aZiw,7488
|
|
@@ -190,8 +192,8 @@ aury/boot/infrastructure/mq/base.py,sha256=ld4wtzhO_6y8wJRXL1DagqJiwhd0VQ6MJlJGD
|
|
|
190
192
|
aury/boot/infrastructure/mq/manager.py,sha256=Bu4E1Tgz0CzFvJuCS9_fBMj9eAqmXcZp8aFIYhvNUl4,7692
|
|
191
193
|
aury/boot/infrastructure/mq/backends/__init__.py,sha256=10nggw2V-AzuZ1vvzq_ksoXR4FI3e4BR36EfY49Pek4,200
|
|
192
194
|
aury/boot/infrastructure/mq/backends/rabbitmq.py,sha256=0NWgPKEwtbmI63EVvKINdfXXDNyOvuOOP9LlBzqH91E,5493
|
|
193
|
-
aury/boot/infrastructure/mq/backends/redis.py,sha256=
|
|
194
|
-
aury/boot/infrastructure/mq/backends/redis_stream.py,sha256=
|
|
195
|
+
aury/boot/infrastructure/mq/backends/redis.py,sha256=B89U7mqIceUsCXE4G3u1u6aFM9hv4mmLLwuCYq1T9tQ,5281
|
|
196
|
+
aury/boot/infrastructure/mq/backends/redis_stream.py,sha256=p2WTj10-zbxQ_2NPU97w-n4DZ8KSHhLjqcnplLPCw4U,14761
|
|
195
197
|
aury/boot/infrastructure/scheduler/__init__.py,sha256=eTRJ5dSPcKvyFvLVtraoQteXTTDDGwIrmw06J2hoNdA,323
|
|
196
198
|
aury/boot/infrastructure/scheduler/exceptions.py,sha256=ROltrhSctVWA-6ulnjuYeHAk3ZF-sykDoesuierYzew,634
|
|
197
199
|
aury/boot/infrastructure/scheduler/manager.py,sha256=OHQOHQlcoN8yFnky4kfuhsEIk39qX6nLZ7xJ51tfg68,23130
|
|
@@ -210,7 +212,7 @@ aury/boot/testing/client.py,sha256=KOg1EemuIVsBG68G5y0DjSxZGcIQVdWQ4ASaHE3o1R0,4
|
|
|
210
212
|
aury/boot/testing/factory.py,sha256=8GvwX9qIDu0L65gzJMlrWB0xbmJ-7zPHuwk3eECULcg,5185
|
|
211
213
|
aury/boot/toolkit/__init__.py,sha256=AcyVb9fDf3CaEmJPNkWC4iGv32qCPyk4BuFKSuNiJRQ,334
|
|
212
214
|
aury/boot/toolkit/http/__init__.py,sha256=zIPmpIZ9Qbqe25VmEr7jixoY2fkRbLm7NkCB9vKpg6I,11039
|
|
213
|
-
aury_boot-0.0.
|
|
214
|
-
aury_boot-0.0.
|
|
215
|
-
aury_boot-0.0.
|
|
216
|
-
aury_boot-0.0.
|
|
215
|
+
aury_boot-0.0.39.dist-info/METADATA,sha256=qX6IZza7loT9gQzeJlbhPz1QKshBy3fZH__Fic8U1s0,8694
|
|
216
|
+
aury_boot-0.0.39.dist-info/WHEEL,sha256=WLgqFyCfm_KASv4WHyYy0P3pM_m7J5L9k2skdKLirC8,87
|
|
217
|
+
aury_boot-0.0.39.dist-info/entry_points.txt,sha256=f9KXEkDIGc0BGkgBvsNx_HMz9VhDjNxu26q00jUpDwQ,49
|
|
218
|
+
aury_boot-0.0.39.dist-info/RECORD,,
|
|
File without changes
|
|
File without changes
|