nb-cache 0.1.0__py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- nb_cache/__init__.py +97 -0
- nb_cache/_compat.py +58 -0
- nb_cache/backends/__init__.py +5 -0
- nb_cache/backends/base.py +186 -0
- nb_cache/backends/dual.py +253 -0
- nb_cache/backends/memory.py +289 -0
- nb_cache/backends/redis.py +362 -0
- nb_cache/condition.py +65 -0
- nb_cache/decorators/__init__.py +18 -0
- nb_cache/decorators/bloom.py +193 -0
- nb_cache/decorators/cache.py +136 -0
- nb_cache/decorators/circuit_breaker.py +148 -0
- nb_cache/decorators/early.py +123 -0
- nb_cache/decorators/failover.py +86 -0
- nb_cache/decorators/hit.py +136 -0
- nb_cache/decorators/iterator.py +141 -0
- nb_cache/decorators/locked.py +176 -0
- nb_cache/decorators/rate_limit.py +180 -0
- nb_cache/decorators/soft.py +138 -0
- nb_cache/exceptions.py +47 -0
- nb_cache/helpers.py +37 -0
- nb_cache/key.py +155 -0
- nb_cache/middleware.py +64 -0
- nb_cache/serialize.py +126 -0
- nb_cache/tags.py +55 -0
- nb_cache/transaction.py +113 -0
- nb_cache/ttl.py +67 -0
- nb_cache/wrapper.py +619 -0
- nb_cache-0.1.0.dist-info/METADATA +766 -0
- nb_cache-0.1.0.dist-info/RECORD +32 -0
- nb_cache-0.1.0.dist-info/WHEEL +5 -0
- nb_cache-0.1.0.dist-info/top_level.txt +1 -0
# -*- coding: utf-8 -*-
"""nb_cache — a more capable caching decorator library.

Supports both synchronous and asynchronous functions, locking to
prevent cache stampede, memory and Redis cache backends, and a dual
Redis + memory cache for better read performance.

Usage::

    from nb_cache import Cache

    cache = Cache()
    cache.setup("mem://")

    @cache.cache(ttl=60)
    def get_data(key):
        return expensive_query(key)

    @cache.cache(ttl="1h", lock=True)
    async def get_data_async(key):
        return await expensive_query_async(key)
"""

__version__ = "0.1.0"

# Public API re-exports; everything listed here also appears in __all__.
from nb_cache.wrapper import Cache, register_backend
from nb_cache.condition import NOT_NONE, with_exceptions, only_exceptions
from nb_cache.exceptions import (
    CacheError,
    BackendNotInitializedError,
    CacheBackendInteractionError,
    LockError,
    LockedError,
    CircuitBreakerOpen,
    RateLimitError,
    SerializationError,
    TagError,
)
from nb_cache.transaction import TransactionMode
from nb_cache.key import get_cache_key_template
from nb_cache.helpers import noself, add_prefix, memory_limit, invalidate_further
from nb_cache.serialize import (
    Serializer,
    PickleSerializer,
    JsonSerializer,
    GzipCompressor,
    ZlibCompressor,
    HashSigner,
)
from nb_cache.ttl import ttl_to_seconds
from nb_cache.backends.base import BaseBackend
from nb_cache.backends.memory import MemoryBackend

# Ready-to-use in-process cache instance, created at import time as a
# convenience default (no explicit setup required by callers).
mem = Cache()
mem.setup("mem://")

__all__ = [
    '__version__',
    # Main class
    'Cache',
    'mem',
    # Conditions
    'NOT_NONE',
    'with_exceptions',
    'only_exceptions',
    # Exceptions
    'CacheError',
    'BackendNotInitializedError',
    'CacheBackendInteractionError',
    'LockError',
    'LockedError',
    'CircuitBreakerOpen',
    'RateLimitError',
    'SerializationError',
    'TagError',
    # Transaction
    'TransactionMode',
    # Key
    'get_cache_key_template',
    # Helpers
    'noself',
    'add_prefix',
    'memory_limit',
    'invalidate_further',
    # Serialization
    'Serializer',
    'PickleSerializer',
    'JsonSerializer',
    'GzipCompressor',
    'ZlibCompressor',
    'HashSigner',
    # TTL
    'ttl_to_seconds',
    # Backends
    'BaseBackend',
    'MemoryBackend',
    'register_backend',
]
|
# -*- coding: utf-8 -*-
"""Python version compatibility utilities for Python 3.6+."""
import sys
import asyncio
import inspect

# Version gates used by the helpers below to pick the right asyncio APIs.
PY36 = sys.version_info[:2] == (3, 6)
PY37_PLUS = sys.version_info >= (3, 7)
PY38_PLUS = sys.version_info >= (3, 8)
PY310_PLUS = sys.version_info >= (3, 10)
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
def get_event_loop():
    """Return the running event loop, or a usable loop for this thread.

    On Python 3.10+, ``asyncio.get_event_loop()`` is deprecated when no
    loop is running, so we prefer ``get_running_loop()``.  When no loop
    exists, we create one and register it with ``set_event_loop`` —
    the original code returned an unregistered new loop, so every call
    outside a running loop leaked a fresh, disconnected loop.
    """
    if PY310_PLUS:
        try:
            return asyncio.get_running_loop()
        except RuntimeError:
            # No running loop: create one and install it for this thread
            # so subsequent calls return the same loop.
            loop = asyncio.new_event_loop()
            asyncio.set_event_loop(loop)
            return loop
    else:
        # Pre-3.10: get_event_loop() already creates/registers as needed.
        return asyncio.get_event_loop()
|
|
21
|
+
|
|
22
|
+
|
|
23
|
+
def is_coroutine_function(func):
    """Return True if *func* is a coroutine function, seeing through decorators.

    Decorators applying ``functools.wraps`` set ``__wrapped__`` on the
    wrapper; ``inspect.unwrap`` follows the whole chain.  The original
    implementation only looked one level deep, so a coroutine wrapped by
    two plain-function decorators was misreported as sync.
    """
    return asyncio.iscoroutinefunction(inspect.unwrap(func))
|
|
27
|
+
|
|
28
|
+
|
|
29
|
+
def create_task(coro):
    """Schedule *coro* on the current event loop; return the task/future."""
    loop = get_event_loop()
    if not hasattr(loop, 'create_task'):
        # Fallback for exotic loop implementations lacking create_task().
        return asyncio.ensure_future(coro, loop=loop)
    return loop.create_task(coro)
|
|
34
|
+
|
|
35
|
+
|
|
36
|
+
def run_sync(coro):
    """Run a coroutine synchronously. Works across Python versions.

    If an event loop is already running in this thread (i.e. we were
    called from inside async code), the coroutine is executed on a fresh
    loop in a helper thread, since run_until_complete() cannot be called
    on a running loop.
    """
    if PY37_PLUS:
        try:
            loop = asyncio.get_event_loop()
            if loop.is_running():
                # Re-entrancy case: hand the coroutine to a one-worker
                # thread pool and let asyncio.run drive it there.
                import concurrent.futures
                with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
                    future = pool.submit(asyncio.run, coro)
                    return future.result()
            else:
                return loop.run_until_complete(coro)
        except RuntimeError:
            # No usable loop in this thread; asyncio.run creates its own.
            return asyncio.run(coro)
    else:
        # Python 3.6: asyncio.run() does not exist, emulate it manually.
        loop = asyncio.get_event_loop()
        if loop.is_running():
            import concurrent.futures
            with concurrent.futures.ThreadPoolExecutor(max_workers=1) as pool:
                new_loop = asyncio.new_event_loop()
                future = pool.submit(new_loop.run_until_complete, coro)
                return future.result()
        return loop.run_until_complete(coro)
|
|
@@ -0,0 +1,186 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""Abstract base class for cache backends.
|
|
3
|
+
|
|
4
|
+
Every backend must provide both sync and async interfaces.
|
|
5
|
+
"""
|
|
6
|
+
import asyncio
|
|
7
|
+
|
|
8
|
+
|
|
9
|
+
class BaseBackend(object):
    """Abstract cache backend exposing parallel sync and async interfaces.

    Concrete backends override the core operations; the batch helpers
    (``get_many``/``set_many``/``delete_many`` and their ``_sync``
    twins) have naive default implementations in terms of the core ops.
    """

    def __init__(self, **kwargs):
        # Flipped on by init()/init_sync(), off by close()/close_sync().
        self._is_init = False

    @property
    def is_init(self):
        """Whether the backend has been initialised."""
        return self._is_init

    # --- Lifecycle ---

    async def init(self):
        self._is_init = True

    def init_sync(self):
        self._is_init = True

    async def close(self):
        self._is_init = False

    def close_sync(self):
        self._is_init = False

    async def ping(self):
        return True

    def ping_sync(self):
        return True

    # --- Core GET/SET/DELETE (async) ---

    async def get(self, key):
        """Return the cached value for *key*, or None on a miss."""
        raise NotImplementedError

    async def set(self, key, value, ttl=None):
        """Store *value* under *key*, optionally expiring after *ttl* seconds."""
        raise NotImplementedError

    async def delete(self, key):
        """Remove *key* from the cache."""
        raise NotImplementedError

    async def exists(self, key):
        """Return True if *key* is present."""
        raise NotImplementedError

    async def expire(self, key, ttl):
        """Reset the expiry of *key* to *ttl* seconds."""
        raise NotImplementedError

    async def get_expire(self, key):
        """Return remaining TTL in seconds, or -1 if no expiry, or None if key missing."""
        raise NotImplementedError

    async def clear(self):
        """Drop every entry held by this backend."""
        raise NotImplementedError

    async def incr(self, key, amount=1):
        """Atomically increment the integer at *key* by *amount*."""
        raise NotImplementedError

    # --- Batch operations (async) ---

    async def get_many(self, *keys):
        # Naive default: one round-trip per key; backends may batch natively.
        return [await self.get(key) for key in keys]

    async def set_many(self, pairs, ttl=None):
        for key, value in pairs.items():
            await self.set(key, value, ttl=ttl)

    async def delete_many(self, *keys):
        for key in keys:
            await self.delete(key)

    async def delete_match(self, pattern):
        """Delete every key matching *pattern*."""
        raise NotImplementedError

    # --- Scan / Match (async) ---

    async def scan(self, pattern):
        """Iterate keys matching *pattern*."""
        raise NotImplementedError

    async def get_match(self, pattern):
        """Return (key, value) pairs whose keys match *pattern*."""
        raise NotImplementedError

    async def get_keys_count(self):
        """Return the number of keys stored."""
        raise NotImplementedError

    async def get_size(self):
        """Return the approximate storage size of this backend."""
        raise NotImplementedError

    # --- Lock (async) ---

    async def set_lock(self, key, ttl):
        """Try to acquire a lock on *key* that auto-expires after *ttl*."""
        raise NotImplementedError

    async def unlock(self, key):
        """Release the lock held on *key*."""
        raise NotImplementedError

    async def is_locked(self, key):
        """Return True if *key* is currently locked."""
        raise NotImplementedError

    # --- Core GET/SET/DELETE (sync) ---

    def get_sync(self, key):
        raise NotImplementedError

    def set_sync(self, key, value, ttl=None):
        raise NotImplementedError

    def delete_sync(self, key):
        raise NotImplementedError

    def exists_sync(self, key):
        raise NotImplementedError

    def expire_sync(self, key, ttl):
        raise NotImplementedError

    def get_expire_sync(self, key):
        raise NotImplementedError

    def clear_sync(self):
        raise NotImplementedError

    def incr_sync(self, key, amount=1):
        raise NotImplementedError

    # --- Batch (sync) ---

    def get_many_sync(self, *keys):
        # Mirrors get_many(); sequential lookups through get_sync().
        values = []
        for key in keys:
            values.append(self.get_sync(key))
        return values

    def set_many_sync(self, pairs, ttl=None):
        for key, value in pairs.items():
            self.set_sync(key, value, ttl=ttl)

    def delete_many_sync(self, *keys):
        for key in keys:
            self.delete_sync(key)

    def delete_match_sync(self, pattern):
        raise NotImplementedError

    # --- Scan (sync) ---

    def scan_sync(self, pattern):
        raise NotImplementedError

    def get_match_sync(self, pattern):
        raise NotImplementedError

    def get_keys_count_sync(self):
        raise NotImplementedError

    def get_size_sync(self):
        raise NotImplementedError

    # --- Lock (sync) ---

    def set_lock_sync(self, key, ttl):
        raise NotImplementedError

    def unlock_sync(self, key):
        raise NotImplementedError

    def is_locked_sync(self, key):
        raise NotImplementedError
|
|
@@ -0,0 +1,253 @@
|
|
|
1
|
+
# -*- coding: utf-8 -*-
|
|
2
|
+
"""Dual cache backend: memory (L1) + Redis (L2).
|
|
3
|
+
|
|
4
|
+
Reads check memory first, then Redis on miss.
|
|
5
|
+
Writes go to both memory and Redis.
|
|
6
|
+
"""
|
|
7
|
+
from nb_cache.backends.base import BaseBackend
|
|
8
|
+
from nb_cache.backends.memory import MemoryBackend
|
|
9
|
+
from nb_cache.backends.redis import RedisBackend
|
|
10
|
+
from nb_cache.ttl import ttl_to_seconds
|
|
11
|
+
|
|
12
|
+
|
|
13
|
+
class DualBackend(BaseBackend):
    """Two-layer cache: fast in-memory L1 + Redis L2.

    Reads consult the memory layer first and fall back to Redis on a
    miss; values found in Redis are promoted into memory.  Writes go to
    both layers.  Locks, scans, TTL queries and size accounting are
    delegated to Redis, which is the source of truth.

    Args:
        memory_size: Max entries for the memory layer.
        local_ttl: Default TTL for local memory cache (seconds).
            If set, local cache uses this TTL regardless of the outer TTL,
            which is useful for keeping local cache short-lived.
        url/host/port/db/password: Redis connection parameters.
        prefix: Redis key prefix.

    Note:
        A cached value of None is indistinguishable from a miss in both
        layers, so None results are effectively not cached.
    """

    def __init__(self, memory_size=1000, local_ttl=None,
                 url=None, host="localhost", port=6379, db=0,
                 password=None, prefix="", **kwargs):
        super(DualBackend, self).__init__(**kwargs)
        self._local_ttl = local_ttl
        self._memory = MemoryBackend(size=memory_size)
        self._redis = RedisBackend(
            url=url, host=host, port=port, db=db,
            password=password, prefix=prefix, **kwargs
        )

    def _local_ttl_val(self, ttl):
        """TTL to use for the memory layer: local_ttl wins when configured."""
        if self._local_ttl is not None:
            return self._local_ttl
        return ttl

    # --- Lifecycle ---

    async def init(self):
        await self._memory.init()
        await self._redis.init()
        await super(DualBackend, self).init()

    def init_sync(self):
        self._memory.init_sync()
        self._redis.init_sync()
        super(DualBackend, self).init_sync()

    async def close(self):
        await self._memory.close()
        await self._redis.close()
        await super(DualBackend, self).close()

    def close_sync(self):
        self._memory.close_sync()
        self._redis.close_sync()
        super(DualBackend, self).close_sync()

    async def ping(self):
        # Only Redis matters for health: the memory layer cannot fail.
        return await self._redis.ping()

    def ping_sync(self):
        return self._redis.ping_sync()

    # --- Async interface ---

    async def get(self, key):
        val = await self._memory.get(key)
        if val is not None:
            return val
        val = await self._redis.get(key)
        if val is not None:
            # Promote to L1.  NOTE: with no local_ttl configured the
            # promoted copy has no expiry and may outlive the Redis entry.
            local_ttl = self._local_ttl_val(None)
            await self._memory.set(key, val, ttl=local_ttl)
        return val

    async def set(self, key, value, ttl=None):
        # Write-through: Redis first (source of truth), then memory.
        await self._redis.set(key, value, ttl=ttl)
        await self._memory.set(key, value, ttl=self._local_ttl_val(ttl))

    async def delete(self, key):
        await self._memory.delete(key)
        return await self._redis.delete(key)

    async def exists(self, key):
        if await self._memory.exists(key):
            return True
        return await self._redis.exists(key)

    async def expire(self, key, ttl):
        await self._memory.expire(key, self._local_ttl_val(ttl))
        return await self._redis.expire(key, ttl)

    async def get_expire(self, key):
        # Redis TTL is authoritative; the memory copy may differ.
        return await self._redis.get_expire(key)

    async def clear(self):
        await self._memory.clear()
        await self._redis.clear()

    async def incr(self, key, amount=1):
        # Redis is authoritative for counters; mirror the result locally.
        result = await self._redis.incr(key, amount)
        # Fix: use the async memory API (the original called set_sync
        # from this async method, inconsistent with get()/set() above).
        await self._memory.set(key, result, ttl=self._local_ttl_val(None))
        return result

    async def get_many(self, *keys):
        results = []
        missed_keys = []
        missed_indices = []
        for i, k in enumerate(keys):
            val = await self._memory.get(k)
            results.append(val)
            if val is None:
                missed_keys.append(k)
                missed_indices.append(i)
        if missed_keys:
            # Fill L1 misses from Redis in one batch, promoting hits.
            redis_vals = await self._redis.get_many(*missed_keys)
            for idx, val in zip(missed_indices, redis_vals):
                results[idx] = val
                if val is not None:
                    k = keys[idx]
                    await self._memory.set(k, val, ttl=self._local_ttl_val(None))
        return results

    async def set_many(self, pairs, ttl=None):
        await self._redis.set_many(pairs, ttl=ttl)
        await self._memory.set_many(pairs, ttl=self._local_ttl_val(ttl))

    async def delete_many(self, *keys):
        await self._memory.delete_many(*keys)
        await self._redis.delete_many(*keys)

    async def delete_match(self, pattern):
        # NOTE(review): the memory layer is purged via its sync API here —
        # presumably MemoryBackend's pattern delete is sync-only; verify.
        self._memory.delete_match_sync(pattern)
        await self._redis.delete_match(pattern)

    async def scan(self, pattern):
        return await self._redis.scan(pattern)

    async def get_match(self, pattern):
        return await self._redis.get_match(pattern)

    async def get_keys_count(self):
        return await self._redis.get_keys_count()

    async def get_size(self):
        return await self._redis.get_size()

    async def set_lock(self, key, ttl):
        # Locks live only in Redis so they coordinate across processes.
        return await self._redis.set_lock(key, ttl)

    async def unlock(self, key):
        return await self._redis.unlock(key)

    async def is_locked(self, key):
        return await self._redis.is_locked(key)

    # --- Sync interface ---

    def get_sync(self, key):
        val = self._memory.get_sync(key)
        if val is not None:
            return val
        val = self._redis.get_sync(key)
        if val is not None:
            # Promote to L1 (same staleness caveat as the async get()).
            local_ttl = self._local_ttl_val(None)
            self._memory.set_sync(key, val, ttl=local_ttl)
        return val

    def set_sync(self, key, value, ttl=None):
        self._redis.set_sync(key, value, ttl=ttl)
        self._memory.set_sync(key, value, ttl=self._local_ttl_val(ttl))

    def delete_sync(self, key):
        self._memory.delete_sync(key)
        return self._redis.delete_sync(key)

    def exists_sync(self, key):
        if self._memory.exists_sync(key):
            return True
        return self._redis.exists_sync(key)

    def expire_sync(self, key, ttl):
        self._memory.expire_sync(key, self._local_ttl_val(ttl))
        return self._redis.expire_sync(key, ttl)

    def get_expire_sync(self, key):
        return self._redis.get_expire_sync(key)

    def clear_sync(self):
        self._memory.clear_sync()
        self._redis.clear_sync()

    def incr_sync(self, key, amount=1):
        result = self._redis.incr_sync(key, amount)
        self._memory.set_sync(key, result, ttl=self._local_ttl_val(None))
        return result

    def get_many_sync(self, *keys):
        results = []
        missed_keys = []
        missed_indices = []
        for i, k in enumerate(keys):
            val = self._memory.get_sync(k)
            results.append(val)
            if val is None:
                missed_keys.append(k)
                missed_indices.append(i)
        if missed_keys:
            redis_vals = self._redis.get_many_sync(*missed_keys)
            for idx, val in zip(missed_indices, redis_vals):
                results[idx] = val
                if val is not None:
                    k = keys[idx]
                    self._memory.set_sync(k, val, ttl=self._local_ttl_val(None))
        return results

    def set_many_sync(self, pairs, ttl=None):
        self._redis.set_many_sync(pairs, ttl=ttl)
        self._memory.set_many_sync(pairs, ttl=self._local_ttl_val(ttl))

    def delete_many_sync(self, *keys):
        self._memory.delete_many_sync(*keys)
        self._redis.delete_many_sync(*keys)

    def delete_match_sync(self, pattern):
        self._memory.delete_match_sync(pattern)
        self._redis.delete_match_sync(pattern)

    def scan_sync(self, pattern):
        return self._redis.scan_sync(pattern)

    def get_match_sync(self, pattern):
        return self._redis.get_match_sync(pattern)

    def get_keys_count_sync(self):
        return self._redis.get_keys_count_sync()

    def get_size_sync(self):
        return self._redis.get_size_sync()

    def set_lock_sync(self, key, ttl):
        return self._redis.set_lock_sync(key, ttl)

    def unlock_sync(self, key):
        return self._redis.unlock_sync(key)

    def is_locked_sync(self, key):
        return self._redis.is_locked_sync(key)
|