limits-4.1-py3-none-any.whl → limits-4.3-py3-none-any.whl
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- limits/__init__.py +8 -6
- limits/_version.py +4 -4
- limits/aio/__init__.py +2 -0
- limits/aio/storage/__init__.py +6 -4
- limits/aio/storage/base.py +5 -8
- limits/aio/storage/etcd.py +6 -4
- limits/aio/storage/memcached.py +6 -4
- limits/aio/storage/memory.py +42 -26
- limits/aio/storage/mongodb.py +4 -7
- limits/aio/storage/redis/__init__.py +402 -0
- limits/aio/storage/redis/bridge.py +120 -0
- limits/aio/storage/redis/coredis.py +209 -0
- limits/aio/storage/redis/redispy.py +257 -0
- limits/aio/storage/redis/valkey.py +9 -0
- limits/aio/strategies.py +4 -2
- limits/errors.py +2 -0
- limits/storage/__init__.py +14 -11
- limits/storage/base.py +5 -10
- limits/storage/etcd.py +6 -4
- limits/storage/memcached.py +6 -7
- limits/storage/memory.py +42 -31
- limits/storage/mongodb.py +7 -10
- limits/storage/redis.py +48 -18
- limits/storage/redis_cluster.py +31 -11
- limits/storage/redis_sentinel.py +35 -11
- limits/storage/registry.py +1 -3
- limits/strategies.py +11 -9
- limits/typing.py +45 -42
- limits/util.py +12 -12
- {limits-4.1.dist-info → limits-4.3.dist-info}/METADATA +52 -36
- limits-4.3.dist-info/RECORD +43 -0
- {limits-4.1.dist-info → limits-4.3.dist-info}/WHEEL +1 -1
- limits/aio/storage/redis.py +0 -555
- limits-4.1.dist-info/RECORD +0 -39
- {limits-4.1.dist-info → limits-4.3.dist-info}/LICENSE.txt +0 -0
- {limits-4.1.dist-info → limits-4.3.dist-info}/top_level.txt +0 -0

limits/aio/storage/redis/__init__.py (new file)
@@ -0,0 +1,402 @@

```python
from __future__ import annotations

from deprecated.sphinx import versionadded, versionchanged
from packaging.version import Version

from limits.aio.storage import MovingWindowSupport, SlidingWindowCounterSupport, Storage
from limits.aio.storage.redis.bridge import RedisBridge
from limits.aio.storage.redis.coredis import CoredisBridge
from limits.aio.storage.redis.redispy import RedispyBridge
from limits.aio.storage.redis.valkey import ValkeyBridge
from limits.typing import Literal


@versionadded(version="2.1")
@versionchanged(
    version="4.2",
    reason=(
        "Added support for using the asyncio redis client from :pypi:`redis`"
        " through :paramref:`implementation`"
    ),
)
@versionchanged(
    version="4.3",
    reason=(
        "Added support for using the asyncio redis client from :pypi:`valkey`"
        " through :paramref:`implementation` or if :paramref:`uri` has the"
        " ``async+valkey`` schema"
    ),
)
class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
    """
    Rate limit storage with redis as backend.

    Depends on :pypi:`coredis` or :pypi:`redis`
    """

    STORAGE_SCHEME = [
        "async+redis",
        "async+rediss",
        "async+redis+unix",
        "async+valkey",
        "async+valkeys",
        "async+valkey+unix",
    ]
    """
    The storage schemes for redis to be used in an async context
    """
    DEPENDENCIES = {
        "redis": Version("5.2.0"),
        "coredis": Version("3.4.0"),
        "valkey": Version("6.0"),
    }
    MODE: Literal["BASIC", "CLUSTER", "SENTINEL"] = "BASIC"
    bridge: RedisBridge
    storage_exceptions: tuple[Exception, ...]
    target_server: Literal["redis", "valkey"]

    def __init__(
        self,
        uri: str,
        wrap_exceptions: bool = False,
        implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
        **options: float | str | bool,
    ) -> None:
        """
        :param uri: uri of the form:

         - ``async+redis://[:password]@host:port``
         - ``async+redis://[:password]@host:port/db``
         - ``async+rediss://[:password]@host:port``
         - ``async+redis+unix:///path/to/sock?db=0`` etc...

         This uri is passed directly to :meth:`coredis.Redis.from_url` or
         :meth:`redis.asyncio.client.Redis.from_url` with the initial ``async`` removed,
         except for the case of ``async+redis+unix`` where it is replaced with ``unix``.

         If the uri scheme is ``async+valkey`` the implementation used will be from
         :pypi:`valkey`.
        :param connection_pool: if provided, the redis client is initialized with
         the connection pool and any other params passed as :paramref:`options`
        :param wrap_exceptions: Whether to wrap storage exceptions in
         :exc:`limits.errors.StorageError` before raising it.
        :param implementation: Whether to use the client implementation from

         - ``coredis``: :class:`coredis.Redis`
         - ``redispy``: :class:`redis.asyncio.client.Redis`
         - ``valkey``: :class:`valkey.asyncio.client.Valkey`

        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`coredis.Redis` or :class:`redis.asyncio.client.Redis`
        :raise ConfigurationError: when the redis library is not available
        """
        uri = uri.removeprefix("async+")
        self.target_server = "redis" if uri.startswith("redis") else "valkey"
        uri = uri.replace(f"{self.target_server}+unix", "unix")

        super().__init__(uri, wrap_exceptions=wrap_exceptions)
        self.options = options
        if self.target_server == "valkey" or implementation == "valkey":
            self.bridge = ValkeyBridge(uri, self.dependencies["valkey"].module)
        else:
            if implementation == "redispy":
                self.bridge = RedispyBridge(uri, self.dependencies["redis"].module)
            else:
                self.bridge = CoredisBridge(uri, self.dependencies["coredis"].module)
        self.configure_bridge()
        self.bridge.register_scripts()

    def _current_window_key(self, key: str) -> str:
        """
        Return the current window's storage key (Sliding window strategy)

        Contrary to other strategies that have one key per rate limit item,
        this strategy has two keys per rate limit item that must be on the same machine.
        To keep the current key and the previous key on the same Redis cluster node,
        curly braces are added.

        Eg: "{constructed_key}"
        """
        return f"{{{key}}}"

    def _previous_window_key(self, key: str) -> str:
        """
        Return the previous window's storage key (Sliding window strategy).

        Curly braces are added on the common pattern with the current window's key,
        so the current and the previous key are stored on the same Redis cluster node.

        Eg: "{constructed_key}/-1"
        """
        return f"{self._current_window_key(key)}/-1"

    def configure_bridge(self) -> None:
        self.bridge.use_basic(**self.options)

    @property
    def base_exceptions(
        self,
    ) -> type[Exception] | tuple[type[Exception], ...]:  # pragma: no cover
        return self.bridge.base_exceptions

    async def incr(
        self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1
    ) -> int:
        """
        increments the counter for a given rate limit key

        :param key: the key to increment
        :param expiry: amount in seconds for the key to expire in
        :param amount: the number to increment by
        """

        return await self.bridge.incr(key, expiry, elastic_expiry, amount)

    async def get(self, key: str) -> int:
        """
        :param key: the key to get the counter value for
        """

        return await self.bridge.get(key)

    async def clear(self, key: str) -> None:
        """
        :param key: the key to clear rate limits for
        """

        return await self.bridge.clear(key)

    async def acquire_entry(
        self, key: str, limit: int, expiry: int, amount: int = 1
    ) -> bool:
        """
        :param key: rate limit key to acquire an entry in
        :param limit: amount of entries allowed
        :param expiry: expiry of the entry
        :param amount: the number of entries to acquire
        """

        return await self.bridge.acquire_entry(key, limit, expiry, amount)

    async def get_moving_window(
        self, key: str, limit: int, expiry: int
    ) -> tuple[float, int]:
        """
        returns the starting point and the number of entries in the moving
        window

        :param key: rate limit key
        :param expiry: expiry of entry
        :return: (start of window, number of acquired entries)
        """
        return await self.bridge.get_moving_window(key, limit, expiry)

    async def acquire_sliding_window_entry(
        self,
        key: str,
        limit: int,
        expiry: int,
        amount: int = 1,
    ) -> bool:
        current_key = self._current_window_key(key)
        previous_key = self._previous_window_key(key)
        return await self.bridge.acquire_sliding_window_entry(
            previous_key, current_key, limit, expiry, amount
        )

    async def get_sliding_window(
        self, key: str, expiry: int
    ) -> tuple[int, float, int, float]:
        previous_key = self._previous_window_key(key)
        current_key = self._current_window_key(key)
        return await self.bridge.get_sliding_window(previous_key, current_key, expiry)

    async def get_expiry(self, key: str) -> float:
        """
        :param key: the key to get the expiry for
        """

        return await self.bridge.get_expiry(key)

    async def check(self) -> bool:
        """
        Check if storage is healthy by calling ``PING``
        """

        return await self.bridge.check()

    async def reset(self) -> int | None:
        """
        This function calls a Lua Script to delete keys prefixed with
        ``self.PREFIX`` in blocks of 5000.

        .. warning:: This operation was designed to be fast, but was not tested
           on a large production based system. Be careful with its usage as it
           could be slow on very large data sets.
        """

        return await self.bridge.lua_reset()
```
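For orientation, here is a minimal usage sketch (not part of the diff) showing the new storage class driving the async moving-window strategy. It assumes a Redis server reachable at localhost:6379 and uses `parse` and `MovingWindowRateLimiter` from the library's existing public API, which are not shown in this diff:

```python
import asyncio

from limits import parse
from limits.aio.storage.redis import RedisStorage
from limits.aio.strategies import MovingWindowRateLimiter


async def main() -> None:
    # The default bridge is coredis; pass implementation="redispy" or
    # implementation="valkey" to back the storage with another client.
    storage = RedisStorage("async+redis://localhost:6379/0")
    limiter = MovingWindowRateLimiter(storage)
    item = parse("10/minute")
    allowed = await limiter.hit(item, "my-service", "user-42")
    print("allowed" if allowed else "rate limited")


asyncio.run(main())
```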
limits/aio/storage/redis/__init__.py (continued)

```python
@versionadded(version="2.1")
@versionchanged(
    version="4.2",
    reason="Added support for using the asyncio redis client from :pypi:`redis`",
)
@versionchanged(
    version="4.3",
    reason=(
        "Added support for using the asyncio redis client from :pypi:`valkey`"
        " through :paramref:`implementation` or if :paramref:`uri` has the"
        " ``async+valkey+cluster`` schema"
    ),
)
class RedisClusterStorage(RedisStorage):
    """
    Rate limit storage with redis cluster as backend

    Depends on :pypi:`coredis` or :pypi:`redis`
    """

    STORAGE_SCHEME = ["async+redis+cluster", "async+valkey+cluster"]
    """
    The storage schemes for redis cluster to be used in an async context
    """

    MODE = "CLUSTER"

    def __init__(
        self,
        uri: str,
        wrap_exceptions: bool = False,
        implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
        **options: float | str | bool,
    ) -> None:
        """
        :param uri: url of the form
         ``async+redis+cluster://[:password]@host:port,host:port``

         If the uri scheme is ``async+valkey+cluster`` the implementation used will be from
         :pypi:`valkey`.
        :param wrap_exceptions: Whether to wrap storage exceptions in
         :exc:`limits.errors.StorageError` before raising it.
        :param implementation: Whether to use the client implementation from

         - ``coredis``: :class:`coredis.RedisCluster`
         - ``redispy``: :class:`redis.asyncio.cluster.RedisCluster`
         - ``valkey``: :class:`valkey.asyncio.cluster.ValkeyCluster`
        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`coredis.RedisCluster` or
         :class:`redis.asyncio.RedisCluster`
        :raise ConfigurationError: when the redis library is not
         available or if the redis host cannot be pinged.
        """
        super().__init__(
            uri,
            wrap_exceptions=wrap_exceptions,
            implementation=implementation,
            **options,
        )

    def configure_bridge(self) -> None:
        self.bridge.use_cluster(**self.options)

    async def reset(self) -> int | None:
        """
        Redis Clusters are sharded and deleting across shards
        can't be done atomically. Because of this, this reset loops over all
        keys that are prefixed with ``self.PREFIX`` and calls delete on them,
        one at a time.

        .. warning:: This operation was not tested with extremely large data sets.
           On a large production based system, care should be taken with its
           usage as it could be slow on very large data sets
        """

        return await self.bridge.reset()
```
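A construction sketch for the cluster storage above, with hypothetical node addresses: the comma-separated host list follows the URI form in the docstring, and the curly-brace key helpers shown earlier keep a sliding window's current and previous counters in the same cluster hash slot.

```python
from limits.aio.storage.redis import RedisClusterStorage

# Hypothetical cluster nodes; remaining keyword arguments are forwarded to
# the underlying cluster client's constructor.
storage = RedisClusterStorage(
    "async+redis+cluster://node-1:7000,node-2:7001,node-3:7002",
    implementation="redispy",  # default is "coredis"; "valkey" is also accepted
)
```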
limits/aio/storage/redis/__init__.py (continued)

```python
@versionadded(version="2.1")
@versionchanged(
    version="4.2",
    reason="Added support for using the asyncio redis client from :pypi:`redis`",
)
@versionchanged(
    version="4.3",
    reason=(
        "Added support for using the asyncio redis client from :pypi:`valkey`"
        " through :paramref:`implementation` or if :paramref:`uri` has the"
        " ``async+valkey+sentinel`` schema"
    ),
)
class RedisSentinelStorage(RedisStorage):
    """
    Rate limit storage with redis sentinel as backend

    Depends on :pypi:`coredis` or :pypi:`redis`
    """

    STORAGE_SCHEME = [
        "async+redis+sentinel",
        "async+valkey+sentinel",
    ]
    """The storage scheme for redis accessed via a redis sentinel installation"""

    MODE = "SENTINEL"

    DEPENDENCIES = {
        "redis": Version("5.2.0"),
        "coredis": Version("3.4.0"),
        "coredis.sentinel": Version("3.4.0"),
        "valkey": Version("6.0"),
    }

    def __init__(
        self,
        uri: str,
        wrap_exceptions: bool = False,
        implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
        service_name: str | None = None,
        use_replicas: bool = True,
        sentinel_kwargs: dict[str, float | str | bool] | None = None,
        **options: float | str | bool,
    ):
        """
        :param uri: url of the form
         ``async+redis+sentinel://host:port,host:port/service_name``

         If the uri scheme is ``async+valkey+sentinel`` the implementation used will be from
         :pypi:`valkey`.
        :param wrap_exceptions: Whether to wrap storage exceptions in
         :exc:`limits.errors.StorageError` before raising it.
        :param implementation: Whether to use the client implementation from

         - ``coredis``: :class:`coredis.sentinel.Sentinel`
         - ``redispy``: :class:`redis.asyncio.sentinel.Sentinel`
         - ``valkey``: :class:`valkey.asyncio.sentinel.Sentinel`
        :param service_name: sentinel service name (if not provided in `uri`)
        :param use_replicas: Whether to use replicas for read only operations
        :param sentinel_kwargs: optional arguments to pass as
         ``sentinel_kwargs`` to :class:`coredis.sentinel.Sentinel` or
         :class:`redis.asyncio.Sentinel`
        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`coredis.sentinel.Sentinel` or
         :class:`redis.asyncio.sentinel.Sentinel`
        :raise ConfigurationError: when the redis library is not available
         or if the redis primary host cannot be pinged.
        """

        self.service_name = service_name
        self.use_replicas = use_replicas
        self.sentinel_kwargs = sentinel_kwargs
        super().__init__(
            uri,
            wrap_exceptions=wrap_exceptions,
            implementation=implementation,
            **options,
        )

    def configure_bridge(self) -> None:
        self.bridge.use_sentinel(
            self.service_name, self.use_replicas, self.sentinel_kwargs, **self.options
        )
```
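Likewise, a construction sketch for the sentinel storage above, with hypothetical sentinel endpoints, service name, and credentials; `service_name` may instead be embedded as the URI path segment shown in the docstring.

```python
from limits.aio.storage.redis import RedisSentinelStorage

storage = RedisSentinelStorage(
    "async+redis+sentinel://sentinel-1:26379,sentinel-2:26379",
    service_name="mymaster",  # or embed it in the uri: .../mymaster
    use_replicas=False,  # read from the primary only
    sentinel_kwargs={"password": "sentinel-secret"},  # hypothetical credentials
)
```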
limits/aio/storage/redis/bridge.py (new file)
@@ -0,0 +1,120 @@

```python
from __future__ import annotations

import urllib
from abc import ABC, abstractmethod
from types import ModuleType

from limits.util import get_package_data


class RedisBridge(ABC):
    PREFIX = "LIMITS"
    RES_DIR = "resources/redis/lua_scripts"

    SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua")
    SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data(
        f"{RES_DIR}/acquire_moving_window.lua"
    )
    SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua")
    SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua")
    SCRIPT_SLIDING_WINDOW = get_package_data(f"{RES_DIR}/sliding_window.lua")
    SCRIPT_ACQUIRE_SLIDING_WINDOW = get_package_data(
        f"{RES_DIR}/acquire_sliding_window.lua"
    )

    def __init__(
        self,
        uri: str,
        dependency: ModuleType,
    ) -> None:
        self.uri = uri
        self.parsed_uri = urllib.parse.urlparse(self.uri)
        self.dependency = dependency
        self.parsed_auth = {}
        if self.parsed_uri.username:
            self.parsed_auth["username"] = self.parsed_uri.username
        if self.parsed_uri.password:
            self.parsed_auth["password"] = self.parsed_uri.password

    def prefixed_key(self, key: str) -> str:
        return f"{self.PREFIX}:{key}"

    @abstractmethod
    def register_scripts(self) -> None: ...

    @abstractmethod
    def use_sentinel(
        self,
        service_name: str | None,
        use_replicas: bool,
        sentinel_kwargs: dict[str, str | float | bool] | None,
        **options: str | float | bool,
    ) -> None: ...

    @abstractmethod
    def use_basic(self, **options: str | float | bool) -> None: ...

    @abstractmethod
    def use_cluster(self, **options: str | float | bool) -> None: ...

    @property
    @abstractmethod
    def base_exceptions(
        self,
    ) -> type[Exception] | tuple[type[Exception], ...]: ...

    @abstractmethod
    async def incr(
        self,
        key: str,
        expiry: int,
        elastic_expiry: bool = False,
        amount: int = 1,
    ) -> int: ...

    @abstractmethod
    async def get(self, key: str) -> int: ...

    @abstractmethod
    async def clear(self, key: str) -> None: ...

    @abstractmethod
    async def get_moving_window(
        self, key: str, limit: int, expiry: int
    ) -> tuple[float, int]: ...

    @abstractmethod
    async def get_sliding_window(
        self, previous_key: str, current_key: str, expiry: int
    ) -> tuple[int, float, int, float]: ...

    @abstractmethod
    async def acquire_entry(
        self,
        key: str,
        limit: int,
        expiry: int,
        amount: int = 1,
    ) -> bool: ...

    @abstractmethod
    async def acquire_sliding_window_entry(
        self,
        previous_key: str,
        current_key: str,
        limit: int,
        expiry: int,
        amount: int = 1,
    ) -> bool: ...

    @abstractmethod
    async def get_expiry(self, key: str) -> float: ...

    @abstractmethod
    async def check(self) -> bool: ...

    @abstractmethod
    async def reset(self) -> int | None: ...

    @abstractmethod
    async def lua_reset(self) -> int | None: ...
```
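The concrete bridges (CoredisBridge, RedispyBridge, ValkeyBridge) live in the sibling modules listed at the top of this diff and are not reproduced here. As a rough illustration of how `RedisStorage.__init__` selects among them, assuming the coredis, redis, and valkey client packages are all installed:

```python
from limits.aio.storage.redis import RedisStorage

# The URI scheme and the `implementation` argument jointly pick the bridge.
coredis_backed = RedisStorage("async+redis://localhost:6379/0")
redispy_backed = RedisStorage("async+redis://localhost:6379/0", implementation="redispy")
valkey_backed = RedisStorage("async+valkey://localhost:6379/0")

for storage in (coredis_backed, redispy_backed, valkey_backed):
    print(type(storage.bridge).__name__)
# Expected, per the constructor logic above: CoredisBridge, RedispyBridge, ValkeyBridge
```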