limits 4.0.1__py3-none-any.whl → 4.2__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (39)
  1. limits/__init__.py +3 -1
  2. limits/_version.py +4 -4
  3. limits/aio/__init__.py +2 -0
  4. limits/aio/storage/__init__.py +4 -1
  5. limits/aio/storage/base.py +70 -24
  6. limits/aio/storage/etcd.py +8 -2
  7. limits/aio/storage/memcached.py +159 -33
  8. limits/aio/storage/memory.py +100 -13
  9. limits/aio/storage/mongodb.py +217 -9
  10. limits/aio/storage/redis/__init__.py +341 -0
  11. limits/aio/storage/redis/bridge.py +121 -0
  12. limits/aio/storage/redis/coredis.py +209 -0
  13. limits/aio/storage/redis/redispy.py +257 -0
  14. limits/aio/strategies.py +124 -1
  15. limits/errors.py +2 -0
  16. limits/limits.py +10 -11
  17. limits/resources/redis/lua_scripts/acquire_sliding_window.lua +45 -0
  18. limits/resources/redis/lua_scripts/sliding_window.lua +17 -0
  19. limits/storage/__init__.py +6 -3
  20. limits/storage/base.py +92 -24
  21. limits/storage/etcd.py +8 -2
  22. limits/storage/memcached.py +143 -34
  23. limits/storage/memory.py +99 -12
  24. limits/storage/mongodb.py +204 -11
  25. limits/storage/redis.py +159 -138
  26. limits/storage/redis_cluster.py +5 -3
  27. limits/storage/redis_sentinel.py +14 -35
  28. limits/storage/registry.py +3 -3
  29. limits/strategies.py +121 -5
  30. limits/typing.py +55 -19
  31. limits/util.py +29 -18
  32. limits-4.2.dist-info/METADATA +268 -0
  33. limits-4.2.dist-info/RECORD +42 -0
  34. limits/aio/storage/redis.py +0 -470
  35. limits-4.0.1.dist-info/METADATA +0 -192
  36. limits-4.0.1.dist-info/RECORD +0 -37
  37. {limits-4.0.1.dist-info → limits-4.2.dist-info}/LICENSE.txt +0 -0
  38. {limits-4.0.1.dist-info → limits-4.2.dist-info}/WHEEL +0 -0
  39. {limits-4.0.1.dist-info → limits-4.2.dist-info}/top_level.txt +0 -0
limits/aio/storage/redis/__init__.py (new file)
@@ -0,0 +1,341 @@
+ from __future__ import annotations
+
+ from typing import Optional, Type, Union
+
+ from deprecated.sphinx import versionadded, versionchanged
+ from packaging.version import Version
+
+ from limits.aio.storage import MovingWindowSupport, SlidingWindowCounterSupport, Storage
+ from limits.aio.storage.redis.bridge import RedisBridge
+ from limits.aio.storage.redis.coredis import CoredisBridge
+ from limits.aio.storage.redis.redispy import RedispyBridge
+ from limits.typing import Literal
+
+
+ @versionadded(version="2.1")
+ @versionchanged(
+     version="4.2",
+     reason=(
+         "Added support for using the asyncio redis client from :pypi:`redis`"
+         " through :paramref:`implementation`"
+     ),
+ )
+ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
+     """
+     Rate limit storage with redis as backend.
+
+     Depends on :pypi:`coredis` or :pypi:`redis`
+     """
+
+     STORAGE_SCHEME = ["async+redis", "async+rediss", "async+redis+unix"]
+     """
+     The storage schemes for redis to be used in an async context
+     """
+     DEPENDENCIES = {"redis": Version("5.2.0"), "coredis": Version("3.4.0")}
+     MODE: Literal["BASIC", "CLUSTER", "SENTINEL"] = "BASIC"
+     bridge: RedisBridge
+     storage_exceptions: tuple[Exception, ...]
+
+     def __init__(
+         self,
+         uri: str,
+         wrap_exceptions: bool = False,
+         implementation: Literal["redispy", "coredis"] = "coredis",
+         **options: Union[float, str, bool],
+     ) -> None:
+         """
+         :param uri: uri of the form:
+
+          - ``async+redis://[:password]@host:port``
+          - ``async+redis://[:password]@host:port/db``
+          - ``async+rediss://[:password]@host:port``
+          - ``async+redis+unix:///path/to/sock?db=0`` etc...
+
+          This uri is passed directly to :meth:`coredis.Redis.from_url` or
+          :meth:`redis.asyncio.client.Redis.from_url` with the initial ``async`` removed,
+          except for the case of ``async+redis+unix`` where it is replaced with ``unix``.
+         :param connection_pool: if provided, the redis client is initialized with
+          the connection pool and any other params passed as :paramref:`options`
+         :param wrap_exceptions: Whether to wrap storage exceptions in
+          :exc:`limits.errors.StorageError` before raising it.
+         :param implementation: Whether to use the client implementation from
+          :class:`coredis.Redis` (``coredis``) or :class:`redis.asyncio.client.Redis` (``redispy``).
+         :param options: all remaining keyword arguments are passed
+          directly to the constructor of :class:`coredis.Redis` or :class:`redis.asyncio.client.Redis`
+         :raise ConfigurationError: when the redis library is not available
+         """
+         uri = uri.replace("async+redis", "redis", 1)
+         uri = uri.replace("redis+unix", "unix")
+
+         super().__init__(uri, wrap_exceptions=wrap_exceptions)
+         self.options = options
+         if implementation == "redispy":
+             self.bridge = RedispyBridge(uri, self.dependencies["redis"].module)
+         else:
+             self.bridge = CoredisBridge(uri, self.dependencies["coredis"].module)
+         self.configure_bridge()
+         self.bridge.register_scripts()
+
+     def _current_window_key(self, key: str) -> str:
+         """
+         Return the current window's storage key (Sliding window strategy)
+
+         Contrary to other strategies that have one key per rate limit item,
+         this strategy has two keys per rate limit item that must be on the same machine.
+         To keep the current key and the previous key on the same Redis cluster node,
+         curly braces are added.
+
+         Eg: "{constructed_key}"
+         """
+         return f"{{{key}}}"
+
+     def _previous_window_key(self, key: str) -> str:
+         """
+         Return the previous window's storage key (Sliding window strategy).
+
+         Curly braces are added on the common pattern with the current window's key,
+         so the current and the previous key are stored on the same Redis cluster node.
+
+         Eg: "{constructed_key}/-1"
+         """
+         return f"{self._current_window_key(key)}/-1"
+
+     def configure_bridge(self) -> None:
+         self.bridge.use_basic(**self.options)
+
+     @property
+     def base_exceptions(
+         self,
+     ) -> Union[Type[Exception], tuple[Type[Exception], ...]]:  # pragma: no cover
+         return self.bridge.base_exceptions
+
+     async def incr(
+         self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1
+     ) -> int:
+         """
+         increments the counter for a given rate limit key
+
+         :param key: the key to increment
+         :param expiry: amount in seconds for the key to expire in
+         :param amount: the number to increment by
+         """
+
+         return await self.bridge.incr(key, expiry, elastic_expiry, amount)
+
+     async def get(self, key: str) -> int:
+         """
+         :param key: the key to get the counter value for
+         """
+
+         return await self.bridge.get(key)
+
+     async def clear(self, key: str) -> None:
+         """
+         :param key: the key to clear rate limits for
+         """
+
+         return await self.bridge.clear(key)
+
+     async def acquire_entry(
+         self, key: str, limit: int, expiry: int, amount: int = 1
+     ) -> bool:
+         """
+         :param key: rate limit key to acquire an entry in
+         :param limit: amount of entries allowed
+         :param expiry: expiry of the entry
+         :param amount: the number of entries to acquire
+         """
+
+         return await self.bridge.acquire_entry(key, limit, expiry, amount)
+
+     async def get_moving_window(
+         self, key: str, limit: int, expiry: int
+     ) -> tuple[float, int]:
+         """
+         returns the starting point and the number of entries in the moving
+         window
+
+         :param key: rate limit key
+         :param expiry: expiry of entry
+         :return: (start of window, number of acquired entries)
+         """
+         return await self.bridge.get_moving_window(key, limit, expiry)
+
+     async def acquire_sliding_window_entry(
+         self,
+         key: str,
+         limit: int,
+         expiry: int,
+         amount: int = 1,
+     ) -> bool:
+         current_key = self._current_window_key(key)
+         previous_key = self._previous_window_key(key)
+         return await self.bridge.acquire_sliding_window_entry(
+             previous_key, current_key, limit, expiry, amount
+         )
+
+     async def get_sliding_window(
+         self, key: str, expiry: int
+     ) -> tuple[int, float, int, float]:
+         previous_key = self._previous_window_key(key)
+         current_key = self._current_window_key(key)
+         return await self.bridge.get_sliding_window(previous_key, current_key, expiry)
+
+     async def get_expiry(self, key: str) -> float:
+         """
+         :param key: the key to get the expiry for
+         """
+
+         return await self.bridge.get_expiry(key)
+
+     async def check(self) -> bool:
+         """
+         Check if storage is healthy by calling ``PING``
+         """
+
+         return await self.bridge.check()
+
+     async def reset(self) -> Optional[int]:
+         """
+         This function calls a Lua Script to delete keys prefixed with
+         ``self.PREFIX`` in blocks of 5000.
+
+         .. warning:: This operation was designed to be fast, but was not tested
+            on a large production based system. Be careful with its usage as it
+            could be slow on very large data sets.
+         """
+
+         return await self.bridge.lua_reset()
+
+
+ @versionadded(version="2.1")
+ @versionchanged(
+     version="4.2",
+     reason="Added support for using the asyncio redis client from :pypi:`redis`",
+ )
+ class RedisClusterStorage(RedisStorage):
+     """
+     Rate limit storage with redis cluster as backend
+
+     Depends on :pypi:`coredis` or :pypi:`redis`
+     """
+
+     STORAGE_SCHEME = ["async+redis+cluster"]
+     """
+     The storage schemes for redis cluster to be used in an async context
+     """
+
+     MODE = "CLUSTER"
+
+     def __init__(
+         self,
+         uri: str,
+         wrap_exceptions: bool = False,
+         implementation: Literal["redispy", "coredis"] = "coredis",
+         **options: Union[float, str, bool],
+     ) -> None:
+         """
+         :param uri: url of the form
+          ``async+redis+cluster://[:password]@host:port,host:port``
+         :param wrap_exceptions: Whether to wrap storage exceptions in
+          :exc:`limits.errors.StorageError` before raising it.
+         :param implementation: Whether to use the client implementation from
+          :class:`coredis.RedisCluster` (``coredis``) or :class:`redis.asyncio.cluster.RedisCluster` (``redispy``).
+         :param options: all remaining keyword arguments are passed
+          directly to the constructor of :class:`coredis.RedisCluster` or
+          :class:`redis.asyncio.RedisCluster`
+         :raise ConfigurationError: when the redis library is not
+          available or if the redis host cannot be pinged.
+         """
+         super().__init__(
+             uri,
+             wrap_exceptions=wrap_exceptions,
+             implementation=implementation,
+             **options,
+         )
+
+     def configure_bridge(self) -> None:
+         self.bridge.use_cluster(**self.options)
+
+     async def reset(self) -> Optional[int]:
+         """
+         Redis Clusters are sharded and deleting across shards
+         can't be done atomically. Because of this, this reset loops over all
+         keys that are prefixed with ``self.PREFIX`` and calls delete on them,
+         one at a time.
+
+         .. warning:: This operation was not tested with extremely large data sets.
+            On a large production based system, care should be taken with its
+            usage as it could be slow on very large data sets
+         """
+
+         return await self.bridge.reset()
+
+
+ @versionadded(version="2.1")
+ @versionchanged(
+     version="4.2",
+     reason="Added support for using the asyncio redis client from :pypi:`redis`",
+ )
+ class RedisSentinelStorage(RedisStorage):
+     """
+     Rate limit storage with redis sentinel as backend
+
+     Depends on :pypi:`coredis` or :pypi:`redis`
+     """
+
+     STORAGE_SCHEME = ["async+redis+sentinel"]
+     """The storage scheme for redis accessed via a redis sentinel installation"""
+
+     MODE = "SENTINEL"
+
+     DEPENDENCIES = {
+         "redis": Version("5.2.0"),
+         "coredis": Version("3.4.0"),
+         "coredis.sentinel": Version("3.4.0"),
+     }
+
+     def __init__(
+         self,
+         uri: str,
+         wrap_exceptions: bool = False,
+         implementation: Literal["redispy", "coredis"] = "coredis",
+         service_name: Optional[str] = None,
+         use_replicas: bool = True,
+         sentinel_kwargs: Optional[dict[str, Union[float, str, bool]]] = None,
+         **options: Union[float, str, bool],
+     ):
+         """
+         :param uri: url of the form
+          ``async+redis+sentinel://host:port,host:port/service_name``
+         :param wrap_exceptions: Whether to wrap storage exceptions in
+          :exc:`limits.errors.StorageError` before raising it.
+         :param implementation: Whether to use the client implementation from
+          :class:`coredis.sentinel.Sentinel` (``coredis``) or
+          :class:`redis.asyncio.sentinel.Sentinel` (``redispy``)
+         :param service_name: sentinel service name (if not provided in `uri`)
+         :param use_replicas: Whether to use replicas for read only operations
+         :param sentinel_kwargs: optional arguments to pass as
+          ``sentinel_kwargs`` to :class:`coredis.sentinel.Sentinel` or
+          :class:`redis.asyncio.Sentinel`
+         :param options: all remaining keyword arguments are passed
+          directly to the constructor of :class:`coredis.sentinel.Sentinel` or
+          :class:`redis.asyncio.sentinel.Sentinel`
+         :raise ConfigurationError: when the redis library is not available
+          or if the redis primary host cannot be pinged.
+         """
+
+         self.service_name = service_name
+         self.use_replicas = use_replicas
+         self.sentinel_kwargs = sentinel_kwargs
+         super().__init__(
+             uri,
+             wrap_exceptions=wrap_exceptions,
+             implementation=implementation,
+             **options,
+         )
+
+     def configure_bridge(self) -> None:
+         self.bridge.use_sentinel(
+             self.service_name, self.use_replicas, self.sentinel_kwargs, **self.options
+         )
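
For orientation, a minimal sketch of how the async redis storage added above might be used; the "redispy" implementation switch is the 4.2 addition noted in the versionchanged decorators. The URI, the running Redis server, and the asyncio scaffolding are assumptions for illustration, not part of the diff.

    import asyncio

    from limits.aio.storage.redis import RedisStorage


    async def main() -> None:
        # assumes a redis server reachable at localhost:6379 and redis>=5.2.0 installed
        storage = RedisStorage(
            "async+redis://localhost:6379/0",
            implementation="redispy",  # default remains "coredis"
        )
        assert await storage.check()  # issues PING against the backend
        hits = await storage.incr("demo-key", expiry=60)  # counter with a 60 second TTL
        print(hits, await storage.get_expiry("demo-key"))


    asyncio.run(main())
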
limits/aio/storage/redis/bridge.py (new file)
@@ -0,0 +1,121 @@
+ from __future__ import annotations
+
+ import urllib.parse
+ from abc import ABC, abstractmethod
+ from types import ModuleType
+
+ from limits.typing import Optional, Type, Union
+ from limits.util import get_package_data
+
+
+ class RedisBridge(ABC):
+     PREFIX = "LIMITS"
+     RES_DIR = "resources/redis/lua_scripts"
+
+     SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua")
+     SCRIPT_ACQUIRE_MOVING_WINDOW = get_package_data(
+         f"{RES_DIR}/acquire_moving_window.lua"
+     )
+     SCRIPT_CLEAR_KEYS = get_package_data(f"{RES_DIR}/clear_keys.lua")
+     SCRIPT_INCR_EXPIRE = get_package_data(f"{RES_DIR}/incr_expire.lua")
+     SCRIPT_SLIDING_WINDOW = get_package_data(f"{RES_DIR}/sliding_window.lua")
+     SCRIPT_ACQUIRE_SLIDING_WINDOW = get_package_data(
+         f"{RES_DIR}/acquire_sliding_window.lua"
+     )
+
+     def __init__(
+         self,
+         uri: str,
+         dependency: ModuleType,
+     ) -> None:
+         self.uri = uri
+         self.parsed_uri = urllib.parse.urlparse(self.uri)
+         self.dependency = dependency
+         self.parsed_auth = {}
+         if self.parsed_uri.username:
+             self.parsed_auth["username"] = self.parsed_uri.username
+         if self.parsed_uri.password:
+             self.parsed_auth["password"] = self.parsed_uri.password
+
+     def prefixed_key(self, key: str) -> str:
+         return f"{self.PREFIX}:{key}"
+
+     @abstractmethod
+     def register_scripts(self) -> None: ...
+
+     @abstractmethod
+     def use_sentinel(
+         self,
+         service_name: Optional[str],
+         use_replicas: bool,
+         sentinel_kwargs: Optional[dict[str, Union[str, float, bool]]],
+         **options: Union[str, float, bool],
+     ) -> None: ...
+
+     @abstractmethod
+     def use_basic(self, **options: Union[str, float, bool]) -> None: ...
+
+     @abstractmethod
+     def use_cluster(self, **options: Union[str, float, bool]) -> None: ...
+
+     @property
+     @abstractmethod
+     def base_exceptions(
+         self,
+     ) -> Union[Type[Exception], tuple[Type[Exception], ...]]: ...
+
+     @abstractmethod
+     async def incr(
+         self,
+         key: str,
+         expiry: int,
+         elastic_expiry: bool = False,
+         amount: int = 1,
+     ) -> int: ...
+
+     @abstractmethod
+     async def get(self, key: str) -> int: ...
+
+     @abstractmethod
+     async def clear(self, key: str) -> None: ...
+
+     @abstractmethod
+     async def get_moving_window(
+         self, key: str, limit: int, expiry: int
+     ) -> tuple[float, int]: ...
+
+     @abstractmethod
+     async def get_sliding_window(
+         self, previous_key: str, current_key: str, expiry: int
+     ) -> tuple[int, float, int, float]: ...
+
+     @abstractmethod
+     async def acquire_entry(
+         self,
+         key: str,
+         limit: int,
+         expiry: int,
+         amount: int = 1,
+     ) -> bool: ...
+
+     @abstractmethod
+     async def acquire_sliding_window_entry(
+         self,
+         previous_key: str,
+         current_key: str,
+         limit: int,
+         expiry: int,
+         amount: int = 1,
+     ) -> bool: ...
+
+     @abstractmethod
+     async def get_expiry(self, key: str) -> float: ...
+
+     @abstractmethod
+     async def check(self) -> bool: ...
+
+     @abstractmethod
+     async def reset(self) -> Optional[int]: ...
+
+     @abstractmethod
+     async def lua_reset(self) -> Optional[int]: ...
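
The sliding window strategy keeps two counters per rate limit item, and on Redis Cluster both must land on the same node. RedisStorage wraps the item key in curly braces (a Redis Cluster hash tag) and the bridge then applies its LIMITS prefix. A small standalone sketch of the resulting key shapes, using a made-up item key:

    PREFIX = "LIMITS"  # mirrors RedisBridge.PREFIX


    def current_window_key(key: str) -> str:  # mirrors RedisStorage._current_window_key
        return f"{{{key}}}"


    def previous_window_key(key: str) -> str:  # mirrors RedisStorage._previous_window_key
        return f"{current_window_key(key)}/-1"


    def prefixed_key(key: str) -> str:  # mirrors RedisBridge.prefixed_key
        return f"{PREFIX}:{key}"


    item = "myapp/user_42/10/1/minute"  # hypothetical rate limit key
    print(prefixed_key(current_window_key(item)))   # LIMITS:{myapp/user_42/10/1/minute}
    print(prefixed_key(previous_window_key(item)))  # LIMITS:{myapp/user_42/10/1/minute}/-1
    # Both keys share the same {...} hash tag, so Redis Cluster maps them to the
    # same slot and the two-key Lua scripts can operate on both together.
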
limits/aio/storage/redis/coredis.py (new file)
@@ -0,0 +1,209 @@
+ from __future__ import annotations
+
+ import time
+ from typing import TYPE_CHECKING, Type, cast
+
+ from limits.aio.storage.redis.bridge import RedisBridge
+ from limits.errors import ConfigurationError
+ from limits.typing import AsyncCoRedisClient, Callable, Optional, Union
+
+ if TYPE_CHECKING:
+     import coredis
+
+
+ class CoredisBridge(RedisBridge):
+     DEFAULT_CLUSTER_OPTIONS: dict[str, Union[float, str, bool]] = {
+         "max_connections": 1000,
+     }
+     "Default options passed to :class:`coredis.RedisCluster`"
+
+     @property
+     def base_exceptions(self) -> Union[Type[Exception], tuple[Type[Exception], ...]]:
+         return (self.dependency.exceptions.RedisError,)
+
+     def use_sentinel(
+         self,
+         service_name: Optional[str],
+         use_replicas: bool,
+         sentinel_kwargs: Optional[dict[str, Union[str, float, bool]]],
+         **options: Union[str, float, bool],
+     ) -> None:
+         sentinel_configuration = []
+         connection_options = options.copy()
+
+         sep = self.parsed_uri.netloc.find("@") + 1
+
+         for loc in self.parsed_uri.netloc[sep:].split(","):
+             host, port = loc.split(":")
+             sentinel_configuration.append((host, int(port)))
+         service_name = (
+             self.parsed_uri.path.replace("/", "")
+             if self.parsed_uri.path
+             else service_name
+         )
+
+         if service_name is None:
+             raise ConfigurationError("'service_name' not provided")
+
+         self.sentinel = self.dependency.sentinel.Sentinel(
+             sentinel_configuration,
+             sentinel_kwargs={**self.parsed_auth, **(sentinel_kwargs or {})},
+             **{**self.parsed_auth, **connection_options},
+         )
+         self.storage = self.sentinel.primary_for(service_name)
+         self.storage_replica = self.sentinel.replica_for(service_name)
+         self.connection_getter = lambda readonly: (
+             self.storage_replica if readonly and use_replicas else self.storage
+         )
+
+     def use_basic(self, **options: Union[str, float, bool]) -> None:
+         if connection_pool := options.pop("connection_pool", None):
+             self.storage = self.dependency.Redis(
+                 connection_pool=connection_pool, **options
+             )
+         else:
+             self.storage = self.dependency.Redis.from_url(self.uri, **options)
+
+         self.connection_getter = lambda _: self.storage
+
+     def use_cluster(self, **options: Union[str, float, bool]) -> None:
+         sep = self.parsed_uri.netloc.find("@") + 1
+         cluster_hosts: list[dict[str, Union[int, str]]] = []
+         cluster_hosts.extend(
+             {"host": host, "port": int(port)}
+             for loc in self.parsed_uri.netloc[sep:].split(",")
+             if loc
+             for host, port in [loc.split(":")]
+         )
+         self.storage = self.dependency.RedisCluster(
+             startup_nodes=cluster_hosts,
+             **{**self.DEFAULT_CLUSTER_OPTIONS, **self.parsed_auth, **options},
+         )
+         self.connection_getter = lambda _: self.storage
+
+     lua_moving_window: "coredis.commands.Script[bytes]"
+     lua_acquire_moving_window: "coredis.commands.Script[bytes]"
+     lua_sliding_window: "coredis.commands.Script[bytes]"
+     lua_acquire_sliding_window: "coredis.commands.Script[bytes]"
+     lua_clear_keys: "coredis.commands.Script[bytes]"
+     lua_incr_expire: "coredis.commands.Script[bytes]"
+     connection_getter: Callable[[bool], AsyncCoRedisClient]
+
+     def get_connection(self, readonly: bool = False) -> AsyncCoRedisClient:
+         return self.connection_getter(readonly)
+
+     def register_scripts(self) -> None:
+         self.lua_moving_window = self.get_connection().register_script(
+             self.SCRIPT_MOVING_WINDOW
+         )
+         self.lua_acquire_moving_window = self.get_connection().register_script(
+             self.SCRIPT_ACQUIRE_MOVING_WINDOW
+         )
+         self.lua_clear_keys = self.get_connection().register_script(
+             self.SCRIPT_CLEAR_KEYS
+         )
+         self.lua_incr_expire = self.get_connection().register_script(
+             self.SCRIPT_INCR_EXPIRE
+         )
+         self.lua_sliding_window = self.get_connection().register_script(
+             self.SCRIPT_SLIDING_WINDOW
+         )
+         self.lua_acquire_sliding_window = self.get_connection().register_script(
+             self.SCRIPT_ACQUIRE_SLIDING_WINDOW
+         )
+
+     async def incr(
+         self, key: str, expiry: int, elastic_expiry: bool = False, amount: int = 1
+     ) -> int:
+         key = self.prefixed_key(key)
+         value = await self.get_connection().incrby(key, amount)
+         if elastic_expiry or value == amount:
+             await self.get_connection().expire(key, expiry)
+
+         return value
+
+     async def get(self, key: str) -> int:
+         key = self.prefixed_key(key)
+         return int(await self.get_connection(readonly=True).get(key) or 0)
+
+     async def clear(self, key: str) -> None:
+         key = self.prefixed_key(key)
+         await self.get_connection().delete([key])
+
+     async def lua_reset(self) -> Optional[int]:
+         return cast(int, await self.lua_clear_keys.execute([self.prefixed_key("*")]))
+
+     async def get_moving_window(
+         self, key: str, limit: int, expiry: int
+     ) -> tuple[float, int]:
+         key = self.prefixed_key(key)
+         timestamp = time.time()
+         window = await self.lua_moving_window.execute(
+             [key], [timestamp - expiry, limit]
+         )
+         if window:
+             return float(window[0]), window[1]  # type: ignore
+         return timestamp, 0
+
+     async def get_sliding_window(
+         self, previous_key: str, current_key: str, expiry: int
+     ) -> tuple[int, float, int, float]:
+         previous_key = self.prefixed_key(previous_key)
+         current_key = self.prefixed_key(current_key)
+
+         if window := await self.lua_sliding_window.execute(
+             [previous_key, current_key], [expiry]
+         ):
+             return (
+                 int(window[0] or 0),  # type: ignore
+                 max(0, float(window[1] or 0)) / 1000,  # type: ignore
+                 int(window[2] or 0),  # type: ignore
+                 max(0, float(window[3] or 0)) / 1000,  # type: ignore
+             )
+         return 0, 0.0, 0, 0.0
+
+     async def acquire_entry(
+         self, key: str, limit: int, expiry: int, amount: int = 1
+     ) -> bool:
+         key = self.prefixed_key(key)
+         timestamp = time.time()
+         acquired = await self.lua_acquire_moving_window.execute(
+             [key], [timestamp, limit, expiry, amount]
+         )
+
+         return bool(acquired)
+
+     async def acquire_sliding_window_entry(
+         self,
+         previous_key: str,
+         current_key: str,
+         limit: int,
+         expiry: int,
+         amount: int = 1,
+     ) -> bool:
+         previous_key = self.prefixed_key(previous_key)
+         current_key = self.prefixed_key(current_key)
+         acquired = await self.lua_acquire_sliding_window.execute(
+             [previous_key, current_key], [limit, expiry, amount]
+         )
+         return bool(acquired)
+
+     async def get_expiry(self, key: str) -> float:
+         key = self.prefixed_key(key)
+         return max(await self.get_connection().ttl(key), 0) + time.time()
+
+     async def check(self) -> bool:
+         try:
+             await self.get_connection().ping()
+
+             return True
+         except:  # noqa
+             return False
+
+     async def reset(self) -> Optional[int]:
+         prefix = self.prefixed_key("*")
+         keys = await self.storage.keys(prefix)
+         count = 0
+         for key in keys:
+             count += await self.storage.delete([key])
+         return count
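
Both use_cluster and use_sentinel above derive their host list the same way: strip any user:password@ credential block from the URI netloc, then split the remainder on commas. A standalone sketch of that parsing with a made-up cluster URI (RedisStorage has already stripped the leading "async+" by the time the bridge sees it):

    import urllib.parse

    # hypothetical URI for illustration only
    uri = "redis+cluster://:secret@node1:7001,node2:7002,node3:7003"
    parsed = urllib.parse.urlparse(uri)

    sep = parsed.netloc.find("@") + 1  # index just past "user:password@", or 0 if absent
    startup_nodes = [
        {"host": host, "port": int(port)}
        for loc in parsed.netloc[sep:].split(",")
        if loc
        for host, port in [loc.split(":")]
    ]
    print(startup_nodes)
    # [{'host': 'node1', 'port': 7001}, {'host': 'node2', 'port': 7002}, {'host': 'node3', 'port': 7003}]
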