limits 5.0.0rc1__py3-none-any.whl → 5.1.0__py3-none-any.whl

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
limits/_version.py CHANGED
@@ -8,11 +8,11 @@ import json
 
 version_json = '''
 {
- "date": "2025-04-09T18:20:47-0700",
+ "date": "2025-04-23T10:35:38-0700",
  "dirty": false,
  "error": null,
- "full-revisionid": "4a01f1090a5accfb05b7db6dc6469f7c51d4fa67",
- "version": "5.0.0rc1"
+ "full-revisionid": "2b76ea0a8aa2a37b2069ab5990e9ae180de6491f",
+ "version": "5.1.0"
 }
 '''  # END VERSION_JSON
 
limits/aio/storage/memcached.py → limits/aio/storage/memcached/__init__.py RENAMED
@@ -1,24 +1,23 @@
 from __future__ import annotations
 
 import time
-import urllib.parse
-from collections.abc import Iterable
-from math import ceil, floor
-from typing import TYPE_CHECKING
+from math import floor
 
 from deprecated.sphinx import versionadded, versionchanged
+from packaging.version import Version
 
-from limits.aio.storage.base import SlidingWindowCounterSupport, Storage
+from limits.aio.storage import SlidingWindowCounterSupport, Storage
+from limits.aio.storage.memcached.bridge import MemcachedBridge
+from limits.aio.storage.memcached.emcache import EmcacheBridge
+from limits.aio.storage.memcached.memcachio import MemcachioBridge
 from limits.storage.base import TimestampedSlidingWindow
-
-if TYPE_CHECKING:
-    import memcachio
+from limits.typing import Literal
 
 
 @versionadded(version="2.1")
 @versionchanged(
     version="5.0",
-    reason="Switched to :pypi:`memcachio` for async memcached support",
+    reason="Switched default implementation to :pypi:`memcachio`",
 )
 class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingWindow):
     """
@@ -30,12 +29,19 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
     STORAGE_SCHEME = ["async+memcached"]
     """The storage scheme for memcached to be used in an async context"""
 
-    DEPENDENCIES = ["memcachio"]
+    DEPENDENCIES = {
+        "memcachio": Version("0.3"),
+        "emcache": Version("0.0"),
+    }
+
+    bridge: MemcachedBridge
+    storage_exceptions: tuple[Exception, ...]
 
     def __init__(
         self,
         uri: str,
         wrap_exceptions: bool = False,
+        implementation: Literal["memcachio", "emcache"] = "memcachio",
        **options: float | str | bool,
     ) -> None:
        """
@@ -43,77 +49,41 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
         ``async+memcached://host:port,host:port``
        :param wrap_exceptions: Whether to wrap storage exceptions in
         :exc:`limits.errors.StorageError` before raising it.
+       :param implementation: Whether to use the client implementation from
+
+        - ``memcachio``: :class:`memcachio.Client`
+        - ``emcache``: :class:`emcache.Client`
        :param options: all remaining keyword arguments are passed
         directly to the constructor of :class:`memcachio.Client`
        :raise ConfigurationError: when :pypi:`memcachio` is not available
        """
-        parsed = urllib.parse.urlparse(uri)
-        self.hosts = []
-
-        for host, port in (
-            loc.split(":") for loc in parsed.netloc.strip().split(",") if loc.strip()
-        ):
-            self.hosts.append((host, int(port)))
-
-        self._options = options
-        self._storage = None
+        if implementation == "emcache":
+            self.bridge = EmcacheBridge(
+                uri, self.dependencies["emcache"].module, **options
+            )
+        else:
+            self.bridge = MemcachioBridge(
+                uri, self.dependencies["memcachio"].module, **options
+            )
         super().__init__(uri, wrap_exceptions=wrap_exceptions, **options)
-        self.dependency = self.dependencies["memcachio"].module
 
     @property
     def base_exceptions(
         self,
     ) -> type[Exception] | tuple[type[Exception], ...]:  # pragma: no cover
-        return (
-            self.dependency.errors.NoNodeAvailable,
-            self.dependency.errors.MemcachioConnectionError,
-        )
-
-    async def get_storage(self) -> memcachio.Client[bytes]:
-        if not self._storage:
-            self._storage = self.dependency.Client(
-                [(h, p) for h, p in self.hosts],
-                **self._options,
-            )
-        assert self._storage
-        return self._storage
+        return self.bridge.base_exceptions
 
     async def get(self, key: str) -> int:
         """
         :param key: the key to get the counter value for
         """
-        item = (await self.get_many([key])).get(key.encode("utf-8"), None)
-        return item and int(item.value) or 0
-
-    async def get_many(
-        self, keys: Iterable[str]
-    ) -> dict[bytes, memcachio.MemcachedItem[bytes]]:
-        """
-        Return multiple counters at once
-
-        :param keys: the keys to get the counter values for
-        """
-        return await (await self.get_storage()).get(*[k.encode("utf-8") for k in keys])
+        return await self.bridge.get(key)
 
     async def clear(self, key: str) -> None:
         """
         :param key: the key to clear rate limits for
         """
-        await (await self.get_storage()).delete(key.encode("utf-8"))
-
-    async def decr(self, key: str, amount: int = 1, noreply: bool = False) -> int:
-        """
-        decrements the counter for a given rate limit key
-
-        retursn 0 if the key doesn't exist or if noreply is set to True
-
-        :param key: the key to decrement
-        :param amount: the number to decrement by
-        :param noreply: set to True to ignore the memcached response
-        """
-        storage = await self.get_storage()
-        limit_key = key.encode("utf-8")
-        return await storage.decr(limit_key, amount, noreply=noreply) or 0
+        await self.bridge.clear(key)
 
     async def incr(
         self,
@@ -131,60 +101,22 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
        :param amount: the number to increment by
        :param set_expiration_key: if set to False, the expiration time won't be stored but the key will still expire
        """
-        storage = await self.get_storage()
-        limit_key = key.encode("utf-8")
-        expire_key = self._expiration_key(key).encode()
-        if (value := (await storage.incr(limit_key, amount))) is None:
-            storage = await self.get_storage()
-            if await storage.add(limit_key, f"{amount}".encode(), expiry=ceil(expiry)):
-                if set_expiration_key:
-                    await storage.set(
-                        expire_key,
-                        str(expiry + time.time()).encode("utf-8"),
-                        expiry=ceil(expiry),
-                        noreply=False,
-                    )
-                return amount
-            else:
-                storage = await self.get_storage()
-                return await storage.incr(limit_key, amount) or amount
-        return value
+        return await self.bridge.incr(
+            key, expiry, amount, set_expiration_key=set_expiration_key
+        )
 
     async def get_expiry(self, key: str) -> float:
         """
         :param key: the key to get the expiry for
         """
-        storage = await self.get_storage()
-        expiration_key = self._expiration_key(key).encode("utf-8")
-        item = (await storage.get(expiration_key)).get(expiration_key, None)
-
-        return item and float(item.value) or time.time()
-
-    def _expiration_key(self, key: str) -> str:
-        """
-        Return the expiration key for the given counter key.
-
-        Memcached doesn't natively return the expiration time or TTL for a given key,
-        so we implement the expiration time on a separate key.
-        """
-        return key + "/expires"
-
-    async def check(self) -> bool:
-        """
-        Check if storage is healthy by calling the ``get`` command
-        on the key ``limiter-check``
-        """
-        try:
-            storage = await self.get_storage()
-            await storage.get(b"limiter-check")
-
-            return True
-        except:  # noqa
-            return False
+        return await self.bridge.get_expiry(key)
 
     async def reset(self) -> int | None:
         raise NotImplementedError
 
+    async def check(self) -> bool:
+        return await self.bridge.check()
+
     async def acquire_sliding_window_entry(
         self,
         key: str,
@@ -219,10 +151,10 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
             previous_count * actualised_previous_ttl / expiry + current_count
         )
         if floor(weighted_count) > limit:
-            # Another hit won the race condition: revert the incrementation and refuse this hit
+            # Another hit won the race condition: revert the increment and refuse this hit
             # Limitation: during high concurrency at the end of the window,
            # the counter is shifted and cannot be decremented, so less requests than expected are allowed.
-            await self.decr(current_key, amount, noreply=True)
+            await self.bridge.decr(current_key, amount, noreply=True)
             return False
         return True
 
@@ -238,13 +170,11 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
     async def _get_sliding_window_info(
         self, previous_key: str, current_key: str, expiry: int, now: float
     ) -> tuple[int, float, int, float]:
-        result = await self.get_many([previous_key, current_key])
+        result = await self.bridge.get_many([previous_key, current_key])
 
-        raw_previous_count = result.get(previous_key.encode("utf-8"))
-        raw_current_count = result.get(current_key.encode("utf-8"))
+        previous_count = result.get(previous_key.encode("utf-8"), 0)
+        current_count = result.get(current_key.encode("utf-8"), 0)
 
-        current_count = raw_current_count and int(raw_current_count.value) or 0
-        previous_count = raw_previous_count and int(raw_previous_count.value) or 0
         if previous_count == 0:
             previous_ttl = float(0)
         else:
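
The constructor change above adds an `implementation` switch to the async `MemcachedStorage`. A minimal usage sketch, assuming a local memcached instance; the URI, rate limit, and values are illustrative and not taken from the package documentation:

```python
import asyncio

from limits import parse
from limits.aio.storage import MemcachedStorage
from limits.aio.strategies import FixedWindowRateLimiter


async def main() -> None:
    # "memcachio" remains the default; "emcache" selects the alternative
    # bridge introduced in this release.
    storage = MemcachedStorage(
        "async+memcached://localhost:11211", implementation="emcache"
    )
    limiter = FixedWindowRateLimiter(storage)
    print(await limiter.hit(parse("10/minute"), "my-resource"))


asyncio.run(main())
```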
limits/aio/storage/memcached/bridge.py ADDED
@@ -0,0 +1,73 @@
+from __future__ import annotations
+
+import urllib
+from abc import ABC, abstractmethod
+from types import ModuleType
+
+from limits.typing import Iterable
+
+
+class MemcachedBridge(ABC):
+    def __init__(
+        self,
+        uri: str,
+        dependency: ModuleType,
+        **options: float | str | bool,
+    ) -> None:
+        self.uri = uri
+        self.parsed_uri = urllib.parse.urlparse(self.uri)
+        self.dependency = dependency
+        self.hosts = []
+        self.options = options
+
+        sep = self.parsed_uri.netloc.strip().find("@") + 1
+        for loc in self.parsed_uri.netloc.strip()[sep:].split(","):
+            host, port = loc.split(":")
+            self.hosts.append((host, int(port)))
+
+        if self.parsed_uri.username:
+            self.options["username"] = self.parsed_uri.username
+        if self.parsed_uri.password:
+            self.options["password"] = self.parsed_uri.password
+
+    def _expiration_key(self, key: str) -> str:
+        """
+        Return the expiration key for the given counter key.
+
+        Memcached doesn't natively return the expiration time or TTL for a given key,
+        so we implement the expiration time on a separate key.
+        """
+        return key + "/expires"
+
+    @property
+    @abstractmethod
+    def base_exceptions(
+        self,
+    ) -> type[Exception] | tuple[type[Exception], ...]: ...
+
+    @abstractmethod
+    async def get(self, key: str) -> int: ...
+
+    @abstractmethod
+    async def get_many(self, keys: Iterable[str]) -> dict[bytes, int]: ...
+
+    @abstractmethod
+    async def clear(self, key: str) -> None: ...
+
+    @abstractmethod
+    async def decr(self, key: str, amount: int = 1, noreply: bool = False) -> int: ...
+
+    @abstractmethod
+    async def incr(
+        self,
+        key: str,
+        expiry: float,
+        amount: int = 1,
+        set_expiration_key: bool = True,
+    ) -> int: ...
+
+    @abstractmethod
+    async def get_expiry(self, key: str) -> float: ...
+
+    @abstractmethod
+    async def check(self) -> bool: ...
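
The `MemcachedBridge` constructor above derives hosts and credentials from the storage URI. A standalone sketch of that parsing logic, using an illustrative URI that is not taken from the package docs:

```python
import urllib.parse

# Illustrative URI with credentials and two hosts
uri = "async+memcached://user:secret@host1:11211,host2:11212"
parsed = urllib.parse.urlparse(uri)

netloc = parsed.netloc.strip()
sep = netloc.find("@") + 1  # skip past "user:secret@" if present
hosts = []
for loc in netloc[sep:].split(","):
    host, port = loc.split(":")
    hosts.append((host, int(port)))

print(hosts)  # [('host1', 11211), ('host2', 11212)]
print(parsed.username, parsed.password)  # user secret
```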
limits/aio/storage/memcached/emcache.py ADDED
@@ -0,0 +1,112 @@
+from __future__ import annotations
+
+import time
+from math import ceil
+from types import ModuleType
+
+from limits.typing import TYPE_CHECKING, Iterable
+
+from .bridge import MemcachedBridge
+
+if TYPE_CHECKING:
+    import emcache
+
+
+class EmcacheBridge(MemcachedBridge):
+    def __init__(
+        self,
+        uri: str,
+        dependency: ModuleType,
+        **options: float | str | bool,
+    ) -> None:
+        super().__init__(uri, dependency, **options)
+        self._storage = None
+
+    async def get_storage(self) -> emcache.Client:
+        if not self._storage:
+            self._storage = await self.dependency.create_client(
+                [self.dependency.MemcachedHostAddress(h, p) for h, p in self.hosts],
+                **self.options,
+            )
+        assert self._storage
+        return self._storage
+
+    async def get(self, key: str) -> int:
+        item = await (await self.get_storage()).get(key.encode("utf-8"))
+        return item and int(item.value) or 0
+
+    async def get_many(self, keys: Iterable[str]) -> dict[bytes, int]:
+        results = await (await self.get_storage()).get_many(
+            [k.encode("utf-8") for k in keys]
+        )
+        return {k: int(item.value) if item else 0 for k, item in results.items()}
+
+    async def clear(self, key: str) -> None:
+        try:
+            await (await self.get_storage()).delete(key.encode("utf-8"))
+        except self.dependency.NotFoundCommandError:
+            pass
+
+    async def decr(self, key: str, amount: int = 1, noreply: bool = False) -> int:
+        storage = await self.get_storage()
+        limit_key = key.encode("utf-8")
+        try:
+            value = await storage.decrement(limit_key, amount, noreply=noreply) or 0
+        except self.dependency.NotFoundCommandError:
+            value = 0
+        return value
+
+    async def incr(
+        self, key: str, expiry: float, amount: int = 1, set_expiration_key: bool = True
+    ) -> int:
+        storage = await self.get_storage()
+        limit_key = key.encode("utf-8")
+        expire_key = self._expiration_key(key).encode()
+        try:
+            return await storage.increment(limit_key, amount) or amount
+        except self.dependency.NotFoundCommandError:
+            storage = await self.get_storage()
+            try:
+                await storage.add(limit_key, f"{amount}".encode(), exptime=ceil(expiry))
+                if set_expiration_key:
+                    await storage.set(
+                        expire_key,
+                        str(expiry + time.time()).encode("utf-8"),
+                        exptime=ceil(expiry),
+                        noreply=False,
+                    )
+                value = amount
+            except self.dependency.NotStoredStorageCommandError:
+                # Coult not add the key, probably because a concurrent call has added it
+                storage = await self.get_storage()
+                value = await storage.increment(limit_key, amount) or amount
+        return value
+
+    async def get_expiry(self, key: str) -> float:
+        storage = await self.get_storage()
+        item = await storage.get(self._expiration_key(key).encode("utf-8"))
+
+        return item and float(item.value) or time.time()
+        pass
+
+    @property
+    def base_exceptions(
+        self,
+    ) -> type[Exception] | tuple[type[Exception], ...]:  # pragma: no cover
+        return (
+            self.dependency.ClusterNoAvailableNodes,
+            self.dependency.CommandError,
+        )
+
+    async def check(self) -> bool:
+        """
+        Check if storage is healthy by calling the ``get`` command
+        on the key ``limiter-check``
+        """
+        try:
+            storage = await self.get_storage()
+            await storage.get(b"limiter-check")
+
+            return True
+        except:  # noqa
+            return False
limits/aio/storage/memcached/memcachio.py ADDED
@@ -0,0 +1,104 @@
+from __future__ import annotations
+
+import time
+from math import ceil
+from types import ModuleType
+from typing import TYPE_CHECKING, Iterable
+
+from .bridge import MemcachedBridge
+
+if TYPE_CHECKING:
+    import memcachio
+
+
+class MemcachioBridge(MemcachedBridge):
+    def __init__(
+        self,
+        uri: str,
+        dependency: ModuleType,
+        **options: float | str | bool,
+    ) -> None:
+        super().__init__(uri, dependency, **options)
+        self._storage: memcachio.Client[bytes] | None = None
+
+    @property
+    def base_exceptions(
+        self,
+    ) -> type[Exception] | tuple[type[Exception], ...]:
+        return (
+            self.dependency.errors.NoAvailableNodes,
+            self.dependency.errors.MemcachioConnectionError,
+        )
+
+    async def get_storage(self) -> memcachio.Client[bytes]:
+        if not self._storage:
+            self._storage = self.dependency.Client(
+                [(h, p) for h, p in self.hosts],
+                **self.options,
+            )
+        assert self._storage
+        return self._storage
+
+    async def get(self, key: str) -> int:
+        return (await self.get_many([key])).get(key.encode("utf-8"), 0)
+
+    async def get_many(self, keys: Iterable[str]) -> dict[bytes, int]:
+        """
+        Return multiple counters at once
+
+        :param keys: the keys to get the counter values for
+        """
+        results = await (await self.get_storage()).get(
+            *[k.encode("utf-8") for k in keys]
+        )
+        return {k: int(v.value) for k, v in results.items()}
+
+    async def clear(self, key: str) -> None:
+        await (await self.get_storage()).delete(key.encode("utf-8"))
+
+    async def decr(self, key: str, amount: int = 1, noreply: bool = False) -> int:
+        storage = await self.get_storage()
+        limit_key = key.encode("utf-8")
+        return await storage.decr(limit_key, amount, noreply=noreply) or 0
+
+    async def incr(
+        self, key: str, expiry: float, amount: int = 1, set_expiration_key: bool = True
+    ) -> int:
+        storage = await self.get_storage()
+        limit_key = key.encode("utf-8")
+        expire_key = self._expiration_key(key).encode()
+        if (value := (await storage.incr(limit_key, amount))) is None:
+            storage = await self.get_storage()
+            if await storage.add(limit_key, f"{amount}".encode(), expiry=ceil(expiry)):
+                if set_expiration_key:
+                    await storage.set(
+                        expire_key,
+                        str(expiry + time.time()).encode("utf-8"),
+                        expiry=ceil(expiry),
+                        noreply=False,
+                    )
+                return amount
+            else:
+                storage = await self.get_storage()
+                return await storage.incr(limit_key, amount) or amount
+        return value
+
+    async def get_expiry(self, key: str) -> float:
+        storage = await self.get_storage()
+        expiration_key = self._expiration_key(key).encode("utf-8")
+        item = (await storage.get(expiration_key)).get(expiration_key, None)
+
+        return item and float(item.value) or time.time()
+
+    async def check(self) -> bool:
+        """
+        Check if storage is healthy by calling the ``get`` command
+        on the key ``limiter-check``
+        """
+        try:
+            storage = await self.get_storage()
+            await storage.get(b"limiter-check")
+
+            return True
+        except:  # noqa
+            return False
limits/aio/storage/memory.py CHANGED
@@ -62,25 +62,29 @@ class MemoryStorage(
         asyncio.ensure_future(self.__schedule_expiry())
 
     async def __expire_events(self) -> None:
-        now = time.time()
-        for key in list(self.events.keys()):
-            cutoff = await asyncio.to_thread(
-                lambda evts: bisect.bisect_left(
-                    evts, -now, key=lambda event: -event.expiry
-                ),
-                self.events[key],
-            )
-            async with self.locks[key]:
-                self.events[key] = self.events[key][:cutoff]
-                if not self.events.get(key, None):
-                    self.events.pop(key, None)
+        try:
+            now = time.time()
+            for key in list(self.events.keys()):
+                cutoff = await asyncio.to_thread(
+                    lambda evts: bisect.bisect_left(
+                        evts, -now, key=lambda event: -event.expiry
+                    ),
+                    self.events[key],
+                )
+                async with self.locks[key]:
+                    if self.events.get(key, []):
+                        self.events[key] = self.events[key][:cutoff]
+                    if not self.events.get(key, None):
+                        self.events.pop(key, None)
+                        self.locks.pop(key, None)
+
+            for key in list(self.expirations.keys()):
+                if self.expirations[key] <= time.time():
+                    self.storage.pop(key, None)
+                    self.expirations.pop(key, None)
                     self.locks.pop(key, None)
-
-        for key in list(self.expirations.keys()):
-            if self.expirations[key] <= time.time():
-                self.storage.pop(key, None)
-                self.expirations.pop(key, None)
-                self.locks.pop(key, None)
+        except asyncio.CancelledError:
+            return
 
     async def __schedule_expiry(self) -> None:
         if not self.timer or self.timer.done():
@@ -268,3 +272,10 @@ class MemoryStorage(
         self.locks.clear()
 
         return num_items
+
+    def __del__(self) -> None:
+        try:
+            if self.timer and not self.timer.done():
+                self.timer.cancel()
+        except RuntimeError:  # noqa
+            pass
limits/aio/storage/redis/__init__.py CHANGED
@@ -51,6 +51,8 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         "valkey": Version("6.0"),
     }
     MODE: Literal["BASIC", "CLUSTER", "SENTINEL"] = "BASIC"
+    PREFIX = "LIMITS"
+
     bridge: RedisBridge
     storage_exceptions: tuple[Exception, ...]
     target_server: Literal["redis", "valkey"]
@@ -60,6 +62,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         uri: str,
         wrap_exceptions: bool = False,
         implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
+        key_prefix: str = PREFIX,
        **options: float | str | bool,
     ) -> None:
        """
@@ -86,6 +89,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         - ``redispy``: :class:`redis.asyncio.client.Redis`
         - ``valkey``: :class:`valkey.asyncio.client.Valkey`
 
+       :param key_prefix: the prefix for each key created in redis
       :param options: all remaining keyword arguments are passed
        directly to the constructor of :class:`coredis.Redis` or :class:`redis.asyncio.client.Redis`
       :raise ConfigurationError: when the redis library is not available
@@ -97,12 +101,18 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         super().__init__(uri, wrap_exceptions=wrap_exceptions)
         self.options = options
         if self.target_server == "valkey" or implementation == "valkey":
-            self.bridge = ValkeyBridge(uri, self.dependencies["valkey"].module)
+            self.bridge = ValkeyBridge(
+                uri, self.dependencies["valkey"].module, key_prefix
+            )
         else:
             if implementation == "redispy":
-                self.bridge = RedispyBridge(uri, self.dependencies["redis"].module)
+                self.bridge = RedispyBridge(
+                    uri, self.dependencies["redis"].module, key_prefix
+                )
             else:
-                self.bridge = CoredisBridge(uri, self.dependencies["coredis"].module)
+                self.bridge = CoredisBridge(
+                    uri, self.dependencies["coredis"].module, key_prefix
+                )
         self.configure_bridge()
         self.bridge.register_scripts()
 
@@ -226,7 +236,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
     async def reset(self) -> int | None:
         """
         This function calls a Lua Script to delete keys prefixed with
-        ``self.PREFIX`` in blocks of 5000.
+        :paramref:`RedisStorage.key_prefix` in blocks of 5000.
 
         .. warning:: This operation was designed to be fast, but was not tested
            on a large production based system. Be careful with its usage as it
@@ -268,6 +278,7 @@ class RedisClusterStorage(RedisStorage):
         uri: str,
         wrap_exceptions: bool = False,
         implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
+        key_prefix: str = RedisStorage.PREFIX,
        **options: float | str | bool,
     ) -> None:
        """
@@ -283,6 +294,7 @@ class RedisClusterStorage(RedisStorage):
         - ``coredis``: :class:`coredis.RedisCluster`
         - ``redispy``: :class:`redis.asyncio.cluster.RedisCluster`
         - ``valkey``: :class:`valkey.asyncio.cluster.ValkeyCluster`
+       :param key_prefix: the prefix for each key created in redis
       :param options: all remaining keyword arguments are passed
        directly to the constructor of :class:`coredis.RedisCluster` or
        :class:`redis.asyncio.RedisCluster`
@@ -293,6 +305,7 @@ class RedisClusterStorage(RedisStorage):
             uri,
             wrap_exceptions=wrap_exceptions,
             implementation=implementation,
+            key_prefix=key_prefix,
             **options,
         )
 
@@ -303,8 +316,8 @@ class RedisClusterStorage(RedisStorage):
         """
         Redis Clusters are sharded and deleting across shards
         can't be done atomically. Because of this, this reset loops over all
-        keys that are prefixed with ``self.PREFIX`` and calls delete on them,
-        one at a time.
+        keys that are prefixed with :paramref:`RedisClusterStorage.key_prefix`
+        and calls delete on them one at a time.
 
         .. warning:: This operation was not tested with extremely large data sets.
            On a large production based system, care should be taken with its
@@ -354,6 +367,7 @@ class RedisSentinelStorage(RedisStorage):
         uri: str,
         wrap_exceptions: bool = False,
         implementation: Literal["redispy", "coredis", "valkey"] = "coredis",
+        key_prefix: str = RedisStorage.PREFIX,
         service_name: str | None = None,
         use_replicas: bool = True,
         sentinel_kwargs: dict[str, float | str | bool] | None = None,
@@ -372,6 +386,7 @@ class RedisSentinelStorage(RedisStorage):
         - ``coredis``: :class:`coredis.sentinel.Sentinel`
         - ``redispy``: :class:`redis.asyncio.sentinel.Sentinel`
         - ``valkey``: :class:`valkey.asyncio.sentinel.Sentinel`
+       :param key_prefix: the prefix for each key created in redis
       :param service_name: sentinel service name (if not provided in `uri`)
       :param use_replicas: Whether to use replicas for read only operations
       :param sentinel_kwargs: optional arguments to pass as
@@ -391,6 +406,7 @@ class RedisSentinelStorage(RedisStorage):
             uri,
             wrap_exceptions=wrap_exceptions,
             implementation=implementation,
+            key_prefix=key_prefix,
             **options,
         )
 
limits/aio/storage/redis/bridge.py CHANGED
@@ -8,7 +8,6 @@ from limits.util import get_package_data
 
 
 class RedisBridge(ABC):
-    PREFIX = "LIMITS"
     RES_DIR = "resources/redis/lua_scripts"
 
     SCRIPT_MOVING_WINDOW = get_package_data(f"{RES_DIR}/moving_window.lua")
@@ -26,18 +25,20 @@ class RedisBridge(ABC):
         self,
         uri: str,
         dependency: ModuleType,
+        key_prefix: str,
     ) -> None:
         self.uri = uri
         self.parsed_uri = urllib.parse.urlparse(self.uri)
         self.dependency = dependency
         self.parsed_auth = {}
+        self.key_prefix = key_prefix
         if self.parsed_uri.username:
             self.parsed_auth["username"] = self.parsed_uri.username
         if self.parsed_uri.password:
             self.parsed_auth["password"] = self.parsed_uri.password
 
     def prefixed_key(self, key: str) -> str:
-        return f"{self.PREFIX}:{key}"
+        return f"{self.key_prefix}:{key}"
 
     @abstractmethod
     def register_scripts(self) -> None: ...
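
The `key_prefix` parameter added above replaces the fixed `LIMITS` prefix on the async redis storages. A hedged usage sketch; the URI and prefix value are illustrative:

```python
import asyncio

from limits import parse
from limits.aio.storage import RedisStorage
from limits.aio.strategies import MovingWindowRateLimiter


async def main() -> None:
    # Keys are created as "<key_prefix>:<key>"; the default remains "LIMITS".
    storage = RedisStorage("async+redis://localhost:6379", key_prefix="MYAPP")
    limiter = MovingWindowRateLimiter(storage)
    print(await limiter.hit(parse("10/minute"), "my-resource"))


asyncio.run(main())
```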
limits/storage/memcached.py CHANGED
@@ -153,6 +153,8 @@ class MemcachedStorage(Storage, SlidingWindowCounterSupport, TimestampedSlidingW
         Return multiple counters at once
 
         :param keys: the keys to get the counter values for
+
+        :meta private:
         """
         return self.storage.get_many(keys)
 
limits/storage/memory.py CHANGED
@@ -57,11 +57,11 @@ class MemoryStorage(
     def __expire_events(self) -> None:
         for key in list(self.events.keys()):
             with self.locks[key]:
-                events = self.events.get(key, [])
-                oldest = bisect.bisect_left(
-                    events, -time.time(), key=lambda event: -event.expiry
-                )
-                self.events[key] = self.events[key][:oldest]
+                if events := self.events.get(key, []):
+                    oldest = bisect.bisect_left(
+                        events, -time.time(), key=lambda event: -event.expiry
+                    )
+                    self.events[key] = self.events[key][:oldest]
             if not self.events.get(key, None):
                 self.locks.pop(key, None)
         for key in list(self.expirations.keys()):
limits/storage/redis.py CHANGED
@@ -68,6 +68,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         self,
         uri: str,
         connection_pool: redis.connection.ConnectionPool | None = None,
+        key_prefix: str = PREFIX,
         wrap_exceptions: bool = False,
        **options: float | str | bool,
     ) -> None:
@@ -82,6 +83,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         :pypi:`valkey`.
        :param connection_pool: if provided, the redis client is initialized with
         the connection pool and any other params passed as :paramref:`options`
+       :param key_prefix: the prefix for each key created in redis
       :param wrap_exceptions: Whether to wrap storage exceptions in
        :exc:`limits.errors.StorageError` before raising it.
       :param options: all remaining keyword arguments are passed
@@ -89,6 +91,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
        :raise ConfigurationError: when the :pypi:`redis` library is not available
        """
         super().__init__(uri, wrap_exceptions=wrap_exceptions, **options)
+        self.key_prefix = key_prefix
         self.target_server = "valkey" if uri.startswith("valkey") else "redis"
         self.dependency = self.dependencies[self.target_server].module
 
@@ -165,7 +168,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
         return f"{self._current_window_key(key)}/-1"
 
     def prefixed_key(self, key: str) -> str:
-        return f"{self.PREFIX}:{key}"
+        return f"{self.key_prefix}:{key}"
 
     def get_moving_window(self, key: str, limit: int, expiry: int) -> tuple[float, int]:
         """
@@ -295,7 +298,7 @@ class RedisStorage(Storage, MovingWindowSupport, SlidingWindowCounterSupport):
     def reset(self) -> int | None:
         """
         This function calls a Lua Script to delete keys prefixed with
-        ``self.PREFIX`` in blocks of 5000.
+        :paramref:`RedisStorage.key_prefix` in blocks of 5000.
 
         .. warning::
            This operation was designed to be fast, but was not tested
limits/storage/redis_cluster.py CHANGED
@@ -56,6 +56,7 @@ class RedisClusterStorage(RedisStorage):
     def __init__(
         self,
         uri: str,
+        key_prefix: str = RedisStorage.PREFIX,
         wrap_exceptions: bool = False,
        **options: float | str | bool,
     ) -> None:
@@ -65,6 +66,7 @@ class RedisClusterStorage(RedisStorage):
 
         If the uri scheme is ``valkey+cluster`` the implementation used will be from
         :pypi:`valkey`.
+       :param key_prefix: the prefix for each key created in redis
       :param wrap_exceptions: Whether to wrap storage exceptions in
        :exc:`limits.errors.StorageError` before raising it.
       :param options: all remaining keyword arguments are passed
@@ -86,6 +88,7 @@ class RedisClusterStorage(RedisStorage):
             host, port = loc.split(":")
             cluster_hosts.append((host, int(port)))
 
+        self.key_prefix = key_prefix
         self.storage = None
         self.target_server = "valkey" if uri.startswith("valkey") else "redis"
         merged_options = {**self.DEFAULT_OPTIONS, **parsed_auth, **options}
@@ -108,8 +111,8 @@ class RedisClusterStorage(RedisStorage):
         """
         Redis Clusters are sharded and deleting across shards
         can't be done atomically. Because of this, this reset loops over all
-        keys that are prefixed with ``self.PREFIX`` and calls delete on them,
-        one at a time.
+        keys that are prefixed with :paramref:`RedisClusterStorage.prefix` and
+        calls delete on them one at a time.
 
         .. warning::
            This operation was not tested with extremely large data sets.
limits/storage/redis_sentinel.py CHANGED
@@ -45,6 +45,7 @@ class RedisSentinelStorage(RedisStorage):
         service_name: str | None = None,
         use_replicas: bool = True,
         sentinel_kwargs: dict[str, float | str | bool] | None = None,
+        key_prefix: str = RedisStorage.PREFIX,
         wrap_exceptions: bool = False,
        **options: float | str | bool,
     ) -> None:
@@ -59,6 +60,7 @@ class RedisSentinelStorage(RedisStorage):
        :param use_replicas: Whether to use replicas for read only operations
        :param sentinel_kwargs: kwargs to pass as
         :attr:`sentinel_kwargs` to :class:`redis.sentinel.Sentinel`
+       :param key_prefix: the prefix for each key created in redis
       :param wrap_exceptions: Whether to wrap storage exceptions in
        :exc:`limits.errors.StorageError` before raising it.
       :param options: all remaining keyword arguments are passed
@@ -87,6 +89,7 @@ class RedisSentinelStorage(RedisStorage):
         for loc in parsed.netloc[sep:].split(","):
             host, port = loc.split(":")
             sentinel_configuration.append((host, int(port)))
+        self.key_prefix = key_prefix
         self.service_name = (
             parsed.path.replace("/", "") if parsed.path else service_name
         )
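
The same `key_prefix` parameter is threaded through the synchronous redis storages above. A brief sketch, assuming a local redis server; the URI, prefix, and limit are illustrative:

```python
from limits import parse
from limits.storage import RedisStorage
from limits.strategies import FixedWindowRateLimiter

# key_prefix defaults to "LIMITS"; here every key is created under "MYAPP:".
storage = RedisStorage("redis://localhost:6379", key_prefix="MYAPP")
limiter = FixedWindowRateLimiter(storage)
print(limiter.hit(parse("10/minute"), "my-resource"))
```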
limits/typing.py CHANGED
@@ -107,6 +107,7 @@ __all__ = [
     "Callable",
     "ClassVar",
     "Counter",
+    "Iterable",
     "Literal",
     "MemcachedClientP",
     "MongoClient",
limits-5.0.0rc1.dist-info/METADATA → limits-5.1.0.dist-info/METADATA RENAMED
@@ -1,6 +1,6 @@
 Metadata-Version: 2.4
 Name: limits
-Version: 5.0.0rc1
+Version: 5.1.0
 Summary: Rate limiting utilities
 Home-page: https://limits.readthedocs.org
 Author: Ali-Akber Saifee
@@ -22,7 +22,7 @@ Classifier: Programming Language :: Python :: Implementation :: PyPy
 Requires-Python: >=3.10
 License-File: LICENSE.txt
 Requires-Dist: deprecated>=1.2
-Requires-Dist: packaging<25,>=21
+Requires-Dist: packaging<26,>=21
 Requires-Dist: typing_extensions
 Provides-Extra: redis
 Requires-Dist: redis!=4.5.2,!=4.5.3,<6.0.0,>3; extra == "redis"
@@ -274,5 +274,6 @@ Links
 =====
 
 * `Documentation <http://limits.readthedocs.org/en/latest>`_
+* `Benchmarks <http://limits.readthedocs.org/en/latest/performance.html>`_
 * `Changelog <http://limits.readthedocs.org/en/stable/changelog.html>`_
 
limits-5.0.0rc1.dist-info/RECORD → limits-5.1.0.dist-info/RECORD RENAMED
@@ -1,21 +1,24 @@
 limits/__init__.py,sha256=gPUFrt02kHF_syLjiVRSs-S4UVGpRMcM2VMFNhF6G24,748
-limits/_version.py,sha256=kltwWj42pO4gRxnH3YVXMAqHthKTyYt67Uzylwnaayo,500
+limits/_version.py,sha256=lLFEUIKCe-XUvEBxgpwkb-a4zLkFZoXtkgamjxeqlCA,497
 limits/errors.py,sha256=s1el9Vg0ly-z92guvnvYNgKi3_aVqpiw_sufemiLLTI,662
 limits/limits.py,sha256=YzzZP8_ay_zlMMnnY2xhAcFTTFvFe5HEk8NQlvUTru4,4907
 limits/py.typed,sha256=47DEQpj8HBSa-_TImW-5JCeuQeRkm5NMpJWZG3hSuFU,0
 limits/strategies.py,sha256=LeZ6lnE73EIQqQ8TfKaTzlxNvBMrZOOSXFB0l8D17fI,9946
-limits/typing.py,sha256=vmutp9AD8XXQ5U48fM7rVaBSoK9fWalkhnK_TGg8xxY,3267
+limits/typing.py,sha256=pVt5D23MhQSUGqi0MBG5FCSqDwta2ygu18BpKvJFxow,3283
 limits/util.py,sha256=nk5QYvezFuXPq1OTEj04RrZFSWIH-khT0e_Dim6zGCw,6002
 limits/version.py,sha256=YwkF3dtq1KGzvmL3iVGctA8NNtGlK_0arrzZkZGVjUs,47
 limits/aio/__init__.py,sha256=yxvWb_ZmV245Hg2LqD365WC5IDllcGDMw6udJ1jNp1g,118
 limits/aio/strategies.py,sha256=RzZExH2r6jnHra4SpDHqtZCC0Bo3085zUJYo2boAj6Y,9897
 limits/aio/storage/__init__.py,sha256=vKeArUnN1ld_0mQOBBZPCjaQgM5xI1GBPM7_F2Ydz5c,646
 limits/aio/storage/base.py,sha256=VfHpL9Z3RL76eKhoaSQKLKQsqcF5B2bnF6gfa-8ltWA,6296
-limits/aio/storage/memcached.py,sha256=q1t9wowCg04-pHavg0i5xEly4KMgVkW5ptyGNA4RiqU,9195
-limits/aio/storage/memory.py,sha256=HdkQPcjjHv1MhGoeYWOkBwwMj5e0tQbaO0OlECzh64A,9178
+limits/aio/storage/memory.py,sha256=sWrDzOe-6Opy9uFmfP1S38IbN2_wNCBaIHTS4UTRy6g,9562
 limits/aio/storage/mongodb.py,sha256=tIMfQrseONRMR2nuRmPO7ocp8dTCABfqBICS_kgp550,19141
-limits/aio/storage/redis/__init__.py,sha256=lwoKk91YLEBlZ3W6hCnQ1e7Gc6LxpvSzZZW16saCyR4,14143
-limits/aio/storage/redis/bridge.py,sha256=eoRi9h2bSy194cVwoKgRYQV1HQ7SvwarL-4LeazrxeA,3145
+limits/aio/storage/memcached/__init__.py,sha256=VMWsH4XpaPswtPV7cQmsfckhVRbOOrKvoUPYnGt5MRY,6611
+limits/aio/storage/memcached/bridge.py,sha256=3CEruS6LvZWDQPGPLlwY4hemy6oN0WWduUE7t8vyXBI,2017
+limits/aio/storage/memcached/emcache.py,sha256=J01jP-Udd2fLgamCh2CX9NEIvhN8eZVTzUok096Bbe4,3833
+limits/aio/storage/memcached/memcachio.py,sha256=OoGVqOVG0pVX2McFeTGQ_AbiqQUu_FYwWItpQMtNV7g,3491
+limits/aio/storage/redis/__init__.py,sha256=dU0FsaDv53kKGpZg-CA_F1VjLEdrxGakSHpfuh1cyYM,14756
+limits/aio/storage/redis/bridge.py,sha256=tz6WGViOqIm81hjGPUOBlz-Qw0tSB71NIttn7Xb5lok,3189
 limits/aio/storage/redis/coredis.py,sha256=IzfEyXBvQbr4QUWML9xAd87a2aHCvglOBEjAg-Vq4z0,7420
 limits/aio/storage/redis/redispy.py,sha256=HS1H6E9g0dP3G-8tSUILIFoc8JWpeRQOiBxcpL3I0gM,8310
 limits/aio/storage/redis/valkey.py,sha256=f_-HPZhzNspywGybMNIL0F5uDZk76v8_K9wuC5ZeKhc,248
@@ -27,15 +30,15 @@ limits/resources/redis/lua_scripts/moving_window.lua,sha256=zlieQwfET0BC7sxpfiOu
 limits/resources/redis/lua_scripts/sliding_window.lua,sha256=qG3Yg30Dq54QpRUcR9AOrKQ5bdJiaYpCacTm6Kxblvc,713
 limits/storage/__init__.py,sha256=9iNxIlwzLQw2d54EcMa2LBJ47wiWCPOnHgn6ddqKkDI,2652
 limits/storage/base.py,sha256=IdOL_iqR9KhaJO73M_h9c6OYe8Ox632pxx5uXaL9Dbo,6860
-limits/storage/memcached.py,sha256=7ZdT80OgQa0f16Tr2L2zGu8PZGzLryOvlb4D1Qk7z9I,10193
-limits/storage/memory.py,sha256=Qc13tGIOFPJ5maehxZdT0074qMvkW-RrzoyZIbFuRyE,8698
+limits/storage/memcached.py,sha256=5GUKGWS_BYTwUss2WmOlCwBtOieGT7AFUcpX65WYXdQ,10217
+limits/storage/memory.py,sha256=rVlsirSp9LDhuqNFp6KMLR85fJc9xwrU58IHIVz6eq4,8719
 limits/storage/mongodb.py,sha256=V4Ib_AwPFX6JpNI7oUUGJx_3MxD8EmYAi4Q6QcWnQ5U,18071
-limits/storage/redis.py,sha256=i_6qh4S6JQd-lG6eRJdTPxNnZIAkm4G0cA0mfow9OOk,10389
-limits/storage/redis_cluster.py,sha256=z6aONMl4p1AY78G3J0BbtK--uztz88krwnpiOsU61BM,4447
-limits/storage/redis_sentinel.py,sha256=AN0WtwHN88TvXk0C2uUE8l5Jhsd1ZxU8XSqrEyQSR20,4327
+limits/storage/redis.py,sha256=GwD1fODE8pZxm403UwEuhbRHG1bprqoxyuFkIp_K5QQ,10552
+limits/storage/redis_cluster.py,sha256=GkL8GCQFfxDriMzsPMkaj6pMEX5FvQXYpUtXLY5q8fQ,4621
+limits/storage/redis_sentinel.py,sha256=OSb61DxgUxMgXSIjaM_pF5-entD8XntD56xt0rFu89k,4479
 limits/storage/registry.py,sha256=CxSaDBGR5aBJPFAIsfX9axCnbcThN3Bu-EH4wHrXtu8,650
-limits-5.0.0rc1.dist-info/licenses/LICENSE.txt,sha256=T6i7kq7F5gIPfcno9FCxU5Hcwm22Bjq0uHZV3ElcjsQ,1061
-limits-5.0.0rc1.dist-info/METADATA,sha256=iXK14MS_wl5xFrRblJE9Niv54qsuaGhW-_49-vIrSrY,10828
-limits-5.0.0rc1.dist-info/WHEEL,sha256=CmyFI0kx5cdEMTLiONQRbGQwjIoR1aIYB7eCAQ4KPJ0,91
-limits-5.0.0rc1.dist-info/top_level.txt,sha256=C7g5ahldPoU2s6iWTaJayUrbGmPK1d6e9t5Nn0vQ2jM,7
-limits-5.0.0rc1.dist-info/RECORD,,
+limits-5.1.0.dist-info/licenses/LICENSE.txt,sha256=T6i7kq7F5gIPfcno9FCxU5Hcwm22Bjq0uHZV3ElcjsQ,1061
+limits-5.1.0.dist-info/METADATA,sha256=Me_Gk0sTOOVK8weSyIufZ1ie9SpIKiEA2bQIMQrwBbo,10900
+limits-5.1.0.dist-info/WHEEL,sha256=pxyMxgL8-pra_rKaQ4drOZAegBVuX-G_4nRHjjgWbmo,91
+limits-5.1.0.dist-info/top_level.txt,sha256=C7g5ahldPoU2s6iWTaJayUrbGmPK1d6e9t5Nn0vQ2jM,7
+limits-5.1.0.dist-info/RECORD,,
limits-5.0.0rc1.dist-info/WHEEL → limits-5.1.0.dist-info/WHEEL RENAMED
@@ -1,5 +1,5 @@
 Wheel-Version: 1.0
-Generator: setuptools (78.1.0)
+Generator: setuptools (79.0.0)
 Root-Is-Purelib: true
 Tag: py3-none-any
 