call-limiter 0.0.1__tar.gz
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- call_limiter-0.0.1/LICENSE +21 -0
- call_limiter-0.0.1/PKG-INFO +153 -0
- call_limiter-0.0.1/README.md +106 -0
- call_limiter-0.0.1/call_limiter/__init__.py +4 -0
- call_limiter-0.0.1/call_limiter/limiter.py +148 -0
- call_limiter-0.0.1/call_limiter.egg-info/PKG-INFO +153 -0
- call_limiter-0.0.1/call_limiter.egg-info/SOURCES.txt +10 -0
- call_limiter-0.0.1/call_limiter.egg-info/dependency_links.txt +1 -0
- call_limiter-0.0.1/call_limiter.egg-info/top_level.txt +1 -0
- call_limiter-0.0.1/pyproject.toml +33 -0
- call_limiter-0.0.1/setup.cfg +4 -0
- call_limiter-0.0.1/tests/test_limiter.py +267 -0
|
@@ -0,0 +1,21 @@
|
|
|
1
|
+
MIT License
|
|
2
|
+
|
|
3
|
+
Copyright (c) 2024 emre
|
|
4
|
+
|
|
5
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
6
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
7
|
+
in the Software without restriction, including without limitation the rights
|
|
8
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
9
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
10
|
+
furnished to do so, subject to the following conditions:
|
|
11
|
+
|
|
12
|
+
The above copyright notice and this permission notice shall be included in all
|
|
13
|
+
copies or substantial portions of the Software.
|
|
14
|
+
|
|
15
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
16
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
17
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
18
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
19
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
20
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
21
|
+
SOFTWARE.
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: call-limiter
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Rate Limiter with retry - A hardware-calibrated approach for execution pacing and resilience.
|
|
5
|
+
Author: eyukselen
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2024 emre
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
Project-URL: Homepage, https://github.com/eyukselen/call-limiter
|
|
28
|
+
Project-URL: Source Code, https://github.com/eyukselen/call-limiter
|
|
29
|
+
Keywords: rate-limit,rate-limiter,ratelimit,retry,throttle,backoff,throttling
|
|
30
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
31
|
+
Classifier: Operating System :: OS Independent
|
|
32
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
33
|
+
Classifier: Topic :: Utilities
|
|
34
|
+
Classifier: Topic :: Software Development :: Testing
|
|
35
|
+
Classifier: Programming Language :: Python :: 3
|
|
36
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
37
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
38
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
39
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
40
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
41
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
42
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
43
|
+
Requires-Python: >=3.8
|
|
44
|
+
Description-Content-Type: text/markdown
|
|
45
|
+
License-File: LICENSE
|
|
46
|
+
Dynamic: license-file
|
|
47
|
+
|
|
48
|
+
# call-limiter 🚀
|
|
49
|
+
|
|
50
|
+

|
|
51
|
+

|
|
52
|
+
[](https://github.com/eyukselen/call-limiter/actions)
|
|
53
|
+
|
|
54
|
+
Thread-safe Python decorators for synchronized rate limiting and retry logic.
|
|
55
|
+
## 📦 Core Components
|
|
56
|
+
|
|
57
|
+
* **CallLimiter**: A high-precision throttler that paces function calls to stay within specific rate limits.
|
|
58
|
+
* **CallRetry**: A resilience decorator that re-runs failed functions with a configurable delay and exception handling.
|
|
59
|
+
* **ResilientLimiter**: A hybrid solution that combines pacing with Coordinated Recovery, ensuring retries never exceed your defined rate limit across threads.
|
|
60
|
+
|
|
61
|
+
## 🛠 Installation
|
|
62
|
+
|
|
63
|
+
```
|
|
64
|
+
pip install call-limiter
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
---
|
|
68
|
+
### Component 1: CallLimiter
|
|
69
|
+
|
|
70
|
+
**Scenario:** I want to "rate limit" (throttle) my function so it limits my calls to 5 calls per second. I also want to have an option to select if I want 5 calls to fire instantly or spread across evenly in the 1 second period.
|
|
71
|
+
|
|
72
|
+
**Usage-1: 5 calls per 1 second with burst (instantly fire all 5 calls)**
|
|
73
|
+
*Best for: Maximizing throughput when the target API allows short spikes.*
|
|
74
|
+
|
|
75
|
+
**My function to throttle:** `my_function`
|
|
76
|
+
|
|
77
|
+
```python
|
|
78
|
+
from call_limiter import CallLimiter
|
|
79
|
+
|
|
80
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=True)
|
|
81
|
+
throttled_func = limiter(my_function)
|
|
82
|
+
```
|
|
83
|
+
**Usage-2: 5 calls per 1 second paced (evenly spread calls)**
|
|
84
|
+
*Best for: Avoiding "spiky" traffic patterns that trigger anti-bot protections.*
|
|
85
|
+
```python
|
|
86
|
+
|
|
87
|
+
from call_limiter import CallLimiter
|
|
88
|
+
|
|
89
|
+
# This forces a call exactly every 0.2 seconds (1s / 5 calls)
|
|
90
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=False)
|
|
91
|
+
throttled_func = limiter(my_function)
|
|
92
|
+
```
|
|
93
|
+
---
|
|
94
|
+
### Component 2: CallRetry
|
|
95
|
+
|
|
96
|
+
**Scenario:** I want a retry logic to use with my function calls.
|
|
97
|
+
If `my_function` raises ValueError exception, it should retry up to 5 times with 1-second delay between attempts.
|
|
98
|
+
I want to log every retry with `retry_logger` function.
|
|
99
|
+
if it still fails, it should use `fail_handler` function. (if not provided, raise error)
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
from call_limiter import CallRetry
|
|
103
|
+
|
|
104
|
+
# This configuration perfectly mirrors your scenario:
|
|
105
|
+
retry = CallRetry(
|
|
106
|
+
retry_count=5,
|
|
107
|
+
retry_interval=1.0,
|
|
108
|
+
retry_exceptions=(ValueError,), # Trigger
|
|
109
|
+
on_retry=retry_logger, # Observability
|
|
110
|
+
fallback=fail_handler # Outcome (Plan B)
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
# If fail_handler is a function, this returns its result on ultimate failure.
|
|
114
|
+
# If you didn't pass fail_handler, it would raise the ValueError.
|
|
115
|
+
resilient_func = retry(my_function)
|
|
116
|
+
```
|
|
117
|
+
---
|
|
118
|
+
### Component 3: ResilientLimiter
|
|
119
|
+
**Scenario:** I want a rate limiter that can also handle failed calls. `my_function` should be called
|
|
120
|
+
Flow Logic:
|
|
121
|
+
* 5 calls/per second with burst (or drip),
|
|
122
|
+
* max_retry = 3 (if it fails)
|
|
123
|
+
* on_retry=`retry_handler`, notify me by calling optional `retry_handler`, if not provided ignore!
|
|
124
|
+
* fallback=`fallback_handler`: if it still fails, notify me; if not provided, raise the error!
|
|
125
|
+
Note: each retry will comply "5 calls/per second with burst (or drip)" tempo to respect rate limiter
|
|
126
|
+
Note: on_retry receives (exception, attempt_number), while fallback is a simple callable.
|
|
127
|
+
```python
|
|
128
|
+
from call_limiter import ResilientLimiter
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
limiter = ResilientLimiter(
|
|
132
|
+
calls=5,
|
|
133
|
+
period=1.0,
|
|
134
|
+
allow_burst=True,
|
|
135
|
+
retry_count=3,
|
|
136
|
+
on_retry=retry_handler,
|
|
137
|
+
fallback=fail_handler
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
@limiter
|
|
141
|
+
def my_function():
|
|
142
|
+
# This will respect the 5/sec pace, even during retries.
|
|
143
|
+
pass
|
|
144
|
+
```
|
|
145
|
+
---
|
|
146
|
+
## ✨ Key Features
|
|
147
|
+
|
|
148
|
+
* Low-Jitter Timing: Uses time.perf_counter() and resolution-aware sleeping to prevent the "creeping delays" common in standard rate limiters.
|
|
149
|
+
* Zero-Hardcode Logic: Accounts for "OS Jitter" to ensure time.sleep remains accurate even under system load.
|
|
150
|
+
* Thread-Safe: Designed for multithreaded environments where multiple workers hit the same limited resource.
|
|
151
|
+
* Thread-Synchronized State: Shared locks ensure that 10 threads hitting the same limiter behave as a single unit.
|
|
152
|
+
* Synchronized Pacing: In hybrid mode, retries are queued through the global limiter, preventing a 'thundering herd' and ensuring you never exceed your quota during recovery.
|
|
153
|
+
*
|
|
@@ -0,0 +1,106 @@
|
|
|
1
|
+
# call-limiter 🚀
|
|
2
|
+
|
|
3
|
+

|
|
4
|
+

|
|
5
|
+
[](https://github.com/eyukselen/call-limiter/actions)
|
|
6
|
+
|
|
7
|
+
Thread-safe Python decorators for synchronized rate limiting and retry logic.
|
|
8
|
+
## 📦 Core Components
|
|
9
|
+
|
|
10
|
+
* **CallLimiter**: A high-precision throttler that paces function calls to stay within specific rate limits.
|
|
11
|
+
* **CallRetry**: A resilience decorator that re-runs failed functions with a configurable delay and exception handling.
|
|
12
|
+
* **ResilientLimiter**: A hybrid solution that combines pacing with Coordinated Recovery, ensuring retries never exceed your defined rate limit across threads.
|
|
13
|
+
|
|
14
|
+
## 🛠 Installation
|
|
15
|
+
|
|
16
|
+
```
|
|
17
|
+
pip install call-limiter
|
|
18
|
+
```
|
|
19
|
+
|
|
20
|
+
---
|
|
21
|
+
### Component 1: CallLimiter
|
|
22
|
+
|
|
23
|
+
**Scenario:** I want to "rate limit" (throttle) my function so it limits my calls to 5 calls per second. I also want to have an option to select if I want 5 calls to fire instantly or spread across evenly in the 1 second period.
|
|
24
|
+
|
|
25
|
+
**Usage-1: 5 calls per 1 second with burst (instantly fire all 5 calls)**
|
|
26
|
+
*Best for: Maximizing throughput when the target API allows short spikes.*
|
|
27
|
+
|
|
28
|
+
**My function to throttle:** `my_function`
|
|
29
|
+
|
|
30
|
+
```python
|
|
31
|
+
from call_limiter import CallLimiter
|
|
32
|
+
|
|
33
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=True)
|
|
34
|
+
throttled_func = limiter(my_function)
|
|
35
|
+
```
|
|
36
|
+
**Usage-2: 5 calls per 1 second paced (evenly spread calls)**
|
|
37
|
+
*Best for: Avoiding "spiky" traffic patterns that trigger anti-bot protections.*
|
|
38
|
+
```python
|
|
39
|
+
|
|
40
|
+
from call_limiter import CallLimiter
|
|
41
|
+
|
|
42
|
+
# This forces a call exactly every 0.2 seconds (1s / 5 calls)
|
|
43
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=False)
|
|
44
|
+
throttled_func = limiter(my_function)
|
|
45
|
+
```
|
|
46
|
+
---
|
|
47
|
+
### Component 2: CallRetry
|
|
48
|
+
|
|
49
|
+
**Scenario:** I want a retry logic to use with my function calls.
|
|
50
|
+
If `my_function` raises ValueError exception, it should retry up to 5 times with 1-second delay between attempts.
|
|
51
|
+
I want to log every retry with `retry_logger` function.
|
|
52
|
+
if it still fails, it should use `fail_handler` function. (if not provided, raise error)
|
|
53
|
+
|
|
54
|
+
```python
|
|
55
|
+
from call_limiter import CallRetry
|
|
56
|
+
|
|
57
|
+
# This configuration perfectly mirrors your scenario:
|
|
58
|
+
retry = CallRetry(
|
|
59
|
+
retry_count=5,
|
|
60
|
+
retry_interval=1.0,
|
|
61
|
+
retry_exceptions=(ValueError,), # Trigger
|
|
62
|
+
on_retry=retry_logger, # Observability
|
|
63
|
+
fallback=fail_handler # Outcome (Plan B)
|
|
64
|
+
)
|
|
65
|
+
|
|
66
|
+
# If fail_handler is a function, this returns its result on ultimate failure.
|
|
67
|
+
# If you didn't pass fail_handler, it would raise the ValueError.
|
|
68
|
+
resilient_func = retry(my_function)
|
|
69
|
+
```
|
|
70
|
+
---
|
|
71
|
+
### Component 3: ResilientLimiter
|
|
72
|
+
**Scenario:** I want a rate limiter that can also handle failed calls. `my_function` should be called
|
|
73
|
+
Flow Logic:
|
|
74
|
+
* 5 calls/per second with burst (or drip),
|
|
75
|
+
* max_retry = 3 (if it fails)
|
|
76
|
+
* on_retry=`retry_handler`, notify me by calling optional `retry_handler`, if not provided ignore!
|
|
77
|
+
* fallback=`fallback_handler`: if it still fails, notify me; if not provided, raise the error!
|
|
78
|
+
Note: each retry will comply "5 calls/per second with burst (or drip)" tempo to respect rate limiter
|
|
79
|
+
Note: on_retry receives (exception, attempt_number), while fallback is a simple callable.
|
|
80
|
+
```python
|
|
81
|
+
from call_limiter import ResilientLimiter
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
limiter = ResilientLimiter(
|
|
85
|
+
calls=5,
|
|
86
|
+
period=1.0,
|
|
87
|
+
allow_burst=True,
|
|
88
|
+
retry_count=3,
|
|
89
|
+
on_retry=retry_handler,
|
|
90
|
+
fallback=fail_handler
|
|
91
|
+
)
|
|
92
|
+
|
|
93
|
+
@limiter
|
|
94
|
+
def my_function():
|
|
95
|
+
# This will respect the 5/sec pace, even during retries.
|
|
96
|
+
pass
|
|
97
|
+
```
|
|
98
|
+
---
|
|
99
|
+
## ✨ Key Features
|
|
100
|
+
|
|
101
|
+
* Low-Jitter Timing: Uses time.perf_counter() and resolution-aware sleeping to prevent the "creeping delays" common in standard rate limiters.
|
|
102
|
+
* Zero-Hardcode Logic: Accounts for "OS Jitter" to ensure time.sleep remains accurate even under system load.
|
|
103
|
+
* Thread-Safe: Designed for multithreaded environments where multiple workers hit the same limited resource.
|
|
104
|
+
* Thread-Synchronized State: Shared locks ensure that 10 threads hitting the same limiter behave as a single unit.
|
|
105
|
+
* Synchronized Pacing: In hybrid mode, retries are queued through the global limiter, preventing a 'thundering herd' and ensuring you never exceed your quota during recovery.
|
|
106
|
+
*
|
|
@@ -0,0 +1,148 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import threading
|
|
3
|
+
from functools import wraps
|
|
4
|
+
from typing import Callable, Tuple, Type, Optional, Any
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class CallLimiter:
    """Thread-safe token-bucket rate limiter usable as a decorator.

    With allow_burst=True the bucket holds `calls` tokens, so up to `calls`
    invocations run back-to-back before the limiter blocks; with
    allow_burst=False the capacity is 1, which paces invocations at one per
    (period / calls) seconds.
    """

    def __init__(self, calls: int, period: float = 1.0, allow_burst: bool = False):
        # Refill rate in tokens per second.
        self.rate = calls / period
        # Bucket size: full burst allowance, or a single token for even pacing.
        self.capacity = float(calls) if allow_burst else 1.0
        self.tokens = self.capacity
        self.last_refill = time.perf_counter()
        # Shared lock so concurrent threads consume tokens as a single unit.
        self.lock = threading.Lock()

        # Hardware Calibration: measure the cost of one perf_counter() call.
        # NOTE(review): self.pulse is assigned here but never read afterwards
        # in this class — dead calibration? confirm before removing.
        t0 = time.perf_counter()
        _ = time.perf_counter()
        self.pulse = time.perf_counter() - t0
        # Largest observed time.sleep() overshoot; grows adaptively in wait().
        self.os_jitter = 0.0

    def wait(self):
        """Block the caller until one token is available, then consume it."""
        with self.lock:
            now = time.perf_counter()

            # If a full window (capacity / rate seconds) has elapsed,
            # reset the bucket and start a new window.
            if now - self.last_refill >= (1.0 / self.rate * self.capacity): # Total period
                self.tokens = self.capacity
                self.last_refill = now

            if self.tokens < 1.0:
                # Time remaining until the current window ends.
                sleep_needed = (self.last_refill + (self.capacity / self.rate)) - now

                if sleep_needed > 0:
                    # --- High Precision Sleep ---
                    # Sleep slightly short of the target (by the observed OS
                    # jitter), then spin the remainder for accuracy.
                    if sleep_needed > self.os_jitter:
                        t_before = time.perf_counter()
                        time.sleep(max(0, sleep_needed - self.os_jitter))
                        # Record any overshoot so future sleeps compensate.
                        self.os_jitter = max(self.os_jitter, (time.perf_counter() - t_before) - sleep_needed)

                    # Busy-wait up to the exact target instant.
                    # NOTE(review): this spin (and the sleep above) runs while
                    # self.lock is held, so all other threads queue behind it.
                    target = now + sleep_needed
                    while time.perf_counter() < target:
                        pass

                    # After waiting out the window, refill the bucket.
                    self.tokens = self.capacity
                    self.last_refill = time.perf_counter()

            # Consume one token for the current call.
            self.tokens -= 1.0

    def __call__(self, func):
        """Decorate `func` so every invocation first passes through wait()."""
        @wraps(func)
        def wrapper(*args, **kwargs):
            self.wait()
            return func(*args, **kwargs)

        return wrapper
|
|
58
|
+
|
|
59
|
+
|
|
60
|
+
class CallRetry:
    """Decorator that re-invokes a failing callable a bounded number of times.

    A function wrapped by this class gets `retry_count + 1` total attempts.
    Each failing attempt (matching `retry_exceptions`) optionally notifies
    `on_retry(exception, attempt_number)` and sleeps `retry_interval` seconds.
    When all attempts fail, `fallback(exception)` is returned if provided;
    otherwise the last exception propagates.
    """

    def __init__(
        self,
        retry_count: int = 5,
        retry_interval: float = 1.0,
        retry_exceptions: Tuple[Type[Exception], ...] = (Exception,),
        on_retry: Optional[Callable[[Exception, int], None]] = None,
        fallback: Optional[Callable[[Exception], Any]] = None
    ):
        self.retry_count = retry_count
        self.retry_interval = retry_interval
        self.retry_exceptions = retry_exceptions
        self.on_retry = on_retry
        self.fallback = fallback

    def __call__(self, func: Callable) -> Callable:
        @wraps(func)
        def wrapper(*args, **kwargs):
            # retry_count retries after the first attempt.
            max_attempts = self.retry_count + 1
            attempt = 0

            while True:
                attempt += 1
                try:
                    return func(*args, **kwargs)
                except self.retry_exceptions as exc:
                    if attempt < max_attempts:
                        # Observability hook: report the failure and attempt no.
                        if self.on_retry:
                            self.on_retry(exc, attempt)
                        time.sleep(self.retry_interval)
                    else:
                        # Attempts exhausted: delegate to the fallback if any,
                        # otherwise surface the final exception.
                        if self.fallback:
                            return self.fallback(exc)
                        raise

        return wrapper
|
|
104
|
+
|
|
105
|
+
|
|
106
|
+
# NOTE(review): these imports sit mid-module (between CallRetry and
# ResilientLimiter) instead of at the top of the file. `functools` is needed
# by ResilientLimiter below so it must stay in scope, but it belongs in the
# module header; the typing line duplicates names already imported at the top
# of this file and could be dropped when the imports are consolidated.
import functools
from typing import Callable, Optional, Any, Tuple, Type
|
|
108
|
+
|
|
109
|
+
|
|
110
|
+
class ResilientLimiter:
    """Combines CallLimiter pacing with CallRetry resilience.

    Every attempt — the first call and each retry — passes through the
    shared rate limiter, so recovery traffic never exceeds the configured
    calls/period budget (as promised by the README).

    Args:
        calls: Maximum calls allowed per `period`.
        period: Window length in seconds.
        allow_burst: Fire up to `calls` instantly (True) or pace evenly (False).
        retry_count: Extra attempts after the first failure.
        retry_interval: Delay in seconds between attempts.
        retry_exceptions: Exception types that trigger a retry.
        on_retry: Optional callback(exception, attempt_number) fired per retry.
        fallback: Optional callable(exception) whose result is returned when
            all attempts fail; if omitted, the last exception is raised.
    """

    def __init__(
        self,
        calls: int,
        period: float = 1.0,
        allow_burst: bool = False,
        retry_count: int = 3,
        retry_interval: float = 1.0,
        retry_exceptions: Tuple[Type[Exception], ...] = (Exception,),
        on_retry: Optional[Callable[[Exception, int], None]] = None,
        fallback: Optional[Callable[[Exception], Any]] = None
    ):
        # 1. The Pace: a shared token bucket for all attempts.
        self.limiter = CallLimiter(
            calls=calls,
            period=period,
            allow_burst=allow_burst
        )

        # 2. The Resilience: retry with optional observability and fallback.
        self.retry = CallRetry(
            retry_count=retry_count,
            retry_interval=retry_interval,
            retry_exceptions=retry_exceptions,
            on_retry=on_retry,
            fallback=fallback
        )

    def __call__(self, func: Callable) -> Callable:
        # BUG FIX: the retry layer must be the OUTERMOST wrapper. Decorators
        # apply bottom-up, so with @self.retry above @self.limiter the
        # function the retry loop re-invokes is the limiter-wrapped one —
        # every individual attempt (including retries) blocks in
        # limiter.wait(). The previous order (@self.limiter outermost) paid
        # the wait only once per outer call, letting retries bypass the rate
        # limit entirely, contradicting the documented behavior.
        @self.retry
        @self.limiter
        @functools.wraps(func)
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)

        return wrapper
|
|
@@ -0,0 +1,153 @@
|
|
|
1
|
+
Metadata-Version: 2.4
|
|
2
|
+
Name: call-limiter
|
|
3
|
+
Version: 0.0.1
|
|
4
|
+
Summary: Rate Limiter with retry - A hardware-calibrated approach for execution pacing and resilience.
|
|
5
|
+
Author: eyukselen
|
|
6
|
+
License: MIT License
|
|
7
|
+
|
|
8
|
+
Copyright (c) 2024 emre
|
|
9
|
+
|
|
10
|
+
Permission is hereby granted, free of charge, to any person obtaining a copy
|
|
11
|
+
of this software and associated documentation files (the "Software"), to deal
|
|
12
|
+
in the Software without restriction, including without limitation the rights
|
|
13
|
+
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
|
14
|
+
copies of the Software, and to permit persons to whom the Software is
|
|
15
|
+
furnished to do so, subject to the following conditions:
|
|
16
|
+
|
|
17
|
+
The above copyright notice and this permission notice shall be included in all
|
|
18
|
+
copies or substantial portions of the Software.
|
|
19
|
+
|
|
20
|
+
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
|
21
|
+
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
|
22
|
+
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
|
23
|
+
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
|
24
|
+
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
|
25
|
+
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
|
26
|
+
SOFTWARE.
|
|
27
|
+
Project-URL: Homepage, https://github.com/eyukselen/call-limiter
|
|
28
|
+
Project-URL: Source Code, https://github.com/eyukselen/call-limiter
|
|
29
|
+
Keywords: rate-limit,rate-limiter,ratelimit,retry,throttle,backoff,throttling
|
|
30
|
+
Classifier: License :: OSI Approved :: MIT License
|
|
31
|
+
Classifier: Operating System :: OS Independent
|
|
32
|
+
Classifier: Topic :: Software Development :: Libraries :: Python Modules
|
|
33
|
+
Classifier: Topic :: Utilities
|
|
34
|
+
Classifier: Topic :: Software Development :: Testing
|
|
35
|
+
Classifier: Programming Language :: Python :: 3
|
|
36
|
+
Classifier: Programming Language :: Python :: 3.8
|
|
37
|
+
Classifier: Programming Language :: Python :: 3.9
|
|
38
|
+
Classifier: Programming Language :: Python :: 3.10
|
|
39
|
+
Classifier: Programming Language :: Python :: 3.11
|
|
40
|
+
Classifier: Programming Language :: Python :: 3.12
|
|
41
|
+
Classifier: Programming Language :: Python :: 3.13
|
|
42
|
+
Classifier: Programming Language :: Python :: 3.14
|
|
43
|
+
Requires-Python: >=3.8
|
|
44
|
+
Description-Content-Type: text/markdown
|
|
45
|
+
License-File: LICENSE
|
|
46
|
+
Dynamic: license-file
|
|
47
|
+
|
|
48
|
+
# call-limiter 🚀
|
|
49
|
+
|
|
50
|
+

|
|
51
|
+

|
|
52
|
+
[](https://github.com/eyukselen/call-limiter/actions)
|
|
53
|
+
|
|
54
|
+
Thread-safe Python decorators for synchronized rate limiting and retry logic.
|
|
55
|
+
## 📦 Core Components
|
|
56
|
+
|
|
57
|
+
* **CallLimiter**: A high-precision throttler that paces function calls to stay within specific rate limits.
|
|
58
|
+
* **CallRetry**: A resilience decorator that re-runs failed functions with a configurable delay and exception handling.
|
|
59
|
+
* **ResilientLimiter**: A hybrid solution that combines pacing with Coordinated Recovery, ensuring retries never exceed your defined rate limit across threads.
|
|
60
|
+
|
|
61
|
+
## 🛠 Installation
|
|
62
|
+
|
|
63
|
+
```
|
|
64
|
+
pip install call-limiter
|
|
65
|
+
```
|
|
66
|
+
|
|
67
|
+
---
|
|
68
|
+
### Component 1: CallLimiter
|
|
69
|
+
|
|
70
|
+
**Scenario:** I want to "rate limit" (throttle) my function so it limits my calls to 5 calls per second. I also want to have an option to select if I want 5 calls to fire instantly or spread across evenly in the 1 second period.
|
|
71
|
+
|
|
72
|
+
**Usage-1: 5 calls per 1 second with burst (instantly fire all 5 calls)**
|
|
73
|
+
*Best for: Maximizing throughput when the target API allows short spikes.*
|
|
74
|
+
|
|
75
|
+
**My function to throttle:** `my_function`
|
|
76
|
+
|
|
77
|
+
```python
|
|
78
|
+
from call_limiter import CallLimiter
|
|
79
|
+
|
|
80
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=True)
|
|
81
|
+
throttled_func = limiter(my_function)
|
|
82
|
+
```
|
|
83
|
+
**Usage-2: 5 calls per 1 second paced (evenly spread calls)**
|
|
84
|
+
*Best for: Avoiding "spiky" traffic patterns that trigger anti-bot protections.*
|
|
85
|
+
```python
|
|
86
|
+
|
|
87
|
+
from call_limiter import CallLimiter
|
|
88
|
+
|
|
89
|
+
# This forces a call exactly every 0.2 seconds (1s / 5 calls)
|
|
90
|
+
limiter = CallLimiter(calls=5, period=1, allow_burst=False)
|
|
91
|
+
throttled_func = limiter(my_function)
|
|
92
|
+
```
|
|
93
|
+
---
|
|
94
|
+
### Component 2: CallRetry
|
|
95
|
+
|
|
96
|
+
**Scenario:** I want a retry logic to use with my function calls.
|
|
97
|
+
If `my_function` raises ValueError exception, it should retry up to 5 times with 1-second delay between attempts.
|
|
98
|
+
I want to log every retry with `retry_logger` function.
|
|
99
|
+
if it still fails, it should use `fail_handler` function. (if not provided, raise error)
|
|
100
|
+
|
|
101
|
+
```python
|
|
102
|
+
from call_limiter import CallRetry
|
|
103
|
+
|
|
104
|
+
# This configuration perfectly mirrors your scenario:
|
|
105
|
+
retry = CallRetry(
|
|
106
|
+
retry_count=5,
|
|
107
|
+
retry_interval=1.0,
|
|
108
|
+
retry_exceptions=(ValueError,), # Trigger
|
|
109
|
+
on_retry=retry_logger, # Observability
|
|
110
|
+
fallback=fail_handler # Outcome (Plan B)
|
|
111
|
+
)
|
|
112
|
+
|
|
113
|
+
# If fail_handler is a function, this returns its result on ultimate failure.
|
|
114
|
+
# If you didn't pass fail_handler, it would raise the ValueError.
|
|
115
|
+
resilient_func = retry(my_function)
|
|
116
|
+
```
|
|
117
|
+
---
|
|
118
|
+
### Component 3: ResilientLimiter
|
|
119
|
+
**Scenario:** I want a rate limiter that can also handle failed calls. `my_function` should be called
|
|
120
|
+
Flow Logic:
|
|
121
|
+
* 5 calls/per second with burst (or drip),
|
|
122
|
+
* max_retry = 3 (if it fails)
|
|
123
|
+
* on_retry=`retry_handler`, notify me by calling optional `retry_handler`, if not provided ignore!
|
|
124
|
+
* fallback=`fallback_handler`: if it still fails, notify me; if not provided, raise the error!
|
|
125
|
+
Note: each retry will comply "5 calls/per second with burst (or drip)" tempo to respect rate limiter
|
|
126
|
+
Note: on_retry receives (exception, attempt_number), while fallback is a simple callable.
|
|
127
|
+
```python
|
|
128
|
+
from call_limiter import ResilientLimiter
|
|
129
|
+
|
|
130
|
+
|
|
131
|
+
limiter = ResilientLimiter(
|
|
132
|
+
calls=5,
|
|
133
|
+
period=1.0,
|
|
134
|
+
allow_burst=True,
|
|
135
|
+
retry_count=3,
|
|
136
|
+
on_retry=retry_handler,
|
|
137
|
+
fallback=fail_handler
|
|
138
|
+
)
|
|
139
|
+
|
|
140
|
+
@limiter
|
|
141
|
+
def my_function():
|
|
142
|
+
# This will respect the 5/sec pace, even during retries.
|
|
143
|
+
pass
|
|
144
|
+
```
|
|
145
|
+
---
|
|
146
|
+
## ✨ Key Features
|
|
147
|
+
|
|
148
|
+
* Low-Jitter Timing: Uses time.perf_counter() and resolution-aware sleeping to prevent the "creeping delays" common in standard rate limiters.
|
|
149
|
+
* Zero-Hardcode Logic: Accounts for "OS Jitter" to ensure time.sleep remains accurate even under system load.
|
|
150
|
+
* Thread-Safe: Designed for multithreaded environments where multiple workers hit the same limited resource.
|
|
151
|
+
* Thread-Synchronized State: Shared locks ensure that 10 threads hitting the same limiter behave as a single unit.
|
|
152
|
+
* Synchronized Pacing: In hybrid mode, retries are queued through the global limiter, preventing a 'thundering herd' and ensuring you never exceed your quota during recovery.
|
|
153
|
+
*
|
|
@@ -0,0 +1,10 @@
|
|
|
1
|
+
LICENSE
|
|
2
|
+
README.md
|
|
3
|
+
pyproject.toml
|
|
4
|
+
call_limiter/__init__.py
|
|
5
|
+
call_limiter/limiter.py
|
|
6
|
+
call_limiter.egg-info/PKG-INFO
|
|
7
|
+
call_limiter.egg-info/SOURCES.txt
|
|
8
|
+
call_limiter.egg-info/dependency_links.txt
|
|
9
|
+
call_limiter.egg-info/top_level.txt
|
|
10
|
+
tests/test_limiter.py
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
|
|
@@ -0,0 +1 @@
|
|
|
1
|
+
call_limiter
|
|
@@ -0,0 +1,33 @@
|
|
|
1
|
+
[project]
|
|
2
|
+
name = "call-limiter"
|
|
3
|
+
version = "0.0.1"
|
|
4
|
+
description = "Rate Limiter with retry - A hardware-calibrated approach for execution pacing and resilience."
|
|
5
|
+
readme = "README.md"
|
|
6
|
+
authors = [{name="eyukselen"}]
|
|
7
|
+
license = {file = "LICENSE"}
|
|
8
|
+
keywords = ["rate-limit", "rate-limiter", "ratelimit", "retry", "throttle", "backoff", "throttling"]
|
|
9
|
+
classifiers = [
|
|
10
|
+
"License :: OSI Approved :: MIT License",
|
|
11
|
+
"Operating System :: OS Independent",
|
|
12
|
+
"Topic :: Software Development :: Libraries :: Python Modules",
|
|
13
|
+
"Topic :: Utilities",
|
|
14
|
+
"Topic :: Software Development :: Testing",
|
|
15
|
+
"Programming Language :: Python :: 3",
|
|
16
|
+
"Programming Language :: Python :: 3.8",
|
|
17
|
+
"Programming Language :: Python :: 3.9",
|
|
18
|
+
"Programming Language :: Python :: 3.10",
|
|
19
|
+
"Programming Language :: Python :: 3.11",
|
|
20
|
+
"Programming Language :: Python :: 3.12",
|
|
21
|
+
"Programming Language :: Python :: 3.13",
|
|
22
|
+
"Programming Language :: Python :: 3.14"
|
|
23
|
+
]
|
|
24
|
+
requires-python = ">=3.8"
|
|
25
|
+
dependencies = []
|
|
26
|
+
|
|
27
|
+
[project.urls]
|
|
28
|
+
Homepage = "https://github.com/eyukselen/call-limiter"
|
|
29
|
+
"Source Code" = "https://github.com/eyukselen/call-limiter"
|
|
30
|
+
|
|
31
|
+
[build-system]
|
|
32
|
+
requires = ["setuptools>=61.0"]
|
|
33
|
+
build-backend = "setuptools.build_meta"
|
|
@@ -0,0 +1,267 @@
|
|
|
1
|
+
import time
|
|
2
|
+
import pytest
|
|
3
|
+
from call_limiter import CallLimiter, CallRetry, ResilientLimiter
|
|
4
|
+
import threading
|
|
5
|
+
|
|
6
|
+
|
|
7
|
+
class TestCallLimiter:
    """Timing-based tests for CallLimiter pacing, bursting and thread safety.

    NOTE(review): these tests assert on wall-clock durations and are
    inherently sensitive to scheduler jitter on loaded CI machines.
    """

    def test_paced_drip_accuracy(self):
        """Ensures that allow_burst=False creates steady 0.2s intervals for 5 calls/1s."""
        calls = 5
        period = 1.0
        limiter = CallLimiter(calls=calls, period=period, allow_burst=False)

        timestamps = []

        @limiter
        def identity(i):
            # Record when each call actually executed.
            timestamps.append(time.perf_counter())
            return i

        # Run 4 calls (3 intervals)
        for i in range(4):
            identity(i)

        # Each interval should be ~0.2s (period / calls)
        for i in range(len(timestamps) - 1):
            gap = timestamps[i + 1] - timestamps[i]
            # Wide tolerance (abs=0.12) to absorb CI/Cloud scheduler jitter
            assert gap == pytest.approx(0.2, abs=0.12), f"Gap {i} was {gap}s, expected 0.2s"

    def test_burst_behavior(self):
        """Ensures allow_burst=True allows immediate execution followed by a wait."""
        calls = 5
        period = 1.0
        limiter = CallLimiter(calls=calls, period=period, allow_burst=True)

        start = time.perf_counter()

        @limiter
        def fast_call():
            pass

        # First 5 calls should be near-instant
        for _ in range(5):
            fast_call()

        burst_duration = time.perf_counter() - start
        assert burst_duration < 0.01, f"Burst took {burst_duration}s, should be < 0.01s"

        # 6th call must trigger the 'wall' and take ~0.2s from the start of the window
        fast_call()
        total_duration = time.perf_counter() - start
        assert total_duration >= 0.2, "6th call did not wait for the refill drip"

    def test_multithreaded_safety(self):
        """Ensures the lock prevents race conditions with concurrent calls."""

        calls = 10
        period = 0.5
        limiter = CallLimiter(calls=calls, period=period, allow_burst=False)
        results = []

        @limiter
        def secure_call(i):
            # Appending under the limiter; list.append itself is atomic in
            # CPython, the test is about the limiter serializing the pace.
            results.append(i)

        threads = [threading.Thread(target=secure_call, args=(i,)) for i in range(10)]

        start = time.perf_counter()
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        end = time.perf_counter()

        # Total time for 10 calls with 0.05s intervals should be ~0.45s
        # (1st call is free, 9 intervals of 0.05s)
        assert len(results) == 10
        assert (end - start) >= 0.44
|
|
82
|
+
|
|
83
|
+
|
|
84
|
+
class TestCallRetry:
    def test_retry_success_after_failure(self):
        """Ensures it eventually succeeds if the error clears."""
        attempts = 0

        def flaky_func():
            nonlocal attempts
            attempts += 1
            # Fail twice, then recover on the third attempt.
            if attempts < 3:
                raise ValueError("Fail")
            return "Success"

        decorated = CallRetry(retry_count=5, retry_interval=0.01, retry_exceptions=(ValueError,))(flaky_func)
        outcome = decorated()

        assert outcome == "Success"
        assert attempts == 3

    def test_retry_fallback_on_exhaustion(self):
        """Ensures fallback is called after all retries fail."""

        def permanent_fail():
            raise ValueError("Dead")

        def my_fallback(e):
            return "Saved"

        # Every retry will fail, so the fallback must supply the result.
        wrapped = CallRetry(retry_count=2, retry_interval=0.01, fallback=my_fallback)(permanent_fail)
        assert wrapped() == "Saved"

    def test_retry_ignores_wrong_exception(self):
        """Ensures it crashes immediately on an unhandled exception type."""

        def type_error_func():
            raise TypeError("Wrong error")

        # The retry policy watches ValueError only, so a TypeError must
        # escape on the very first call.
        retry = CallRetry(retry_count=5, retry_exceptions=(ValueError,))
        with pytest.raises(TypeError):
            retry(type_error_func)()

    def test_retry_raises_after_exhaustion_no_fallback(self):
        """Ensures the original exception is raised if no fallback is provided."""

        def constant_fail():
            raise ValueError("Ultimate Failure")

        # Without a fallback, exhausting the retries should let the last
        # ValueError bubble up to the caller.
        retry = CallRetry(retry_count=2, retry_interval=0.01, retry_exceptions=(ValueError,))
        with pytest.raises(ValueError, match="Ultimate Failure"):
            retry(constant_fail)()
class TestResilientLimiter:
    def test_documented_scenario(self):
        """Validates the exact scenario described in documentation."""
        events = []

        def retry_handler(e, n):
            # Record each retry attempt number as it happens.
            events.append(f"retry_{n}")

        def fail_handler(e):
            events.append("failed")
            return "fallback_value"

        @ResilientLimiter(
            calls=10,  # Fast for testing
            period=1.0,
            allow_burst=True,
            retry_count=2,
            on_retry=retry_handler,
            fallback=fail_handler
        )
        def unstable_func():
            raise ValueError("Boom")

        result = unstable_func()

        # Expected sequence: two retry notifications, then the fallback.
        assert events == ["retry_1", "retry_2", "failed"]
        assert result == "fallback_value"
class TestEdgeCases:
    def test_argument_propagation(self):
        """Ensures args and kwargs pass through the entire stack."""
        limiter = ResilientLimiter(calls=10, period=1.0)

        @limiter
        def add(a, b, multiplier=1):
            return (a + b) * multiplier

        # Positional and keyword arguments must survive the decorator chain.
        assert add(2, 3, multiplier=2) == 10

    def test_retry_return_value(self):
        """Ensures the successful return value is captured after retries."""
        count = 0

        def fail_once():
            nonlocal count
            count += 1
            # The very first invocation fails; all later ones succeed.
            if count == 1:
                raise ValueError("First fail")
            return {"status": "ok"}

        retry = CallRetry(retry_count=2, retry_interval=0.01)
        assert retry(fail_once)() == {"status": "ok"}

    def test_limiter_recovery(self):
        """Ensures the bucket refills completely after a long pause."""
        limiter = CallLimiter(calls=2, period=0.2, allow_burst=True)

        # Drain both tokens.
        limiter.wait()
        limiter.wait()

        # Sleep longer than one full period so the bucket refills entirely.
        time.sleep(0.3)

        checkpoint = time.perf_counter()
        limiter.wait()  # Should be instant
        assert (time.perf_counter() - checkpoint) < 0.01
class TestStressTest:
    def test_high_frequency_throughput(self):
        # Placeholder: the original throughput scenario was removed and
        # intentionally left as a no-op.
        pass

    def test_heavy_concurrency_contention(self):
        """STRESS TEST 2: High contention with Burst."""
        limiter = CallLimiter(calls=500, period=1.0, allow_burst=True)
        shared_list = []

        def worker():
            # Each worker performs 50 rate-limited appends.
            for _ in range(50):
                limiter.wait()
                shared_list.append(time.perf_counter())

        # 20 workers x 50 calls = 1000 total calls.
        threads = [threading.Thread(target=worker) for _ in range(20)]

        start = time.perf_counter()
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        duration = time.perf_counter() - start

        # With a 500/sec limit plus burst: 500 calls fire at T=0 and the
        # remaining 500 at T=1.0, so the whole run lands near 1.0s.
        assert 0.9 <= duration <= 1.3

    def test_retry_storm(self):
        """STRESS TEST 3: Ensuring throughput holds even when retries happen."""

        # Track per-call attempt counts so each call succeeds on its retry.
        attempts = {}

        @ResilientLimiter(
            calls=1000,
            period=1.0,
            retry_count=2,
            retry_interval=0.001,
            retry_exceptions=(RuntimeError,)
        )
        def unstable_service(i):
            attempts[i] = attempts.get(i, 0) + 1
            # First attempt per id fails; the retry succeeds.
            if attempts[i] < 2:
                raise RuntimeError("Temporary Glitch")
            return True

        start = time.perf_counter()
        # 500 logical calls, each failing once = 1000 invocations total.
        results = [unstable_service(i) for i in range(500)]
        elapsed = time.perf_counter() - start

        assert len(results) == 500
        # 1000 invocations at 1000 RPS should take roughly 1.0s; 0.4 is a
        # deliberately generous floor for fast machines.
        assert elapsed >= 0.4
|