throttle_machines 0.0.0 → 0.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/MIT-LICENSE +20 -0
- data/README.md +187 -13
- data/Rakefile +12 -0
- data/lib/throttle_machines/async_limiter.rb +134 -0
- data/lib/throttle_machines/clock.rb +41 -0
- data/lib/throttle_machines/control.rb +95 -0
- data/lib/throttle_machines/controller_helpers.rb +79 -0
- data/lib/throttle_machines/dependency_error.rb +6 -0
- data/lib/throttle_machines/engine.rb +25 -0
- data/lib/throttle_machines/hedged_request.rb +137 -0
- data/lib/throttle_machines/instrumentation.rb +162 -0
- data/lib/throttle_machines/limiter.rb +167 -0
- data/lib/throttle_machines/middleware.rb +90 -0
- data/lib/throttle_machines/rack_middleware/allow2_ban.rb +62 -0
- data/lib/throttle_machines/rack_middleware/blocklist.rb +27 -0
- data/lib/throttle_machines/rack_middleware/configuration.rb +103 -0
- data/lib/throttle_machines/rack_middleware/fail2_ban.rb +87 -0
- data/lib/throttle_machines/rack_middleware/request.rb +12 -0
- data/lib/throttle_machines/rack_middleware/safelist.rb +27 -0
- data/lib/throttle_machines/rack_middleware/throttle.rb +95 -0
- data/lib/throttle_machines/rack_middleware/track.rb +51 -0
- data/lib/throttle_machines/rack_middleware.rb +87 -0
- data/lib/throttle_machines/storage/base.rb +93 -0
- data/lib/throttle_machines/storage/memory.rb +374 -0
- data/lib/throttle_machines/storage/null.rb +90 -0
- data/lib/throttle_machines/storage/redis.rb +451 -0
- data/lib/throttle_machines/throttled_error.rb +14 -0
- data/lib/throttle_machines/version.rb +5 -0
- data/lib/throttle_machines.rb +134 -5
- metadata +105 -9
- data/LICENSE.txt +0 -21
@@ -0,0 +1,374 @@
|
|
1
|
+
# frozen_string_literal: true

require_relative 'base'
require 'concurrent'

module ThrottleMachines
  module Storage
    # In-process storage backend. All state lives in Concurrent::Hash maps
    # guarded by a striped pool of read/write locks, so unrelated keys do
    # not contend on one global mutex. A background thread periodically
    # evicts expired entries.
    #
    # NOTE(review): `current_time` is inherited from Base — presumably a
    # monotonic-ish float clock; confirm against base.rb.
    class Memory < Base
      # @param options [Hash]
      # @option options [Integer] :lock_pool_size    number of striped locks (default 32)
      # @option options [Numeric] :cleanup_interval  seconds between eviction sweeps (default 60)
      # @option options [Boolean] :auto_cleanup      disable the sweeper thread with `false`
      def initialize(options = {})
        super
        @counters = Concurrent::Hash.new
        @gcra_states = Concurrent::Hash.new
        @token_buckets = Concurrent::Hash.new
        @breaker_states = Concurrent::Hash.new

        # Use a striped lock pattern - pool of locks for fine-grained concurrency
        @lock_pool_size = options[:lock_pool_size] || 32
        @locks = Array.new(@lock_pool_size) { Concurrent::ReadWriteLock.new }

        # Background cleanup thread
        @cleanup_interval = options[:cleanup_interval] || 60
        @shutdown = false
        @cleanup_thread = start_cleanup_thread if options[:auto_cleanup] != false

        # Ensure cleanup on garbage collection
        ObjectSpace.define_finalizer(self, self.class.finalizer(@cleanup_thread))
      end

      # The finalizer proc must not capture +self+ (that would keep the
      # instance alive forever), so it closes over the thread only.
      def self.finalizer(cleanup_thread)
        proc { cleanup_thread&.kill }
      end

      # Rate limiting operations

      # Atomically increments the fixed-window counter for +key+, creating
      # or resetting it when missing/expired.
      #
      # @return [Integer] the counter value after the increment
      def increment_counter(key, window, amount = 1)
        window_key = "#{key}:#{window}"

        with_write_lock(window_key) do
          now = current_time
          # Fetch fresh value inside the lock to ensure consistency
          counter = @counters[window_key]

          if counter.nil? || counter[:expires_at] <= now
            # Create or reset counter atomically
            new_count = amount
            @counters[window_key] = { count: new_count, expires_at: now + window }
          else
            # Increment existing counter atomically
            new_count = counter[:count] + amount
            @counters[window_key] = { count: new_count, expires_at: counter[:expires_at] }
          end
          new_count
        end
      end

      # @return [Integer] current counter value, 0 when missing or expired
      def get_counter(key, window)
        window_key = "#{key}:#{window}"

        with_read_lock(window_key) do
          counter = @counters[window_key]
          return 0 unless counter
          return 0 if counter[:expires_at] <= current_time

          counter[:count]
        end
      end

      # @return [Numeric] seconds until the counter expires (never negative)
      def get_counter_ttl(key, window)
        window_key = "#{key}:#{window}"

        with_read_lock(window_key) do
          counter = @counters[window_key]
          return 0 unless counter

          ttl = counter[:expires_at] - current_time
          [ttl, 0].max
        end
      end

      # Drops the counter for +key+/+window+.
      def reset_counter(key, window)
        window_key = "#{key}:#{window}"
        with_write_lock(window_key) { @counters.delete(window_key) }
      end

      # GCRA operations (atomic simulation)

      # Checks-and-consumes one GCRA slot. TAT = "theoretical arrival time";
      # a request is allowed while TAT does not run ahead of now by more
      # than +delay_tolerance+.
      #
      # @return [Hash] { allowed:, retry_after:, tat: }
      def check_gcra_limit(key, emission_interval, delay_tolerance, ttl)
        with_write_lock(key) do
          now = current_time
          state = @gcra_states[key] || { tat: 0.0 }

          tat = [state[:tat], now].max
          allow = tat - now <= delay_tolerance

          if allow
            new_tat = tat + emission_interval
            @gcra_states[key] = { tat: new_tat, expires_at: now + ttl }
          end

          {
            allowed: allow,
            retry_after: allow ? 0 : (tat - now - delay_tolerance),
            tat: tat
          }
        end
      end

      # Read-only variant of #check_gcra_limit — never advances the TAT.
      def peek_gcra_limit(key, _emission_interval, delay_tolerance)
        with_read_lock(key) do
          now = current_time
          state = @gcra_states[key] || { tat: 0.0 }

          tat = [state[:tat], now].max
          allow = tat - now <= delay_tolerance

          {
            allowed: allow,
            retry_after: allow ? 0 : (tat - now - delay_tolerance),
            tat: tat
          }
        end
      end

      # Token bucket operations (atomic simulation)

      # Refills the bucket based on elapsed time, then tries to consume one
      # token.
      #
      # @return [Hash] { allowed:, retry_after:, tokens_remaining: }
      def check_token_bucket(key, capacity, refill_rate, ttl)
        with_write_lock(key) do
          now = current_time
          bucket = @token_buckets[key] || { tokens: capacity, last_refill: now }

          # Refill tokens
          elapsed = now - bucket[:last_refill]
          tokens_to_add = elapsed * refill_rate
          bucket[:tokens] = [bucket[:tokens] + tokens_to_add, capacity].min
          bucket[:last_refill] = now

          # Check if we can consume a token
          if bucket[:tokens] >= 1
            bucket[:tokens] -= 1
            @token_buckets[key] = bucket.merge(expires_at: now + ttl)

            {
              allowed: true,
              retry_after: 0,
              tokens_remaining: bucket[:tokens].floor
            }
          else
            retry_after = (1 - bucket[:tokens]) / refill_rate

            {
              allowed: false,
              retry_after: retry_after,
              tokens_remaining: 0
            }
          end
        end
      end

      # Read-only variant of #check_token_bucket — computes the refilled
      # level without persisting anything.
      def peek_token_bucket(key, capacity, refill_rate)
        with_read_lock(key) do
          now = current_time
          bucket = @token_buckets[key] || { tokens: capacity, last_refill: now }

          # Calculate tokens without modifying state
          elapsed = now - bucket[:last_refill]
          tokens_to_add = elapsed * refill_rate
          current_tokens = [bucket[:tokens] + tokens_to_add, capacity].min

          if current_tokens >= 1
            {
              allowed: true,
              retry_after: 0,
              tokens_remaining: (current_tokens - 1).floor
            }
          else
            retry_after = (1 - current_tokens) / refill_rate

            {
              allowed: false,
              retry_after: retry_after,
              tokens_remaining: 0
            }
          end
        end
      end

      # Circuit breaker operations

      # Returns the breaker state for +key+, lazily transitioning
      # open -> half_open once +opens_at+ has passed. The read lock is
      # dropped and a write lock re-acquired for the transition, with the
      # condition re-checked under the write lock (classic upgrade pattern).
      def get_breaker_state(key)
        # First try with read lock
        state = with_read_lock("breaker:#{key}") do
          @breaker_states[key] || { state: :closed, failures: 0, last_failure: nil }
        end

        # Check if we need to transition from open to half-open
        if state[:state] == :open && state[:opens_at] && current_time >= state[:opens_at]
          # Release read lock and acquire write lock
          with_write_lock("breaker:#{key}") do
            # Re-check condition after acquiring write lock
            current_state = @breaker_states[key]
            if current_state && current_state[:state] == :open && current_state[:opens_at] && current_time >= current_state[:opens_at]
              @breaker_states[key] = current_state.merge(
                state: :half_open,
                half_open_attempts: 0
              )
            end
            @breaker_states[key] || { state: :closed, failures: 0, last_failure: nil }
          end
        else
          state
        end
      end

      # Records a successful probe. In half-open, the breaker fully closes
      # (state deleted) once +half_open_requests+ successes accumulate; in
      # closed state a success clears any accumulated failures.
      def record_breaker_success(key, _timeout, half_open_requests = 1)
        with_write_lock("breaker:#{key}") do
          state = @breaker_states[key]
          return unless state

          case state[:state]
          when :half_open
            attempts = (state[:half_open_attempts] || 0) + 1
            if attempts >= half_open_requests
              @breaker_states.delete(key)
            else
              @breaker_states[key] = state.merge(half_open_attempts: attempts)
            end
          when :closed
            # Reset failure count on success
            @breaker_states[key] = state.merge(failures: 0) if state[:failures].positive?
          end
        end
      end

      # Records a failure. Closed breakers open once +threshold+ failures
      # accumulate; a half-open breaker re-opens immediately.
      #
      # @return [Hash] the resulting breaker state
      def record_breaker_failure(key, threshold, timeout)
        with_write_lock("breaker:#{key}") do
          state = @breaker_states[key] || { state: :closed, failures: 0 }
          now = current_time

          case state[:state]
          when :closed
            failures = state[:failures] + 1
            @breaker_states[key] = if failures >= threshold
                                     {
                                       state: :open,
                                       failures: failures,
                                       last_failure: now,
                                       opens_at: now + timeout
                                     }
                                   else
                                     state.merge(failures: failures, last_failure: now)
                                   end
          when :half_open
            @breaker_states[key] = {
              state: :open,
              failures: state[:failures],
              last_failure: now,
              opens_at: now + timeout
            }
          end

          @breaker_states[key]
        end
      end

      # Forces the breaker open for +timeout+ seconds regardless of the
      # failure count.
      def trip_breaker(key, timeout)
        with_write_lock("breaker:#{key}") do
          now = current_time
          @breaker_states[key] = {
            state: :open,
            failures: 0,
            last_failure: now,
            opens_at: now + timeout
          }
        end
      end

      # Returns the breaker to its default (closed) state.
      def reset_breaker(key)
        with_write_lock("breaker:#{key}") { @breaker_states.delete(key) }
      end

      # Utility operations

      # Removes stored state. With no +pattern+, everything is dropped.
      # With a glob-style +pattern+ (only "*" is a wildcard), keys matching
      # the whole pattern are removed, mirroring Redis KEYS semantics.
      def clear(pattern = nil)
        if pattern
          # Escape regexp metacharacters so e.g. "." in the pattern stays
          # literal, turn the escaped "*" back into a wildcard, and anchor
          # the expression so it must match the entire key. The previous
          # unescaped, unanchored form could delete unrelated keys.
          regex = Regexp.new("\\A#{Regexp.escape(pattern).gsub('\*', '.*')}\\z")

          # Clear matching keys from all stores
          [@counters, @gcra_states, @token_buckets, @breaker_states].each do |store|
            store.each_key do |k|
              store.delete(k) if k&.match?(regex)
            end
          end
        else
          @counters.clear
          @gcra_states.clear
          @token_buckets.clear
          @breaker_states.clear
        end
      end

      # In-memory storage has no external dependency to probe.
      def healthy?
        true
      end

      # Stops the cleanup thread, waiting briefly for a graceful exit
      # before killing it.
      def shutdown
        @shutdown = true
        @cleanup_thread&.join(1) # Wait up to 1 second for graceful shutdown
        @cleanup_thread&.kill if @cleanup_thread&.alive?
        @cleanup_thread = nil
      end

      private

      def with_read_lock(key, &)
        lock_for(key).with_read_lock(&)
      end

      def with_write_lock(key, &)
        lock_for(key).with_write_lock(&)
      end

      # Maps a key onto one of the striped locks.
      def lock_for(key)
        # Hash key to determine which lock to use
        index = key.hash.abs % @lock_pool_size
        @locks[index]
      end

      # Spawns the periodic eviction thread; exits promptly after #shutdown.
      def start_cleanup_thread
        Thread.new do
          loop do
            break if @shutdown

            sleep @cleanup_interval
            break if @shutdown

            clean_expired_entries
          end
        end
      end

      # One eviction sweep over all stores. Lock keys must match the ones
      # used by the mutating methods (raw window/gcra/bucket keys, and the
      # "breaker:" prefix for breaker state).
      def clean_expired_entries
        now = current_time

        # Clean expired counters
        @counters.each_pair do |key, data|
          with_write_lock(key) { @counters.delete(key) } if data[:expires_at] && data[:expires_at] <= now
        end

        # Clean expired GCRA states
        @gcra_states.each_pair do |key, data|
          with_write_lock(key) { @gcra_states.delete(key) } if data[:expires_at] && data[:expires_at] <= now
        end

        # Clean expired token buckets
        @token_buckets.each_pair do |key, data|
          with_write_lock(key) { @token_buckets.delete(key) } if data[:expires_at] && data[:expires_at] <= now
        end

        # Clean closed breaker states and expired open states
        @breaker_states.each_pair do |key, data|
          should_delete = false

          # Clean closed states that have been idle
          should_delete = true if data[:state] == :closed && data[:failures].zero?

          # Clean expired open states (older than 2x timeout)
          if data[:opens_at] && now > data[:opens_at] + ((data[:opens_at] - (data[:last_failure] || now)) * 2)
            should_delete = true
          end

          with_write_lock("breaker:#{key}") { @breaker_states.delete(key) } if should_delete
        end
      rescue StandardError => e
        # Log error but don't crash cleanup thread
        warn "ThrottleMachines: Cleanup error: #{e.message}"
      end
    end
  end
end
|
@@ -0,0 +1,90 @@
|
|
1
|
+
# frozen_string_literal: true

require_relative 'base'

module ThrottleMachines
  module Storage
    # No-op backend: every limiter check succeeds and nothing is persisted.
    # Useful for tests or for switching throttling off without touching
    # call sites.
    class Null < Base
      # Rate limiting operations — counters never accumulate.
      def increment_counter(_key, _window, _amount = 1) = 0

      def get_counter(_key, _window) = 0

      def get_counter_ttl(_key, _window) = 0

      def reset_counter(_key, _window) = true

      # GCRA operations — always allowed, theoretical arrival time stays 0.
      def check_gcra_limit(_key, _emission_interval, _delay_tolerance, _ttl)
        open_gate(tat: 0)
      end

      def peek_gcra_limit(_key, _emission_interval, _delay_tolerance)
        open_gate(tat: 0)
      end

      # Token bucket operations — the bucket always reports full capacity.
      def check_token_bucket(_key, capacity, _refill_rate, _ttl)
        open_gate(tokens_remaining: capacity)
      end

      def peek_token_bucket(_key, capacity, _refill_rate)
        open_gate(tokens_remaining: capacity)
      end

      # Circuit breaker operations — the breaker is permanently closed.
      def get_breaker_state(_key) = closed_breaker

      def record_breaker_success(_key, _timeout, _half_open_requests = 1) = true

      def record_breaker_failure(_key, _threshold, _timeout) = closed_breaker

      def trip_breaker(_key, _timeout) = true

      def reset_breaker(_key) = true

      # Utility operations
      def clear(_pattern = nil) = true

      def healthy? = true

      private

      # Fresh "request allowed" result; +extra+ carries the
      # algorithm-specific field (:tat or :tokens_remaining).
      def open_gate(extra)
        { allowed: true, retry_after: 0 }.merge(extra)
      end

      # Fresh default breaker state hash.
      def closed_breaker
        { state: :closed, failures: 0, last_failure: nil }
      end
    end
  end
end
|