throttle_machines 0.0.0 → 0.1.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (40)
  1. checksums.yaml +4 -4
  2. data/LICENSE +20 -0
  3. data/README.md +187 -13
  4. data/Rakefile +12 -0
  5. data/lib/throttle_machines/async_limiter.rb +134 -0
  6. data/lib/throttle_machines/control.rb +95 -0
  7. data/lib/throttle_machines/controller_helpers.rb +79 -0
  8. data/lib/throttle_machines/dependency_error.rb +6 -0
  9. data/lib/throttle_machines/engine.rb +23 -0
  10. data/lib/throttle_machines/hedged_breaker.rb +23 -0
  11. data/lib/throttle_machines/hedged_request.rb +117 -0
  12. data/lib/throttle_machines/instrumentation.rb +158 -0
  13. data/lib/throttle_machines/limiter.rb +167 -0
  14. data/lib/throttle_machines/middleware.rb +90 -0
  15. data/lib/throttle_machines/rack_middleware/allow2_ban.rb +62 -0
  16. data/lib/throttle_machines/rack_middleware/blocklist.rb +27 -0
  17. data/lib/throttle_machines/rack_middleware/configuration.rb +103 -0
  18. data/lib/throttle_machines/rack_middleware/fail2_ban.rb +87 -0
  19. data/lib/throttle_machines/rack_middleware/request.rb +12 -0
  20. data/lib/throttle_machines/rack_middleware/safelist.rb +27 -0
  21. data/lib/throttle_machines/rack_middleware/throttle.rb +95 -0
  22. data/lib/throttle_machines/rack_middleware/track.rb +51 -0
  23. data/lib/throttle_machines/rack_middleware.rb +89 -0
  24. data/lib/throttle_machines/storage/base.rb +93 -0
  25. data/lib/throttle_machines/storage/memory.rb +373 -0
  26. data/lib/throttle_machines/storage/null.rb +88 -0
  27. data/lib/throttle_machines/storage/redis/gcra.lua +22 -0
  28. data/lib/throttle_machines/storage/redis/get_breaker_state.lua +23 -0
  29. data/lib/throttle_machines/storage/redis/increment_counter.lua +9 -0
  30. data/lib/throttle_machines/storage/redis/peek_gcra.lua +16 -0
  31. data/lib/throttle_machines/storage/redis/peek_token_bucket.lua +18 -0
  32. data/lib/throttle_machines/storage/redis/record_breaker_failure.lua +24 -0
  33. data/lib/throttle_machines/storage/redis/record_breaker_success.lua +16 -0
  34. data/lib/throttle_machines/storage/redis/token_bucket.lua +23 -0
  35. data/lib/throttle_machines/storage/redis.rb +294 -0
  36. data/lib/throttle_machines/throttled_error.rb +14 -0
  37. data/lib/throttle_machines/version.rb +5 -0
  38. data/lib/throttle_machines.rb +130 -5
  39. metadata +113 -9
  40. data/LICENSE.txt +0 -21

data/lib/throttle_machines/storage/memory.rb
@@ -0,0 +1,373 @@
+ # frozen_string_literal: true
+
+ require 'concurrent'
+
+ module ThrottleMachines
+   module Storage
+     class Memory < Base
+       def initialize(options = {})
+         super
+         @counters = Concurrent::Hash.new
+         @gcra_states = Concurrent::Hash.new
+         @token_buckets = Concurrent::Hash.new
+         @breaker_states = Concurrent::Hash.new
+
+         # Use a striped lock pattern - pool of locks for fine-grained concurrency
+         @lock_pool_size = options[:lock_pool_size] || 32
+         @locks = Array.new(@lock_pool_size) { Concurrent::ReadWriteLock.new }
+
+         # Background cleanup thread
+         @cleanup_interval = options[:cleanup_interval] || 60
+         @shutdown = false
+         @cleanup_thread = start_cleanup_thread if options[:auto_cleanup] != false
+
+         # Ensure cleanup on garbage collection
+         ObjectSpace.define_finalizer(self, self.class.finalizer(@cleanup_thread))
+       end
+
+       def self.finalizer(cleanup_thread)
+         proc { cleanup_thread&.kill }
+       end
+
+       # Rate limiting operations
+       def increment_counter(key, window, amount = 1)
+         window_key = "#{key}:#{window}"
+
+         with_write_lock(window_key) do
+           now = current_time
+           # Fetch fresh value inside the lock to ensure consistency
+           counter = @counters[window_key]
+
+           if counter.nil? || counter[:expires_at] <= now
+             # Create or reset counter atomically
+             new_count = amount
+             @counters[window_key] = { count: new_count, expires_at: now + window }
+           else
+             # Increment existing counter atomically
+             new_count = counter[:count] + amount
+             @counters[window_key] = { count: new_count, expires_at: counter[:expires_at] }
+           end
+           new_count
+         end
+       end
+
+       def get_counter(key, window)
+         window_key = "#{key}:#{window}"
+
+         with_read_lock(window_key) do
+           counter = @counters[window_key]
+           return 0 unless counter
+           return 0 if counter[:expires_at] <= current_time
+
+           counter[:count]
+         end
+       end
+
+       def get_counter_ttl(key, window)
+         window_key = "#{key}:#{window}"
+
+         with_read_lock(window_key) do
+           counter = @counters[window_key]
+           return 0 unless counter
+
+           ttl = counter[:expires_at] - current_time
+           [ttl, 0].max
+         end
+       end
+
+       def reset_counter(key, window)
+         window_key = "#{key}:#{window}"
+         with_write_lock(window_key) { @counters.delete(window_key) }
+       end
+
+       # GCRA operations (atomic simulation)
+       def check_gcra_limit(key, emission_interval, delay_tolerance, ttl)
+         with_write_lock(key) do
+           now = current_time
+           state = @gcra_states[key] || { tat: 0.0 }
+
+           tat = [state[:tat], now].max
+           allow = tat - now <= delay_tolerance
+
+           if allow
+             new_tat = tat + emission_interval
+             @gcra_states[key] = { tat: new_tat, expires_at: now + ttl }
+           end
+
+           {
+             allowed: allow,
+             retry_after: allow ? 0 : (tat - now - delay_tolerance),
+             tat: tat
+           }
+         end
+       end
+
+       def peek_gcra_limit(key, _emission_interval, delay_tolerance)
+         with_read_lock(key) do
+           now = current_time
+           state = @gcra_states[key] || { tat: 0.0 }
+
+           tat = [state[:tat], now].max
+           allow = tat - now <= delay_tolerance
+
+           {
+             allowed: allow,
+             retry_after: allow ? 0 : (tat - now - delay_tolerance),
+             tat: tat
+           }
+         end
+       end
+
+       # Token bucket operations (atomic simulation)
+       def check_token_bucket(key, capacity, refill_rate, ttl)
+         with_write_lock(key) do
+           now = current_time
+           bucket = @token_buckets[key] || { tokens: capacity, last_refill: now }
+
+           # Refill tokens
+           elapsed = now - bucket[:last_refill]
+           tokens_to_add = elapsed * refill_rate
+           bucket[:tokens] = [bucket[:tokens] + tokens_to_add, capacity].min
+           bucket[:last_refill] = now
+
+           # Check if we can consume a token
+           if bucket[:tokens] >= 1
+             bucket[:tokens] -= 1
+             @token_buckets[key] = bucket.merge(expires_at: now + ttl)
+
+             {
+               allowed: true,
+               retry_after: 0,
+               tokens_remaining: bucket[:tokens].floor
+             }
+           else
+             retry_after = (1 - bucket[:tokens]) / refill_rate
+
+             {
+               allowed: false,
+               retry_after: retry_after,
+               tokens_remaining: 0
+             }
+           end
+         end
+       end
+
+       def peek_token_bucket(key, capacity, refill_rate)
+         with_read_lock(key) do
+           now = current_time
+           bucket = @token_buckets[key] || { tokens: capacity, last_refill: now }
+
+           # Calculate tokens without modifying state
+           elapsed = now - bucket[:last_refill]
+           tokens_to_add = elapsed * refill_rate
+           current_tokens = [bucket[:tokens] + tokens_to_add, capacity].min
+
+           if current_tokens >= 1
+             {
+               allowed: true,
+               retry_after: 0,
+               tokens_remaining: (current_tokens - 1).floor
+             }
+           else
+             retry_after = (1 - current_tokens) / refill_rate
+
+             {
+               allowed: false,
+               retry_after: retry_after,
+               tokens_remaining: 0
+             }
+           end
+         end
+       end
+
+       # Circuit breaker operations
+       def get_breaker_state(key)
+         # First try with read lock
+         state = with_read_lock("breaker:#{key}") do
+           @breaker_states[key] || { state: :closed, failures: 0, last_failure: nil }
+         end
+
+         # Check if we need to transition from open to half-open
+         if state[:state] == :open && state[:opens_at] && current_time >= state[:opens_at]
+           # Release read lock and acquire write lock
+           with_write_lock("breaker:#{key}") do
+             # Re-check condition after acquiring write lock
+             current_state = @breaker_states[key]
+             if current_state && current_state[:state] == :open && current_state[:opens_at] && current_time >= current_state[:opens_at]
+               @breaker_states[key] = current_state.merge(
+                 state: :half_open,
+                 half_open_attempts: 0
+               )
+             end
+             @breaker_states[key] || { state: :closed, failures: 0, last_failure: nil }
+           end
+         else
+           state
+         end
+       end
+
+       def record_breaker_success(key, _timeout, half_open_requests = 1)
+         with_write_lock("breaker:#{key}") do
+           state = @breaker_states[key]
+           return unless state
+
+           case state[:state]
+           when :half_open
+             attempts = (state[:half_open_attempts] || 0) + 1
+             if attempts >= half_open_requests
+               @breaker_states.delete(key)
+             else
+               @breaker_states[key] = state.merge(half_open_attempts: attempts)
+             end
+           when :closed
+             # Reset failure count on success
+             @breaker_states[key] = state.merge(failures: 0) if state[:failures].positive?
+           end
+         end
+       end
+
+       def record_breaker_failure(key, threshold, timeout)
+         with_write_lock("breaker:#{key}") do
+           state = @breaker_states[key] || { state: :closed, failures: 0 }
+           now = current_time
+
+           case state[:state]
+           when :closed
+             failures = state[:failures] + 1
+             @breaker_states[key] = if failures >= threshold
+                                      {
+                                        state: :open,
+                                        failures: failures,
+                                        last_failure: now,
+                                        opens_at: now + timeout
+                                      }
+                                    else
+                                      state.merge(failures: failures, last_failure: now)
+                                    end
+           when :half_open
+             @breaker_states[key] = {
+               state: :open,
+               failures: state[:failures],
+               last_failure: now,
+               opens_at: now + timeout
+             }
+           end
+
+           @breaker_states[key]
+         end
+       end
+
+       def trip_breaker(key, timeout)
+         with_write_lock("breaker:#{key}") do
+           now = current_time
+           @breaker_states[key] = {
+             state: :open,
+             failures: 0,
+             last_failure: now,
+             opens_at: now + timeout
+           }
+         end
+       end
+
+       def reset_breaker(key)
+         with_write_lock("breaker:#{key}") { @breaker_states.delete(key) }
+       end
+
+       # Utility operations
+       def clear(pattern = nil)
+         if pattern
+           regex = Regexp.new(pattern.gsub('*', '.*'))
+
+           # Clear matching keys from all stores
+           [@counters, @gcra_states, @token_buckets, @breaker_states].each do |store|
+             store.each_key do |k|
+               store.delete(k) if k&.match?(regex)
+             end
+           end
+         else
+           @counters.clear
+           @gcra_states.clear
+           @token_buckets.clear
+           @breaker_states.clear
+         end
+       end
+
+       def healthy?
+         true
+       end
+
+       def shutdown
+         @shutdown = true
+         @cleanup_thread&.join(1) # Wait up to 1 second for graceful shutdown
+         @cleanup_thread&.kill if @cleanup_thread&.alive?
+         @cleanup_thread = nil
+       end
+
+       private
+
+       def with_read_lock(key, &)
+         lock_for(key).with_read_lock(&)
+       end
+
+       def with_write_lock(key, &)
+         lock_for(key).with_write_lock(&)
+       end
+
+       def lock_for(key)
+         # Hash key to determine which lock to use
+         index = key.hash.abs % @lock_pool_size
+         @locks[index]
+       end
+
+       def start_cleanup_thread
+         Thread.new do
+           loop do
+             break if @shutdown
+
+             sleep @cleanup_interval
+             break if @shutdown
+
+             clean_expired_entries
+           end
+         end
+       end
+
+       def clean_expired_entries
+         now = current_time
+
+         # Clean expired counters
+         @counters.each_pair do |key, data|
+           with_write_lock(key) { @counters.delete(key) } if data[:expires_at] && data[:expires_at] <= now
+         end
+
+         # Clean expired GCRA states
+         @gcra_states.each_pair do |key, data|
+           with_write_lock(key) { @gcra_states.delete(key) } if data[:expires_at] && data[:expires_at] <= now
+         end
+
+         # Clean expired token buckets
+         @token_buckets.each_pair do |key, data|
+           with_write_lock(key) { @token_buckets.delete(key) } if data[:expires_at] && data[:expires_at] <= now
+         end
+
+         # Clean closed breaker states and expired open states
+         @breaker_states.each_pair do |key, data|
+           should_delete = false
+
+           # Clean closed states that have been idle
+           should_delete = true if data[:state] == :closed && data[:failures].zero?
+
+           # Clean expired open states (older than 2x timeout)
+           if data[:opens_at] && now > data[:opens_at] + ((data[:opens_at] - (data[:last_failure] || now)) * 2)
+             should_delete = true
+           end
+
+           with_write_lock("breaker:#{key}") { @breaker_states.delete(key) } if should_delete
+         end
+       rescue StandardError => e
+         # Log error but don't crash cleanup thread
+         warn "ThrottleMachines: Cleanup error: #{e.message}"
+       end
+     end
+   end
+ end
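
For orientation, a minimal sketch of how this in-memory backend might be driven directly, using only the constructor options and method signatures visible in the hunk above (Storage::Base, not expanded here, presumably supplies current_time and the base initializer; keys and numbers are illustrative):

    require 'throttle_machines'

    # Hypothetical direct use; callers presumably go through the gem's limiter
    # classes rather than the storage layer itself.
    store = ThrottleMachines::Storage::Memory.new(lock_pool_size: 16, cleanup_interval: 30)

    # Fixed-window counter: the third increment inside a 60 s window returns 3.
    3.times { store.increment_counter('api:client-42', 60) }
    store.get_counter('api:client-42', 60)     # => 3
    store.get_counter_ttl('api:client-42', 60) # => seconds left in the window

    # GCRA: emission_interval 1.0 s with delay_tolerance 5.0 s permits a short
    # burst, then reports how long to wait via :retry_after.
    result = store.check_gcra_limit('api:client-42:gcra', 1.0, 5.0, 60)
    result[:allowed]     # => true while within the burst allowance
    result[:retry_after] # => 0 when allowed, otherwise seconds to wait

    store.shutdown # stops the background cleanup thread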

data/lib/throttle_machines/storage/null.rb
@@ -0,0 +1,88 @@
+ # frozen_string_literal: true
+
+ module ThrottleMachines
+   module Storage
+     class Null < Base
+       # Rate limiting operations
+       def increment_counter(_key, _window, _amount = 1)
+         0
+       end
+
+       def get_counter(_key, _window)
+         0
+       end
+
+       def get_counter_ttl(_key, _window)
+         0
+       end
+
+       def reset_counter(_key, _window)
+         true
+       end
+
+       # GCRA operations
+       def check_gcra_limit(_key, _emission_interval, _delay_tolerance, _ttl)
+         {
+           allowed: true,
+           retry_after: 0,
+           tat: 0
+         }
+       end
+
+       def peek_gcra_limit(_key, _emission_interval, _delay_tolerance)
+         {
+           allowed: true,
+           retry_after: 0,
+           tat: 0
+         }
+       end
+
+       # Token bucket operations
+       def check_token_bucket(_key, capacity, _refill_rate, _ttl)
+         {
+           allowed: true,
+           retry_after: 0,
+           tokens_remaining: capacity
+         }
+       end
+
+       def peek_token_bucket(_key, capacity, _refill_rate)
+         {
+           allowed: true,
+           retry_after: 0,
+           tokens_remaining: capacity
+         }
+       end
+
+       # Circuit breaker operations
+       def get_breaker_state(_key)
+         { state: :closed, failures: 0, last_failure: nil }
+       end
+
+       def record_breaker_success(_key, _timeout, _half_open_requests = 1)
+         true
+       end
+
+       def record_breaker_failure(_key, _threshold, _timeout)
+         { state: :closed, failures: 0, last_failure: nil }
+       end
+
+       def trip_breaker(_key, _timeout)
+         true
+       end
+
+       def reset_breaker(_key)
+         true
+       end
+
+       # Utility operations
+       def clear(_pattern = nil)
+         true
+       end
+
+       def healthy?
+         true
+       end
+     end
+   end
+ end
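
The Null backend is the no-op counterpart: every check reports allowed and nothing is stored, so it can stand in where throttling should be disabled (for example in tests). A tiny sketch, assuming the base initializer needs no arguments:

    store = ThrottleMachines::Storage::Null.new

    store.increment_counter('anything', 60)           # => 0 (nothing is counted)
    store.check_token_bucket('anything', 10, 1.0, 60) # => { allowed: true, retry_after: 0, tokens_remaining: 10 }
    store.get_breaker_state('anything')               # => { state: :closed, failures: 0, last_failure: nil }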

data/lib/throttle_machines/storage/redis/gcra.lua
@@ -0,0 +1,22 @@
+ local key = KEYS[1]
+ local emission_interval = tonumber(ARGV[1])
+ local delay_tolerance = tonumber(ARGV[2])
+ local ttl = tonumber(ARGV[3])
+ local now = tonumber(ARGV[4])
+
+ local tat = redis.call('GET', key)
+ if not tat then
+   tat = 0
+ else
+   tat = tonumber(tat)
+ end
+
+ tat = math.max(tat, now)
+ local allow = (tat - now) <= delay_tolerance
+
+ if allow then
+   local new_tat = tat + emission_interval
+   redis.call('SET', key, new_tat, 'EX', ttl)
+ end
+
+ return { allow and 1 or 0, tat }
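
These Lua scripts are presumably loaded and evaluated by the gem's Redis storage class (data/lib/throttle_machines/storage/redis.rb, not expanded here). A hedged sketch of driving gcra.lua by hand with the redis-rb client, with the key name and numbers chosen purely for illustration:

    require 'redis'

    redis  = Redis.new
    script = File.read('data/lib/throttle_machines/storage/redis/gcra.lua')

    # ARGV order matches the script: emission_interval, delay_tolerance, ttl, now.
    # Roughly one request per second, with a burst allowance of five seconds' worth.
    allowed, tat = redis.eval(
      script,
      keys: ['throttle:api:client-42'],
      argv: [1.0, 5.0, 60, Time.now.to_f]
    )

    allowed == 1 # => the request is permitted and the stored TAT was advanced
    tat          # theoretical arrival time the script compared against now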

data/lib/throttle_machines/storage/redis/get_breaker_state.lua
@@ -0,0 +1,23 @@
+ local data = redis.call('HGETALL', KEYS[1])
+ if #data == 0 then
+   return {}
+ end
+
+ local state = {}
+ for i = 1, #data, 2 do
+   state[data[i]] = data[i + 1]
+ end
+
+ -- Auto-transition from open to half-open if timeout passed
+ if state['state'] == 'open' and state['opens_at'] then
+   local now = tonumber(ARGV[1])
+   local opens_at = tonumber(state['opens_at'])
+
+   if now >= opens_at then
+     redis.call('HSET', KEYS[1], 'state', 'half_open', 'half_open_attempts', '0')
+     state['state'] = 'half_open'
+     state['half_open_attempts'] = '0'
+   end
+ end
+
+ return state
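
Together with record_breaker_failure.lua and record_breaker_success.lua below, this hash implements the usual three-state breaker: failures accumulate while closed, the state flips to open with an opens_at timestamp once the threshold is reached, this script lazily moves open to half_open once opens_at has passed, and enough half-open successes delete the hash (back to closed). The same transition check, restated as a small illustrative Ruby function over the field hash:

    # Illustrative only: mirrors the open -> half_open decision made above.
    def effective_breaker_state(fields, now)
      return {} if fields.empty?

      if fields['state'] == 'open' && fields['opens_at'] && now >= fields['opens_at'].to_f
        fields.merge('state' => 'half_open', 'half_open_attempts' => '0')
      else
        fields
      end
    end

    effective_breaker_state({ 'state' => 'open', 'opens_at' => '100.0', 'failures' => '5' }, 101.0)
    # => { "state" => "half_open", "opens_at" => "100.0", "failures" => "5", "half_open_attempts" => "0" }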

data/lib/throttle_machines/storage/redis/increment_counter.lua
@@ -0,0 +1,9 @@
+ local count = redis.call('INCRBY', KEYS[1], ARGV[1])
+ local ttl = redis.call('TTL', KEYS[1])
+
+ -- Set expiry if key is new (ttl == -2) or has no TTL (ttl == -1)
+ if ttl <= 0 then
+   redis.call('EXPIRE', KEYS[1], ARGV[2])
+ end
+
+ return count
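
A short sketch of the intended fixed-window behaviour (redis-rb again, illustrative key and numbers): the first increment in a window attaches the expiry, and later increments reuse it until the key expires.

    require 'redis'

    redis  = Redis.new
    script = File.read('data/lib/throttle_machines/storage/redis/increment_counter.lua')

    # ARGV: amount to add, window length in seconds.
    redis.eval(script, keys: ['throttle:login:10.0.0.1'], argv: [1, 60]) # => 1, sets EXPIRE 60
    redis.eval(script, keys: ['throttle:login:10.0.0.1'], argv: [1, 60]) # => 2, keeps the existing TTL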

data/lib/throttle_machines/storage/redis/peek_gcra.lua
@@ -0,0 +1,16 @@
+ local key = KEYS[1]
+ local emission_interval = tonumber(ARGV[1])
+ local delay_tolerance = tonumber(ARGV[2])
+ local now = tonumber(ARGV[3])
+
+ local tat = redis.call('GET', key)
+ if not tat then
+   tat = 0
+ else
+   tat = tonumber(tat)
+ end
+
+ tat = math.max(tat, now)
+ local allow = (tat - now) <= delay_tolerance
+
+ return { allow and 1 or 0, tat }

data/lib/throttle_machines/storage/redis/peek_token_bucket.lua
@@ -0,0 +1,18 @@
+ local key = KEYS[1]
+ local capacity = tonumber(ARGV[1])
+ local refill_rate = tonumber(ARGV[2])
+ local now = tonumber(ARGV[3])
+
+ local bucket = redis.call('HMGET', key, 'tokens', 'last_refill')
+ local tokens = tonumber(bucket[1]) or capacity
+ local last_refill = tonumber(bucket[2]) or now
+
+ -- Calculate tokens without modifying
+ local elapsed = now - last_refill
+ local tokens_to_add = elapsed * refill_rate
+ tokens = math.min(tokens + tokens_to_add, capacity)
+
+ local allow = tokens >= 1
+ local tokens_after = allow and (tokens - 1) or 0
+
+ return { allow and 1 or 0, tokens_after }

data/lib/throttle_machines/storage/redis/record_breaker_failure.lua
@@ -0,0 +1,24 @@
+ local state = redis.call('HGET', KEYS[1], 'state') or 'closed'
+ local now = ARGV[3]
+ local timeout = tonumber(ARGV[2])
+
+ if state == 'half_open' then
+   -- Failure in half-open state, just re-open the circuit
+   redis.call('HMSET', KEYS[1],
+     'state', 'open',
+     'opens_at', tonumber(now) + timeout,
+     'last_failure', now
+   )
+ else -- state is 'closed' or nil
+   local failures = redis.call('HINCRBY', KEYS[1], 'failures', 1)
+   redis.call('HSET', KEYS[1], 'last_failure', now)
+
+   if failures >= tonumber(ARGV[1]) then
+     redis.call('HMSET', KEYS[1],
+       'state', 'open',
+       'opens_at', tonumber(now) + timeout
+     )
+   end
+ end
+
+ redis.call('EXPIRE', KEYS[1], timeout * 2)

data/lib/throttle_machines/storage/redis/record_breaker_success.lua
@@ -0,0 +1,16 @@
+ local state = redis.call('HGET', KEYS[1], 'state')
+
+ if state == 'half_open' then
+   -- Increment half-open attempts and potentially close the circuit
+   local attempts = redis.call('HINCRBY', KEYS[1], 'half_open_attempts', 1)
+
+   if attempts >= tonumber(ARGV[1]) then
+     redis.call('DEL', KEYS[1])
+   end
+ elseif state == 'closed' then
+   -- Reset failure count on success in closed state
+   local failures = redis.call('HGET', KEYS[1], 'failures')
+   if failures and tonumber(failures) > 0 then
+     redis.call('HSET', KEYS[1], 'failures', 0)
+   end
+ end

data/lib/throttle_machines/storage/redis/token_bucket.lua
@@ -0,0 +1,23 @@
+ local key = KEYS[1]
+ local capacity = tonumber(ARGV[1])
+ local refill_rate = tonumber(ARGV[2])
+ local ttl = tonumber(ARGV[3])
+ local now = tonumber(ARGV[4])
+
+ local bucket = redis.call('HMGET', key, 'tokens', 'last_refill')
+ local tokens = tonumber(bucket[1]) or capacity
+ local last_refill = tonumber(bucket[2]) or now
+
+ -- Refill tokens
+ local elapsed = now - last_refill
+ local tokens_to_add = elapsed * refill_rate
+ tokens = math.min(tokens + tokens_to_add, capacity)
+
+ local allow = tokens >= 1
+ if allow then
+   tokens = tokens - 1
+   redis.call('HMSET', key, 'tokens', tokens, 'last_refill', now)
+   redis.call('EXPIRE', key, ttl)
+ end
+
+ return { allow and 1 or 0, tokens }
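
The refill arithmetic matches the in-memory backend: tokens accrue at refill_rate per second up to capacity, and a request needs a whole token. A worked example with illustrative numbers:

    # Mirrors the refill step above; not part of the gem.
    def refill(tokens, last_refill, now, capacity, refill_rate)
      [tokens + (now - last_refill) * refill_rate, capacity].min
    end

    refill(1.5, 100.0, 103.0, 10, 2.0) # => 7.5 -> a token can be taken, 6.5 remain
    refill(0.0, 100.0, 100.1, 10, 2.0) # => 0.2 -> denied; the in-memory backend would
                                       #    report retry_after = (1 - 0.2) / 2.0 = 0.4 s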