prop 2.2.5 → 2.6.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA256:
- metadata.gz: d749e4483b80fc4abe3ab44ba034c349bafaf521dc00c4ad059e3e9bc683e88a
- data.tar.gz: d00a838d4b8f14cf9eb5a440e6a6fce475f6fb09d9937e2ebf6c0d66701ef82e
+ metadata.gz: bf33dac797d0f3b3cf7126709f1622e3069b7cf476f0bfde211178b3e4e743e1
+ data.tar.gz: e166a98af608dab54400289bb8fd28738b4ba8ab18ee0604613e75907ae4ec5c
  SHA512:
- metadata.gz: 36474a7d324d76381193d6e39928e1a9e07ba6e15e2bbec050db8bff9d3d2f0eeb4fb713f3882864fbc4ef3c65bb5343a7d99ae6ecf588326c62e1672da28274
- data.tar.gz: f68780e2c3cc1196a10e24e14949b8ee9b348d72c321bf4aa98e918031b423bf467ccefdda0cbd77dfdb9090403603540358b3a3561bbd17e6ce46d705cf7b72
+ metadata.gz: 953571f21e59adbbd743490543fa4a7faf30b0e07951f3646bf7e3af5043ed2e24d0f93c6c5d375b1bb866ed5719e1eefe206bf4ce230b5001d80a7fc0f48cee
+ data.tar.gz: ca8960bee3c4f2789d4ddc9a07e489ee646c45cbbe9391da6b76f7e06f81711224093806e60a49f85cd5f1451389868f22b58f1259283b33cf7ffad21665948b
data/README.md CHANGED
@@ -18,7 +18,7 @@ To store values, prop needs a cache:
  Prop.cache = Rails.cache # needs read/write/increment methods
  ```

- Prop does not expire its used keys, so use memcached or similar, not redis.
+ When using the interval strategy, prop sets a key expiry to its interval. Because the leaky bucket strategy does not set a ttl, it is best to use memcached or similar for all prop caching, not redis.

  ## Setting a Callback

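For context, any cache that exposes `read`, `write` and `increment` will do; a minimal sketch, assuming the `dalli` gem and a local memcached instance on the default port:

```ruby
require "active_support"
require "active_support/cache/mem_cache_store"

# Memcached-backed store; satisfies the read/write/increment interface Prop needs
# and honours per-key expiry, which the interval strategy now relies on.
Prop.cache = ActiveSupport::Cache::MemCacheStore.new("localhost:11211")
```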
@@ -37,6 +37,9 @@ Example: Limit on accepted emails per hour from a given user, by defining a thre
  ```ruby
  Prop.configure(:mails_per_hour, threshold: 100, interval: 1.hour, description: "Mail rate limit exceeded")
+
+ # Block requests by setting threshold to 0
+ Prop.configure(:mails_per_hour, threshold: 0, interval: 1.hour, description: "All mail is blocked")
  ```

  ```ruby
@@ -224,6 +227,8 @@ You can add two additional configurations: `:strategy` and `:burst_rate` to use
  Prop will handle the details after configured, and you don't have to specify `:strategy`
  again when using `throttle`, `throttle!` or any other methods.

+ The leaky bucket algorithm used is "leaky bucket as a meter".
+
  ```ruby
  Prop.configure(:api_request, strategy: :leaky_bucket, burst_rate: 20, threshold: 5, interval: 1.minute)
  ```
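A usage sketch against that configuration, assuming a configured `Prop.cache`; the key and timings are illustrative: up to `burst_rate` requests are absorbed as a burst, and capacity drains back at `threshold` per `interval`.

```ruby
begin
  21.times { Prop.throttle!(:api_request, "client-7") }
rescue Prop::RateLimited
  # the 21st call overflows the 20-request bucket
end

sleep 12                                  # about one request drains out (5 per 60 seconds)
Prop.throttle!(:api_request, "client-7")  # accepted again
```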
data/lib/prop.rb CHANGED
@@ -3,7 +3,7 @@ require "prop/limiter"
3
3
  require "forwardable"
4
4
 
5
5
  module Prop
6
- VERSION = "2.2.5"
6
+ VERSION = "2.6.0"
7
7
 
8
8
  # Short hand for accessing Prop::Limiter methods
9
9
  class << self
data/lib/prop/interval_strategy.rb CHANGED
@@ -16,16 +16,16 @@ module Prop
  # options argument is kept for api consistency for all strategies
  def increment(cache_key, amount, options = {})
  raise ArgumentError, "Change amount must be a Integer, was #{amount.class}" unless amount.is_a?(Integer)
- cache.increment(cache_key, amount) || (cache.write(cache_key, amount, raw: true) && amount) # WARNING: potential race condition
+ cache.increment(cache_key, amount) || (cache.write(cache_key, amount, raw: true, expires_in: options.fetch(:interval, nil)) && amount) # WARNING: potential race condition
  end

  def decrement(cache_key, amount, options = {})
  raise ArgumentError, "Change amount must be a Integer, was #{amount.class}" unless amount.is_a?(Integer)
- cache.decrement(cache_key, amount) || (cache.write(cache_key, 0, raw: true) && 0) # WARNING: potential race condition
+ cache.decrement(cache_key, amount) || (cache.write(cache_key, 0, raw: true, expires_in: options.fetch(:interval, nil)) && 0) # WARNING: potential race condition
  end

- def reset(cache_key)
- cache.write(cache_key, zero_counter, raw: true)
+ def reset(cache_key, options = {})
+ cache.write(cache_key, zero_counter, raw: true, expires_in: options.fetch(:interval, nil))
  end

  def compare_threshold?(counter, operator, options)
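The practical effect of the added `expires_in:` is that interval-strategy counter keys now carry a TTL equal to their interval when first written. A small sketch with an invented handle and key:

```ruby
Prop.configure(:password_resets, threshold: 5, interval: 1.hour)

# The first hit writes the counter with expires_in set to the interval, so the
# key expires on its own about an hour later instead of lingering in the cache.
Prop.throttle!(:password_resets, "user-42")
```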
@@ -56,8 +56,8 @@ module Prop
  end

  def validate_options!(options)
- validate_positive_integer(options[:threshold], :threshold)
- validate_positive_integer(options[:interval], :interval)
+ validate_threshold(options[:threshold], :threshold)
+ validate_interval(options[:interval], :interval)

  amount = options[:increment] || options[:decrement]
  if amount
@@ -67,7 +67,11 @@

  private

- def validate_positive_integer(option, key)
+ def validate_threshold(option, key)
+ raise ArgumentError.new("#{key.inspect} must be a non-negative Integer") if !option.is_a?(Integer) || option < 0
+ end
+
+ def validate_interval(option, key)
  raise ArgumentError.new("#{key.inspect} must be a positive Integer") if !option.is_a?(Integer) || option <= 0
  end

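The split between `validate_threshold` and `validate_interval` is what allows the zero-threshold configuration shown in the README while still rejecting non-positive intervals. A sketch with an invented handle:

```ruby
Prop.configure(:signups, threshold: 0, interval: 1.hour)   # accepted: blocks every request
Prop.configure(:signups, threshold: -1, interval: 1.hour)  # ArgumentError, :threshold must be a non-negative Integer
Prop.configure(:signups, threshold: 10, interval: 0)       # ArgumentError, :interval must be a positive Integer
```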
data/lib/prop/leaky_bucket_strategy.rb CHANGED
@@ -5,39 +5,69 @@ require 'prop/key'
  module Prop
  class LeakyBucketStrategy
  class << self
+ def _throttle_leaky_bucket(handle, key, cache_key, options)
+ (over_limit, bucket) = options.key?(:decrement) ?
+ decrement(cache_key, options.fetch(:decrement), options) :
+ increment(cache_key, options.fetch(:increment, 1), options)
+
+ [over_limit, bucket]
+ end
+
  def counter(cache_key, options)
- bucket = Prop::Limiter.cache.read(cache_key) || zero_counter
- now = Time.now.to_i
- leak_amount = (now - bucket.fetch(:last_updated)) / options.fetch(:interval) * options.fetch(:threshold)
+ cache.read(cache_key) || zero_counter
+ end
+
+ def leak_amount(bucket, amount, options, now)
+ leak_rate = (now - bucket.fetch(:last_leak_time, 0)) / options.fetch(:interval).to_f
+ leak_amount = (leak_rate * options.fetch(:threshold).to_f)
+ leak_amount.to_i
+ end

- bucket[:bucket] = [bucket.fetch(:bucket) - leak_amount, 0].max
- bucket[:last_updated] = now
- bucket
+ def update_bucket(current_bucket_size, max_bucket_size, amount)
+ over_limit = (max_bucket_size-current_bucket_size) < amount
+ updated_bucket_size = over_limit ? current_bucket_size : current_bucket_size + amount
+ [over_limit, updated_bucket_size]
  end

  # WARNING: race condition
  # this increment is not atomic, so it might miss counts when used frequently
  def increment(cache_key, amount, options)
- counter = counter(cache_key, options)
- counter[:bucket] += amount
- Prop::Limiter.cache.write(cache_key, counter)
- counter
+ bucket = counter(cache_key, options)
+ now = Time.now.to_i
+ max_bucket_size = options.fetch(:burst_rate)
+ current_bucket_size = bucket.fetch(:bucket, 0)
+ leak_amount = leak_amount(bucket, amount, options, now)
+ if leak_amount > 0
+ # maybe TODO, update last_leak_time to reflect the exact time for the current leak amount
+ # the current strategy will always reflect a little less leakage, probably not an issue though
+ bucket[:last_leak_time] = now
+ current_bucket_size = [(current_bucket_size - leak_amount), 0].max
+ end
+
+ over_limit, updated_bucket_size = update_bucket(current_bucket_size, max_bucket_size, amount)
+ bucket[:bucket] = updated_bucket_size
+ bucket[:over_limit] = over_limit
+ cache.write(cache_key, bucket)
+ [over_limit, bucket]
  end

  def decrement(cache_key, amount, options)
- counter = counter(cache_key, options)
- counter[:bucket] -= amount
- counter[:bucket] = 0 unless counter[:bucket] > 0
- Prop::Limiter.cache.write(cache_key, counter)
- counter
+ now = Time.now.to_i
+ bucket = counter(cache_key, options)
+ leak_amount = leak_amount(bucket, amount, options, now)
+ bucket[:bucket] = [bucket[:bucket] - amount - leak_amount, 0].max
+ bucket[:last_leak_time] = now if leak_amount > 0
+ bucket[:over_limit] = false
+ cache.write(cache_key, bucket)
+ [false, bucket]
  end

- def reset(cache_key)
- Prop::Limiter.cache.write(cache_key, zero_counter)
+ def reset(cache_key, options = {})
+ cache.write(cache_key, zero_counter, raw: true)
  end

- def compare_threshold?(counter, operator, options)
- counter.fetch(:bucket).to_i.send operator, options.fetch(:burst_rate)
+ def compare_threshold?(bucket, operator, options)
+ bucket.fetch(:over_limit, false)
  end

  def build(options)
@@ -69,7 +99,11 @@ module Prop
  end

  def zero_counter
- { bucket: 0, last_updated: 0 }
+ { bucket: 0, last_leak_time: 0, over_limit: false }
+ end
+
+ def cache
+ Prop::Limiter.cache
  end
  end
  end
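Restated outside the class for clarity (a standalone re-derivation, not the gem's code path), the drain is proportional to the time since the last leak, scaled to `threshold` per `interval`:

```ruby
def leak_amount(last_leak_time, interval, threshold, now)
  leak_rate = (now - last_leak_time) / interval.to_f
  (leak_rate * threshold.to_f).to_i   # only whole requests are leaked
end

# threshold: 5 per interval: 60s => 30 idle seconds drain 2 whole requests
leak_amount(1_000, 60, 5, 1_030)  # => 2
```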
data/lib/prop/limiter.rb CHANGED
@@ -46,8 +46,7 @@ module Prop
  #
  # Raises Prop::RateLimited if the number if the threshold for this handle has been reached
  def configure(handle, defaults)
- raise ArgumentError.new("Invalid threshold setting") unless defaults[:threshold].to_i > 0
- raise ArgumentError.new("Invalid interval setting") unless defaults[:interval].to_i > 0
+ Prop::Options.validate_options!(defaults)

  self.handles ||= {}
  self.handles[handle] = defaults
@@ -56,7 +55,7 @@
  # Public: Disables Prop for a block of code
  #
  # block - a block of code within which Prop will not raise
- def disabled(&block)
+ def disabled(&_block)
  @disabled = true
  yield
  ensure
@@ -72,8 +71,8 @@ module Prop
  #
  # Returns true if the threshold for this handle has been reached, else returns false
  def throttle(handle, key = nil, options = {})
- options, cache_key = prepare(handle, key, options)
- throttled = _throttle(handle, key, cache_key, options).first
+ options, cache_key, strategy = prepare(handle, key, options)
+ throttled = _throttle(strategy, handle, key, cache_key, options).first
  block_given? && !throttled ? yield : throttled
  end

@@ -87,8 +86,8 @@
  # Raises Prop::RateLimited if the threshold for this handle has been reached
  # Returns the value of the block if given a such, otherwise the current count of the throttle
  def throttle!(handle, key = nil, options = {}, &block)
- options, cache_key = prepare(handle, key, options)
- throttled, counter = _throttle(handle, key, cache_key, options)
+ options, cache_key, strategy = prepare(handle, key, options)
+ throttled, counter = _throttle(strategy, handle, key, cache_key, options)

  if throttled
  raise Prop::RateLimited.new(options.merge(
@@ -108,9 +107,9 @@ module Prop
  #
  # Returns true if a call to `throttle!` with same parameters would raise, otherwise false
  def throttled?(handle, key = nil, options = {})
- options, cache_key = prepare(handle, key, options)
- counter = @strategy.counter(cache_key, options)
- @strategy.compare_threshold?(counter, :>=, options)
+ options, cache_key, strategy = prepare(handle, key, options)
+ counter = strategy.counter(cache_key, options)
+ strategy.compare_threshold?(counter, :>=, options)
  end

  # Public: Resets a specific throttle
@@ -120,8 +119,8 @@
  #
  # Returns nothing
  def reset(handle, key = nil, options = {})
- _options, cache_key = prepare(handle, key, options)
- @strategy.reset(cache_key)
+ _options, cache_key, strategy = prepare(handle, key, options)
+ strategy.reset(cache_key, options)
  end

  # Public: Counts the number of times the given handle/key combination has been hit in the current window
@@ -131,8 +130,8 @@ module Prop
  #
  # Returns a count of hits in the current window
  def count(handle, key = nil, options = {})
- options, cache_key = prepare(handle, key, options)
- @strategy.counter(cache_key, options)
+ options, cache_key, strategy = prepare(handle, key, options)
+ strategy.counter(cache_key, options)
  end
  alias :query :count

@@ -143,18 +142,26 @@

  private

- def _throttle(handle, key, cache_key, options)
- return [false, @strategy.zero_counter] if disabled?
+ def leaky_bucket_strategy?(strategy)
+ strategy == Prop::LeakyBucketStrategy
+ end
+
+ def _throttle(strategy, handle, key, cache_key, options)
+ return [false, strategy.zero_counter] if disabled?
+
+ if leaky_bucket_strategy?(strategy)
+ return Prop::LeakyBucketStrategy._throttle_leaky_bucket(handle, key, cache_key, options)
+ end

  counter = options.key?(:decrement) ?
- @strategy.decrement(cache_key, options.fetch(:decrement), options) :
- @strategy.increment(cache_key, options.fetch(:increment, 1), options)
+ strategy.decrement(cache_key, options.fetch(:decrement), options) :
+ strategy.increment(cache_key, options.fetch(:increment, 1), options)

- if @strategy.compare_threshold?(counter, :>, options)
+ if strategy.compare_threshold?(counter, :>, options)
  before_throttle_callback &&
  before_throttle_callback.call(handle, key, options[:threshold], options[:interval])

- result = if options[:first_throttled] && @strategy.first_throttled?(counter, options)
+ result = if options[:first_throttled] && strategy.first_throttled?(counter, options)
  :first_throttled
  else
  true
@@ -177,11 +184,11 @@ module Prop

  options = Prop::Options.build(key: key, params: params, defaults: defaults)

- @strategy = options.fetch(:strategy)
+ strategy = options.fetch(:strategy)

- cache_key = @strategy.build(key: key, handle: handle, interval: options[:interval])
+ cache_key = strategy.build(key: key, handle: handle, interval: options[:interval])

- [ options, cache_key ]
+ [ options, cache_key, strategy ]
  end
  end
  end
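A usage sketch of the reworked flow (handles and key invented): `prepare` now returns the strategy alongside the options and cache key, so each call resolves its own strategy instead of reading a shared `@strategy` instance variable, and interval and leaky bucket throttles can be mixed freely:

```ruby
Prop.configure(:search, threshold: 10, interval: 1.minute)
Prop.configure(:api_request, strategy: :leaky_bucket, burst_rate: 20, threshold: 5, interval: 1.minute)

Prop.throttle(:search, "user-42")       # handled by Prop::IntervalStrategy
Prop.throttle(:api_request, "user-42")  # handled by Prop::LeakyBucketStrategy
```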
data/lib/prop/options.rb CHANGED
@@ -12,17 +12,24 @@ module Prop
  result = defaults.merge(params)

  result[:key] = Prop::Key.normalize(key)
+ result[:strategy] = get_strategy(result)

- result[:strategy] = if leaky_bucket.include?(result[:strategy])
+ result[:strategy].validate_options!(result)
+ result
+ end
+
+ def self.validate_options!(options)
+ get_strategy(options).validate_options!(options)
+ end
+
+ def self.get_strategy(options)
+ if leaky_bucket.include?(options[:strategy])
  Prop::LeakyBucketStrategy
- elsif result[:strategy] == nil
+ elsif options[:strategy] == nil
  Prop::IntervalStrategy
  else
- result[:strategy] # allowing any new/unknown strategy to be used
+ options[:strategy] # allowing any new/unknown strategy to be used
  end
-
- result[:strategy].validate_options!(result)
- result
  end

  def self.leaky_bucket
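A sketch of how `get_strategy` resolves the `:strategy` option (the custom class below is hypothetical): `:leaky_bucket` maps to `Prop::LeakyBucketStrategy`, a missing value falls back to `Prop::IntervalStrategy`, and anything else is passed through unchanged so callers can plug in their own strategy class:

```ruby
# Hypothetical stand-in for a user-supplied strategy class.
class MyCustomStrategy < Prop::IntervalStrategy; end

Prop::Options.get_strategy(strategy: :leaky_bucket)     # => Prop::LeakyBucketStrategy
Prop::Options.get_strategy({})                          # => Prop::IntervalStrategy
Prop::Options.get_strategy(strategy: MyCustomStrategy)  # => MyCustomStrategy
```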
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: prop
  version: !ruby/object:Gem::Version
- version: 2.2.5
+ version: 2.6.0
  platform: ruby
  authors:
  - Morten Primdahl
  autorequire:
  bindir: bin
  cert_chain: []
- date: 2018-05-07 00:00:00.000000000 Z
+ date: 2021-08-12 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
  name: rake
@@ -115,8 +115,7 @@ required_rubygems_version: !ruby/object:Gem::Requirement
  - !ruby/object:Gem::Version
  version: '0'
  requirements: []
- rubyforge_project:
- rubygems_version: 2.7.6
+ rubygems_version: 3.1.6
  signing_key:
  specification_version: 4
  summary: Gem for implementing rate limits.