prorate 0.4.0 → 0.5.0

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
-   metadata.gz: 415042776ee7b18bf44e586fe148c822a44746bd
-   data.tar.gz: 7e249159cd2b18b699fc1f7b4fcb0f3923f2082d
+   metadata.gz: 89ad7bfadc58561263e35de3de2a0eef9c4b5aca
+   data.tar.gz: 51591b0aabe76204c4a9029a095150faf434c84d
  SHA512:
-   metadata.gz: f84dc40f3b0d789a2a8a3b0d9739b958958d895c2a43d2a60265a325465c46923ec8b5ce39fbf94d7942ce806315ffcb19bf39d4de9039853285aadfa8ecb933
-   data.tar.gz: 9335ecd1f3c25f082515dc5a7396aee2670748e6f287b481ef723699baaf355e5b67d0e23123721aefca894f0a2dd2e03065700ef581cad1b6bf9a64f963a1f9
+   metadata.gz: 5393f7eb13d4bd9236f3809c90215ae1a9700dd20a17c195db4180a526502278055365fc8e5dfce4644bba737a808cd738e7e6d3120f07ff733d2a8d214a7749
+   data.tar.gz: 604f4fd70fd8d9506cdccff6e819d91d2e26eae94e70ba00f3cf212118ca5d2fb2eeab8052c0702bea4a76d423be93c860dded6de64cd140ad1ddacad9ea1e2e
data/README.md CHANGED
@@ -4,6 +4,9 @@ Provides a low-level time-based throttle. Is mainly meant for situations where u
  useful since you need access to more variables. Under the hood, this uses a Lua script that implements the
  [Leaky Bucket](https://en.wikipedia.org/wiki/Leaky_bucket) algorithm in a single threaded and race condition safe way.
 
+ [![Build Status](https://travis-ci.org/WeTransfer/prorate.svg?branch=master)](https://travis-ci.org/WeTransfer/prorate)
+ [![Gem Version](https://badge.fury.io/rb/prorate.svg)](https://badge.fury.io/rb/prorate)
+
  ## Installation
 
  Add this line to your application's Gemfile:
@@ -27,14 +30,14 @@ Within your Rails controller:
  t = Prorate::Throttle.new(redis: Redis.new, logger: Rails.logger,
    name: "throttle-login-email", limit: 20, period: 5.seconds)
  # Add all the parameters that function as a discriminator
- t << request.ip
- t << params.require(:email)
+ t << request.ip << params.require(:email)
  # ...and call the throttle! method
  t.throttle! # Will raise a Prorate::Throttled exception if the limit has been reached
 
  To capture that exception, in the controller
 
  rescue_from Prorate::Throttled do |e|
+   response.set_header('Retry-After', e.retry_in_seconds.to_s)
    render nothing: true, status: 429
  end
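
Taken together, the two README additions above (chained discriminators and the `Retry-After` header) would plug into a controller roughly like the sketch below; the controller and action names are hypothetical and only illustrate the documented calls:

```ruby
class SessionsController < ApplicationController
  # Tell well-behaved clients how long to back off before rendering the 429
  rescue_from Prorate::Throttled do |e|
    response.set_header('Retry-After', e.retry_in_seconds.to_s)
    render nothing: true, status: 429
  end

  def create
    t = Prorate::Throttle.new(redis: Redis.new, logger: Rails.logger,
      name: "throttle-login-email", limit: 20, period: 5.seconds)
    # Both discriminators can now be appended in one chained call
    t << request.ip << params.require(:email)
    t.throttle! # raises Prorate::Throttled when the limit is hit
    # ... proceed with the actual sign-in
  end
end
```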
data/Rakefile CHANGED
@@ -1,6 +1,13 @@
  require "bundler/gem_tasks"
  require "rspec/core/rake_task"
  require 'rubocop/rake_task'
+ require 'yard'
+
+ YARD::Rake::YardocTask.new(:doc) do |t|
+   # The dash has to be between the two to "divide" the source files and
+   # miscellaneous documentation files that contain no code
+   t.files = ['lib/**/*.rb', '-', 'LICENSE.txt']
+ end
 
  RSpec::Core::RakeTask.new(:spec)
  RuboCop::RakeTask.new(:rubocop)
data/lib/prorate/rate_limit.lua CHANGED
@@ -14,6 +14,7 @@ local block_key = ARGV[1] .. ".block"
  local max_bucket_capacity = tonumber(ARGV[2])
  local leak_rate = tonumber(ARGV[3])
  local block_duration = tonumber(ARGV[4])
+ local n_tokens = tonumber(ARGV[5]) -- How many tokens this call adds to the bucket. Defaults to 1
  local now = tonumber(redis.call("TIME")[1]) --unix timestamp, will be required in all paths
 
  local key_lifetime = math.ceil(max_bucket_capacity / leak_rate)
@@ -23,28 +24,29 @@ if blocked_until then
    return {(tonumber(blocked_until) - now), 0}
  end
 
- -- get current bucket level
- local bucket_level = tonumber(redis.call("GET", bucket_level_key))
- if not bucket_level then
-   -- this throttle/identifier combo does not exist yet, so much calculation can be skipped
-   redis.call("SETEX", bucket_level_key, key_lifetime, 1) -- set bucket with initial value
-   retval = {0, 1}
+ -- get current bucket level. The throttle key might not exist yet in which
+ -- case we default to 0
+ local bucket_level = tonumber(redis.call("GET", bucket_level_key)) or 0
+
+ -- ...and then perform the leaky bucket fillup/leak. We need to do this also when the bucket has
+ -- just been created because the initial n_tokens to add might be so high that it will
+ -- immediately overflow the bucket and trigger the throttle, on the first call.
+ local last_updated = tonumber(redis.call("GET", last_updated_key)) or now -- use sensible default of 'now' if the key does not exist
+ local new_bucket_level = math.max(0, bucket_level - (leak_rate * (now - last_updated)))
+
+ if (new_bucket_level + n_tokens) <= max_bucket_capacity then
+   new_bucket_level = math.max(0, new_bucket_level + n_tokens)
+   retval = {0, math.ceil(new_bucket_level)}
  else
-   -- if it already exists, do the leaky bucket thing
-   local last_updated = tonumber(redis.call("GET", last_updated_key)) or now -- use sensible default of 'now' if the key does not exist
-   local new_bucket_level = math.max(0, bucket_level - (leak_rate * (now - last_updated)))
-
-   if (new_bucket_level + 1) <= max_bucket_capacity then
-     new_bucket_level = new_bucket_level + 1
-     retval = {0, math.ceil(new_bucket_level)}
-   else
-     redis.call("SETEX", block_key, block_duration, now + block_duration)
-     retval = {block_duration, 0}
-   end
-   redis.call("SETEX", bucket_level_key, key_lifetime, new_bucket_level) --still needs to be saved
+   redis.call("SETEX", block_key, block_duration, now + block_duration)
+   retval = {block_duration, 0}
  end
 
- -- update last_updated for this bucket, required in all branches
+ -- Save the new bucket level
+ redis.call("SETEX", bucket_level_key, key_lifetime, new_bucket_level)
+
+ -- Record when we updated the bucket so that the amount of tokens leaked
+ -- can be correctly determined on the next invocation
  redis.call("SETEX", last_updated_key, key_lifetime, now)
 
  return retval
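
The arithmetic the rewritten script performs is easy to check outside Redis. The sketch below mirrors the Lua logic in plain Ruby; the names follow the script, while `block_duration` and the sample numbers are illustrative only:

```ruby
# Plain-Ruby model of one invocation of rate_limit.lua, without the Redis reads/writes.
# Returns [remaining_block_time, bucket_level] just like the script does.
def leaky_bucket_step(bucket_level:, last_updated:, now:,
                      leak_rate:, capacity:, block_duration:, n_tokens: 1)
  # Leak first: the bucket drains at leak_rate tokens per second since it was last touched
  new_level = [0.0, bucket_level - leak_rate * (now - last_updated)].max

  if new_level + n_tokens <= capacity
    # Room left: add the tokens, no block
    [0, (new_level + n_tokens).ceil]
  else
    # Overflow: the script would set the block key and report the block duration
    [block_duration, 0]
  end
end

# With limit: 20 over period: 5 the leak rate is 4 tokens/second, so a bucket that
# was full 3 seconds ago has leaked down to 8 and the next token is still accepted:
leaky_bucket_step(bucket_level: 20, last_updated: 0, now: 3,
                  leak_rate: 4.0, capacity: 20, block_duration: 30) # => [0, 9]
```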
data/lib/prorate/throttle.rb CHANGED
@@ -1,20 +1,12 @@
  require 'digest'
 
  module Prorate
-   class ScriptHashMismatch < StandardError
-   end
-
    class MisconfiguredThrottle < StandardError
    end
 
    class Throttle < Ks.strict(:name, :limit, :period, :block_for, :redis, :logger)
-     def self.lua_script_hash
-       script_filepath = File.join(__dir__, "rate_limit.lua")
-       script = File.read(script_filepath)
-       Digest::SHA1.hexdigest(script)
-     end
-
-     CURRENT_SCRIPT_HASH = lua_script_hash
+     LUA_SCRIPT_CODE = File.read(File.join(__dir__, "rate_limit.lua"))
+     LUA_SCRIPT_HASH = Digest::SHA1.hexdigest(LUA_SCRIPT_CODE)
 
      def initialize(*)
        super
@@ -24,17 +16,77 @@ module Prorate
        @leak_rate = limit.to_f / period # tokens per second;
      end
 
+     # Add a value that will be used to distinguish this throttle from others.
+     # It has to be something user- or connection-specific, and multiple
+     # discriminators can be combined:
+     #
+     #     throttle << ip_address << user_agent_fingerprint
+     #
+     # @param discriminator[Object] a Ruby object that can be marshaled
+     #   in an equivalent way between requests, using `Marshal.dump`
      def <<(discriminator)
        @discriminators << discriminator
      end
 
-     def throttle!
+     # Applies the throttle and raises a {Throttled} exception if it has been triggered
+     #
+     # Accepts an optional number of tokens to put in the bucket (default is 1).
+     # The effect of `n_tokens:` set to 0 is a "ping".
+     # It makes sure the throttle keys in Redis get created and adjusts the
+     # last invoked time of the leaky bucket. Can be used when a throttle
+     # is applied in a "shadow" fashion. For example, imagine you
+     # have a cascade of throttles with the following block times:
+     #
+     #     Throttle A: [-------]
+     #     Throttle B: [----------]
+     #
+     # You apply Throttle A and it fires, but when that happens you also
+     # want to enable a throttle that is applied to "repeat offenders" only
+     # - for instance ones that probe for tokens and/or passwords.
+     #
+     #     Throttle C: [-------------------------------]
+     #
+     # If your "Throttle A" fires, you can trigger Throttle C
+     #
+     #     Throttle A: [-----|-]
+     #     Throttle C: [-----|-------------------------]
+     #
+     # because you know that Throttle A has fired and thus Throttle C comes
+     # into effect. What you want to do, however, is to fire Throttle C
+     # even though Throttle A would have unlatched, which would create this
+     # call sequence:
+     #
+     #     Throttle A: [-------] *(A not triggered)
+     #     Throttle C: [------------|------------------]
+     #
+     # To achieve that you can keep Throttle C alive using `throttle!(n_tokens: 0)`,
+     # on every check that touches Throttle A and/or Throttle C. It keeps the leaky bucket
+     # updated but does not add any tokens to it:
+     #
+     #     Throttle A: [------] *(A not triggered since block period has ended)
+     #     Throttle C: [-----------|(ping)------------------] C is still blocking
+     #
+     # So you can effectively "keep a throttle alive" without ever triggering it,
+     # or keep it alive in combination with other throttles.
+     #
+     # @param n_tokens[Integer] the number of tokens to put in the bucket. If you are
+     #   using Prorate for rate limiting, and a single request is adding N objects to your
+     #   database for example, you can "top up" the bucket with a set number of tokens
+     #   with an arbitrary ratio - like 1 token per inserted row. Once the bucket fills up
+     #   the Throttled exception is going to be raised. Defaults to 1.
+     def throttle!(n_tokens: 1)
        discriminator = Digest::SHA1.hexdigest(Marshal.dump(@discriminators))
        identifier = [name, discriminator].join(':')
 
        redis.with do |r|
-         logger.info { "Applying throttle counter %s" % name }
-         remaining_block_time, bucket_level = run_lua_throttler(redis: r, identifier: identifier, bucket_capacity: limit, leak_rate: @leak_rate, block_for: block_for)
+         logger.debug { "Applying throttle counter %s" % name }
+         remaining_block_time, bucket_level = run_lua_throttler(
+           redis: r,
+           identifier: identifier,
+           bucket_capacity: limit,
+           leak_rate: @leak_rate,
+           block_for: block_for,
+           n_tokens: n_tokens)
 
          if remaining_block_time > 0
            logger.warn { "Throttle %s exceeded limit of %d in %d seconds and is blocked for the next %d seconds" % [name, limit, period, remaining_block_time] }
@@ -44,16 +96,16 @@ module Prorate
        end
      end
 
-     def run_lua_throttler(redis:, identifier:, bucket_capacity:, leak_rate:, block_for:)
-       redis.evalsha(CURRENT_SCRIPT_HASH, [], [identifier, bucket_capacity, leak_rate, block_for])
+     private
+
+     def run_lua_throttler(redis:, identifier:, bucket_capacity:, leak_rate:, block_for:, n_tokens:)
+       redis.evalsha(LUA_SCRIPT_HASH, [], [identifier, bucket_capacity, leak_rate, block_for, n_tokens])
      rescue Redis::CommandError => e
        if e.message.include? "NOSCRIPT"
-         # The Redis server has never seen this script before. Needs to run only once in the entire lifetime of the Redis server (unless the script changes)
-         script_filepath = File.join(__dir__, "rate_limit.lua")
-         script = File.read(script_filepath)
-         raise ScriptHashMismatch if Digest::SHA1.hexdigest(script) != CURRENT_SCRIPT_HASH
-         redis.script(:load, script)
-         redis.evalsha(CURRENT_SCRIPT_HASH, [], [identifier, bucket_capacity, leak_rate, block_for])
+         # The Redis server has never seen this script before. Needs to run only once in the entire lifetime
+         # of the Redis server, until the script changes - in which case it will be loaded under a different SHA
+         redis.script(:load, LUA_SCRIPT_CODE)
+         retry
        else
          raise e
        end
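
Based on the documentation added above, the new `n_tokens:` keyword supports two usage patterns; a short sketch, where the throttle names, limits, and the `rows_inserted` count are hypothetical:

```ruby
bulk_throttle = Prorate::Throttle.new(redis: Redis.new, logger: Rails.logger,
  name: "bulk-inserts-per-ip", limit: 100, period: 60)
bulk_throttle << request.ip

# 1. Weighted requests: add one token per inserted row instead of the default
#    single token, so the bucket fills in proportion to the work a request caused
bulk_throttle.throttle!(n_tokens: rows_inserted)

# 2. "Ping" mode: keep a longer-running shadow throttle's bucket up to date
#    without adding any tokens to it (the cascade example from the docs above)
repeat_offender_throttle.throttle!(n_tokens: 0)
```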
data/lib/prorate/throttled.rb CHANGED
@@ -1,5 +1,17 @@
+ # The Throttled exception gets raised when a throttle is triggered.
+ #
+ # The exception carries additional attributes which can be used for
+ # error tracking and for creating a correct Retry-After HTTP header for
+ # a 429 response
  class Prorate::Throttled < StandardError
-   attr_reader :throttle_name, :retry_in_seconds
+   # @attr [String] the name of the throttle (like "shpongs-per-ip").
+   #   Can be used to detect which throttle has fired when multiple
+   #   throttles are used within the same block.
+   attr_reader :throttle_name
+
+   # @attr [Integer] for how long the caller will be blocked, in seconds.
+   attr_reader :retry_in_seconds
+
    def initialize(throttle_name, try_again_in)
      @throttle_name = throttle_name
      @retry_in_seconds = try_again_in
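
Outside of the Rails `rescue_from` shown in the README, the two attributes can also be read directly; a sketch assuming two hypothetical throttles guarding the same block:

```ruby
begin
  per_ip_throttle.throttle!
  per_account_throttle.throttle!
rescue Prorate::Throttled => e
  # throttle_name identifies which of the two throttles fired
  logger.warn("#{e.throttle_name} triggered, retry in #{e.retry_in_seconds}s")
  [429, { 'Retry-After' => e.retry_in_seconds.to_s }, []] # minimal Rack-style response
end
```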
data/lib/prorate/version.rb CHANGED
@@ -1,3 +1,3 @@
  module Prorate
-   VERSION = "0.4.0"
+   VERSION = "0.5.0"
  end
data/prorate.gemspec CHANGED
@@ -34,4 +34,5 @@ Gem::Specification.new do |spec|
    spec.add_development_dependency "rake", "~> 12.3"
    spec.add_development_dependency "rspec", "~> 3.0"
    spec.add_development_dependency 'wetransfer_style', '0.6.0'
+   spec.add_development_dependency 'yard', '~> 0.9'
  end
metadata CHANGED
@@ -1,14 +1,14 @@
  --- !ruby/object:Gem::Specification
  name: prorate
  version: !ruby/object:Gem::Version
-   version: 0.4.0
+   version: 0.5.0
  platform: ruby
  authors:
  - Julik Tarkhanov
  autorequire:
  bindir: exe
  cert_chain: []
- date: 2019-08-06 00:00:00.000000000 Z
+ date: 2019-08-13 00:00:00.000000000 Z
  dependencies:
  - !ruby/object:Gem::Dependency
    name: ks
@@ -108,6 +108,20 @@ dependencies:
      - - '='
        - !ruby/object:Gem::Version
          version: 0.6.0
+ - !ruby/object:Gem::Dependency
+   name: yard
+   requirement: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.9'
+   type: :development
+   prerelease: false
+   version_requirements: !ruby/object:Gem::Requirement
+     requirements:
+     - - "~>"
+       - !ruby/object:Gem::Version
+         version: '0.9'
  description: Can be used to implement all kinds of throttles
  email:
  - me@julik.nl