pecorino 0.6.0 → 0.7.1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +5 -26
- data/CHANGELOG.md +12 -0
- data/README.md +5 -1
- data/lib/pecorino/adapters/base_adapter.rb +66 -0
- data/lib/pecorino/adapters/memory_adapter.rb +147 -0
- data/lib/pecorino/{postgres.rb → adapters/postgres_adapter.rb} +39 -11
- data/lib/pecorino/adapters/redis_adapter/add_tokens_conditionally.lua +90 -0
- data/lib/pecorino/adapters/redis_adapter.rb +95 -0
- data/lib/pecorino/{sqlite.rb → adapters/sqlite_adapter.rb} +49 -24
- data/lib/pecorino/block.rb +9 -4
- data/lib/pecorino/install_generator.rb +2 -2
- data/lib/pecorino/leaky_bucket.rb +6 -4
- data/lib/pecorino/throttle.rb +8 -5
- data/lib/pecorino/version.rb +1 -1
- data/lib/pecorino.rb +31 -26
- data/pecorino.gemspec +3 -2
- metadata +38 -14
checksums.yaml
CHANGED
@@ -1,7 +1,7 @@
 ---
 SHA256:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: a3a2879a7549a5e41367824a5abccc4fa6cf54b8d78dd1b042f97e8a7bb5f401
+  data.tar.gz: 870bac3b9a9cb8e6dfca8b738022deb25db8d8ce33b027a9f3dd2268e3000971
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 83f230bf892dd891ef913238c1bc403cc5d9734afaea5b66698109e8792b93197af0de10fc6a9a7716dcbc64ead32acf75c64f13e60708bcd9855e9840e5e203
+  data.tar.gz: b6d4bcbb64558f08082f2603d3c791c6b951ef993fb0090e8e2b5998edeae4346dec618e667f365f0f9042453a29b148327a78e5c789f5bea602d6aadd9d5e62
data/.github/workflows/ci.yml
CHANGED
@@ -2,37 +2,11 @@ name: CI
 
 on:
   - push
-  - pull_request
 
 env:
   BUNDLE_PATH: vendor/bundle
 
 jobs:
-  # lint:
-  #   name: Code Style
-  #   runs-on: ubuntu-22.04
-  #   if: github.event_name == 'push' || github.event.pull_request.head.repo.full_name != github.repository
-  #   strategy:
-  #     matrix:
-  #       ruby:
-  #         - '2.7'
-  #   steps:
-  #     - name: Checkout
-  #       uses: actions/checkout@v4
-  #     - name: Setup Ruby
-  #       uses: ruby/setup-ruby@v1
-  #       with:
-  #         ruby-version: ${{ matrix.ruby }}
-  #         bundler-cache: true
-  #     - name: Rubocop Cache
-  #       uses: actions/cache@v3
-  #       with:
-  #         path: ~/.cache/rubocop_cache
-  #         key: ${{ runner.os }}-rubocop-${{ hashFiles('.rubocop.yml') }}
-  #         restore-keys: |
-  #           ${{ runner.os }}-rubocop-
-  #     - name: Rubocop
-  #       run: bundle exec rubocop
   test:
     name: Tests
     runs-on: ubuntu-22.04
@@ -50,6 +24,11 @@ jobs:
         options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
         ports:
           - 5432:5432
+      redis:
+        image: redis
+        options: --health-cmd "redis-cli ping" --health-interval 10s --health-timeout 5s --health-retries 5
+        ports:
+          - 6379:6379
     steps:
       - name: Checkout
         uses: actions/checkout@v4
data/CHANGELOG.md
CHANGED
@@ -1,3 +1,15 @@
+## 0.7.1
+
+- Relax dependency constraint to permit Rails 8 (not thoroughly validated yet)
+
+## 0.7.0
+
+- Allow `Pecorino.adapter` to be assigned, and add `adapter:` to all classes. This allows the adapter for Pecorino to be configured manually and overridden in an initializer.
+- Add Redis-based adapter derived from Prorate
+- Formalize and test the adapter API
+- Add a memory-based adapter for single-process applications (and as a reference)
+- For SQLite tables, do not use UUID primary keys - there is no need for that, and SQLite does not have a native UUID generation function that is enabled on all builds
+
 ## 0.6.0
 
 - Add `Pecorino::Block` for setting blocks directly. These are available both to `Throttle` with the same key and on their own. This can be used to set arbitrary blocks without having to configure a `Throttle` first.
data/README.md
CHANGED
@@ -2,7 +2,11 @@
 
 Pecorino is a rate limiter based on the concept of leaky buckets, or more specifically - based on the [generic cell rate](https://brandur.org/rate-limiting) algorithm. It uses your DB as the storage backend for the throttles. It is compact, easy to install, and does not require additional infrastructure. The approach used by Pecorino has been previously used by [prorate](https://github.com/WeTransfer/prorate) with Redis, and that approach has proven itself.
 
-Pecorino is designed to integrate seamlessly into any Rails application
+Pecorino is designed to integrate seamlessly into any Rails application, and will use either:
+
+* A memory store (good enough if you have just 1 process)
+* A PostgreSQL or SQLite database (at the moment there is no MySQL support, we would be delighted if you could add it)
+* A Redis instance
 
 If you would like to know more about the leaky bucket algorithm: [this article](http://live.julik.nl/2022/08/the-unreasonable-effectiveness-of-leaky-buckets) or the [Wikipedia article](https://en.wikipedia.org/wiki/Leaky_bucket) are both good starting points. [This Wikipedia article](https://en.wikipedia.org/wiki/Generic_cell_rate_algorithm) describes the generic cell rate algorithm in more detail as well.
 
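For orientation while reading the diffs below, here is a minimal usage sketch assembled from the APIs this release ships (the key and the numbers are illustrative, and a configured adapter plus its tables are assumed):

```ruby
require "pecorino"

# Allow 20 tokens per 10 seconds for this key; once the bucket overflows,
# block the caller for 30 seconds (see throttle.rb further down).
throttle = Pecorino::Throttle.new(key: "ip:203.0.113.1", capacity: 20, over_time: 10, block_for: 30)

throttle.request(1)      # tops up the bucket and sets a block if the fillup gets rejected
throttle.able_to_accept? # => false while a block is in effect or the bucket has no room
```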
data/lib/pecorino/adapters/base_adapter.rb
ADDED
@@ -0,0 +1,66 @@
+# frozen_string_literal: true
+
+# An adapter allows Pecorino throttles, leaky buckets and other
+# resources to interface with a data storage backend - a database, usually.
+class Pecorino::Adapters::BaseAdapter
+  # Returns the state of a leaky bucket. The state should be a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  #
+  # @param key[String] the key of the leaky bucket
+  # @param capacity[Float] the capacity of the leaky bucket to limit to
+  # @param leak_rate[Float] how many tokens leak out of the bucket per second
+  # @return [Array]
+  def state(key:, capacity:, leak_rate:)
+    [0, false]
+  end
+
+  # Adds tokens to the leaky bucket. The return value is a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  #
+  # @param key[String] the key of the leaky bucket
+  # @param capacity[Float] the capacity of the leaky bucket to limit to
+  # @param leak_rate[Float] how many tokens leak out of the bucket per second
+  # @param n_tokens[Float] how many tokens to add
+  # @return [Array]
+  def add_tokens(key:, capacity:, leak_rate:, n_tokens:)
+    [0, false]
+  end
+
+  # Adds tokens to the leaky bucket conditionally. If there is capacity, the tokens will
+  # be added. If there isn't - the fillup will be rejected. The return value is a triplet of
+  # the current level (Float), whether the bucket is now at capacity (Boolean)
+  # and whether the fillup was accepted (Boolean)
+  #
+  # @param key[String] the key of the leaky bucket
+  # @param capacity[Float] the capacity of the leaky bucket to limit to
+  # @param leak_rate[Float] how many tokens leak out of the bucket per second
+  # @param n_tokens[Float] how many tokens to add
+  # @return [Array]
+  def add_tokens_conditionally(key:, capacity:, leak_rate:, n_tokens:)
+    [0, false, false]
+  end
+
+  # Sets a timed block for the given key - this is used when a throttle fires. The return value
+  # is not defined - the call should always succeed.
+  # @param key[String] the key of the block
+  # @param block_for[#to_f, Active Support Duration] the duration of the block, in seconds
+  def set_block(key:, block_for:)
+  end
+
+  # Returns the time until which a block for a given key is in effect. If there is no block in
+  # effect, the method should return `nil`. The return value is either a `Time` or `nil`
+  # @param key[String] the key of the block
+  def blocked_until(key:)
+  end
+
+  # Deletes leaky buckets which have an expiry value prior to now and throttle blocks which have
+  # now lapsed
+  # @return [void]
+  def prune
+  end
+
+  # Creates the database tables for Pecorino to operate, or initializes other
+  # schema-like resources the adapter needs to operate
+  def create_tables(active_record_schema)
+  end
+end
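Every adapter below repeats the same extrapolation this base class documents: derive the current level from the stored level and the time elapsed since it was stored. A worked example with illustrative numbers:

```ruby
# Stored level 10.0, taken 3 s ago, leaking 2 tokens/s, capacity 12.0:
leak_rate, stored_level, dt, capacity = 2.0, 10.0, 3.0, 12.0
level_now = [0.0, stored_level - (leak_rate * dt)].max # => 4.0
at_capacity = (level_now - capacity) >= 0              # => false
```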
data/lib/pecorino/adapters/memory_adapter.rb
ADDED
@@ -0,0 +1,147 @@
+# frozen_string_literal: true
+
+# A memory store for leaky buckets and blocks
+class Pecorino::Adapters::MemoryAdapter
+  class KeyedLock
+    def initialize
+      @locked_keys = Set.new
+      @lock_mutex = Mutex.new
+    end
+
+    def lock(key)
+      loop do
+        @lock_mutex.synchronize do
+          next if @locked_keys.include?(key)
+          @locked_keys << key
+          return
+        end
+      end
+    end
+
+    def unlock(key)
+      @lock_mutex.synchronize do
+        @locked_keys.delete(key)
+      end
+    end
+
+    def with(key)
+      lock(key)
+      yield
+    ensure
+      unlock(key)
+    end
+  end
+
+  def initialize
+    @buckets = {}
+    @blocks = {}
+    @lock = KeyedLock.new
+  end
+
+  # Returns the state of a leaky bucket. The state should be a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  def state(key:, capacity:, leak_rate:)
+    @lock.lock(key)
+    level, ts = @buckets[key]
+    @lock.unlock(key)
+
+    return [0, false] unless level
+
+    dt = get_mono_time - ts
+    level_after_leak = [0, level - (leak_rate * dt)].max
+    [level_after_leak.to_f, (level_after_leak - capacity) >= 0]
+  end
+
+  # Adds tokens to the leaky bucket. The return value is a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  def add_tokens(key:, capacity:, leak_rate:, n_tokens:)
+    add_tokens_with_lock(key, capacity, leak_rate, n_tokens, _conditionally = false)
+  end
+
+  # Adds tokens to the leaky bucket conditionally. If there is capacity, the tokens will
+  # be added. If there isn't - the fillup will be rejected. The return value is a triplet of
+  # the current level (Float), whether the bucket is now at capacity (Boolean)
+  # and whether the fillup was accepted (Boolean)
+  def add_tokens_conditionally(key:, capacity:, leak_rate:, n_tokens:)
+    add_tokens_with_lock(key, capacity, leak_rate, n_tokens, _conditionally = true)
+  end
+
+  # Sets a timed block for the given key - this is used when a throttle fires. The return value
+  # is not defined - the call should always succeed.
+  def set_block(key:, block_for:)
+    raise ArgumentError, "block_for must be positive" unless block_for > 0
+    @lock.lock(key)
+    @blocks[key] = get_mono_time + block_for.to_f
+    Time.now + block_for.to_f
+  ensure
+    @lock.unlock(key)
+  end
+
+  # Returns the time until which a block for a given key is in effect. If there is no block in
+  # effect, the method should return `nil`. The return value is either a `Time` or `nil`
+  def blocked_until(key:)
+    blocked_until_monotonic = @blocks[key]
+    return unless blocked_until_monotonic
+
+    now_monotonic = get_mono_time
+    return unless blocked_until_monotonic > now_monotonic
+
+    Time.now + (blocked_until_monotonic - now_monotonic)
+  end
+
+  # Deletes leaky buckets which have an expiry value prior to now and throttle blocks which have
+  # now lapsed
+  def prune
+    now_monotonic = get_mono_time
+
+    @blocks.keys.each do |key|
+      @lock.with(key) do
+        @blocks.delete(key) if @blocks[key] && @blocks[key] < now_monotonic
+      end
+    end
+
+    @buckets.keys.each do |key|
+      @lock.with(key) do
+        _level, expire_at_monotonic = @buckets[key]
+        @buckets.delete(key) if expire_at_monotonic && expire_at_monotonic < now_monotonic
+      end
+    end
+  end
+
+  # No-op
+  def create_tables(active_record_schema)
+  end
+
+  private
+
+  def add_tokens_with_lock(key, capacity, leak_rate, n_tokens, conditionally)
+    @lock.lock(key)
+    now = get_mono_time
+    level, ts, _ = @buckets[key] || [0.0, now]
+
+    dt = now - ts
+    level_after_leak = clamp(0, level - (leak_rate * dt), capacity)
+    level_after_fillup = level_after_leak + n_tokens
+    if level_after_fillup > capacity && conditionally
+      return [level_after_leak, level_after_leak >= capacity, _did_accept = false]
+    end
+
+    clamped_level_after_fillup = clamp(0, level_after_fillup, capacity)
+    expire_after = now + (level_after_fillup / leak_rate)
+    @buckets[key] = [clamped_level_after_fillup, now, expire_after]
+
+    [clamped_level_after_fillup, clamped_level_after_fillup == capacity, _did_accept = true]
+  ensure
+    @lock.unlock(key)
+  end
+
+  def get_mono_time
+    Process.clock_gettime(Process::CLOCK_MONOTONIC)
+  end
+
+  def clamp(min, value, max)
+    return min if value < min
+    return max if value > max
+    value
+  end
+end
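A sketch of driving the new memory adapter directly through a leaky bucket - single-process only, as the CHANGELOG notes (key and numbers illustrative):

```ruby
adapter = Pecorino::Adapters::MemoryAdapter.new
bucket = Pecorino::LeakyBucket.new(key: "uploads", capacity: 5.0, over_time: 60, adapter: adapter)

result = bucket.fillup_conditionally(1.0)
# `result` is a ConditionalFillupResult carrying the capped level, whether the
# bucket is now full, and whether this particular fillup was accepted.
```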
data/lib/pecorino/{postgres.rb → adapters/postgres_adapter.rb}
RENAMED
@@ -1,6 +1,10 @@
 # frozen_string_literal: true
 
-Pecorino::Postgres = Struct.new(:model_class) do
+class Pecorino::Adapters::PostgresAdapter
+  def initialize(model_class)
+    @model_class = model_class
+  end
+
   def state(key:, capacity:, leak_rate:)
     query_params = {
       key: key.to_s,
@@ -10,7 +14,7 @@ Pecorino::Postgres = Struct.new(:model_class) do
     # The `level` of the bucket is what got stored at `last_touched_at` time, and we can
     # extrapolate from it to see how many tokens have leaked out since `last_touched_at` -
     # we don't need to UPDATE the value in the bucket here
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       SELECT
         GREATEST(
           0.0, LEAST(
@@ -26,7 +30,7 @@ Pecorino::Postgres = Struct.new(:model_class) do
 
     # If the return value of the query is a NULL it means no such bucket exists,
     # so we assume the bucket is empty
-    current_level = model_class.connection.uncached { model_class.connection.select_value(sql) } || 0.0
+    current_level = @model_class.connection.uncached { @model_class.connection.select_value(sql) } || 0.0
     [current_level, capacity - current_level.abs < 0.01]
   end
 
@@ -45,7 +49,7 @@ Pecorino::Postgres = Struct.new(:model_class) do
       fillup: n_tokens.to_f
     }
 
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       INSERT INTO pecorino_leaky_buckets AS t
         (key, last_touched_at, may_be_deleted_after, level)
       VALUES
@@ -79,7 +83,7 @@ Pecorino::Postgres = Struct.new(:model_class) do
     # query as a repeat (since we use "select_one" for the RETURNING bit) and will not call into Postgres
     # correctly, thus the clock_timestamp() value would be frozen between calls. We don't want that here.
     # See https://stackoverflow.com/questions/73184531/why-would-postgres-clock-timestamp-freeze-inside-a-rails-unit-test
-    upserted = model_class.connection.uncached { model_class.connection.select_one(sql) }
+    upserted = @model_class.connection.uncached { @model_class.connection.select_one(sql) }
     capped_level_after_fillup, at_capacity = upserted.fetch("level"), upserted.fetch("at_capacity")
     [capped_level_after_fillup, at_capacity]
   end
@@ -99,7 +103,7 @@ Pecorino::Postgres = Struct.new(:model_class) do
       fillup: n_tokens.to_f
     }
 
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       WITH pre AS MATERIALIZED (
         SELECT
           -- Note the double clamping here. First we clamp the "current level - leak" to not go below zero,
@@ -137,15 +141,16 @@ Pecorino::Postgres = Struct.new(:model_class) do
         level AS level_after
     SQL
 
-    upserted = model_class.connection.uncached { model_class.connection.select_one(sql) }
+    upserted = @model_class.connection.uncached { @model_class.connection.select_one(sql) }
     level_after = upserted.fetch("level_after")
     level_before = upserted.fetch("level_before")
     [level_after, level_after >= capacity, level_after != level_before]
   end
 
   def set_block(key:, block_for:)
+    raise ArgumentError, "block_for must be positive" unless block_for > 0
     query_params = {key: key.to_s, block_for: block_for.to_f}
-    block_set_query = model_class.sanitize_sql_array([<<~SQL, query_params])
+    block_set_query = @model_class.sanitize_sql_array([<<~SQL, query_params])
       INSERT INTO pecorino_blocks AS t
         (key, blocked_until)
       VALUES
@@ -154,13 +159,36 @@ Pecorino::Postgres = Struct.new(:model_class) do
         blocked_until = GREATEST(EXCLUDED.blocked_until, t.blocked_until)
       RETURNING blocked_until
     SQL
-    model_class.connection.uncached { model_class.connection.select_value(block_set_query) }
+    @model_class.connection.uncached { @model_class.connection.select_value(block_set_query) }
   end
 
   def blocked_until(key:)
-    block_check_query = model_class.sanitize_sql_array([<<~SQL, key])
+    block_check_query = @model_class.sanitize_sql_array([<<~SQL, key])
       SELECT blocked_until FROM pecorino_blocks WHERE key = ? AND blocked_until >= clock_timestamp() LIMIT 1
     SQL
-    model_class.connection.uncached { model_class.connection.select_value(block_check_query) }
+    @model_class.connection.uncached { @model_class.connection.select_value(block_check_query) }
+  end
+
+  def prune
+    @model_class.connection.execute("DELETE FROM pecorino_blocks WHERE blocked_until < NOW()")
+    @model_class.connection.execute("DELETE FROM pecorino_leaky_buckets WHERE may_be_deleted_after < NOW()")
+  end
+
+  def create_tables(active_record_schema)
+    active_record_schema.create_table :pecorino_leaky_buckets, id: :uuid do |t|
+      t.string :key, null: false
+      t.float :level, null: false
+      t.datetime :last_touched_at, null: false
+      t.datetime :may_be_deleted_after, null: false
+    end
+    active_record_schema.add_index :pecorino_leaky_buckets, [:key], unique: true
+    active_record_schema.add_index :pecorino_leaky_buckets, [:may_be_deleted_after]
+
+    active_record_schema.create_table :pecorino_blocks, id: :uuid do |t|
+      t.string :key, null: false
+      t.datetime :blocked_until, null: false
+    end
+    active_record_schema.add_index :pecorino_blocks, [:key], unique: true
+    active_record_schema.add_index :pecorino_blocks, [:blocked_until]
   end
 end
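Since the adapter is now a class wrapping a model class rather than a Struct, it can also be constructed explicitly - for example in an initializer, which mirrors what `pecorino.rb` below does for its default:

```ruby
# Sketch of an initializer (the file location is a convention, not mandated by the gem)
Pecorino.adapter = Pecorino::Adapters::PostgresAdapter.new(ActiveRecord::Base)
```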
data/lib/pecorino/adapters/redis_adapter/add_tokens_conditionally.lua
ADDED
@@ -0,0 +1,90 @@
+-- Single threaded Leaky Bucket implementation (without blocking).
+-- args: key_base, leak_rate, bucket_ttl, fillup. To just verify the state of the bucket a leak_rate of 0 may be passed.
+-- returns: the level of the bucket in number of tokens.
+-- This script is largely adapted from Prorate https://github.com/WeTransfer/prorate
+
+-- this is required to be able to use TIME and writes; basically it lifts the script into IO
+redis.replicate_commands()
+
+-- Redis documentation recommends passing the keys separately so that Redis
+-- can - in the future - verify that they live on the same shard of a cluster, and
+-- raise an error if they are not. As far as can be understood this functionality is not
+-- yet present, but if we can make a little effort to make ourselves more future proof
+-- we should.
+local bucket_level_key = KEYS[1]
+local last_updated_key = KEYS[2]
+
+local leak_rate = tonumber(ARGV[1])
+local fillup = tonumber(ARGV[2]) -- How many tokens this call adds to the bucket.
+local bucket_capacity = tonumber(ARGV[3]) -- How many tokens is the bucket allowed to contain
+local conditional_fillup = tonumber(ARGV[4]) -- Whether to fillup conditionally
+
+-- Compute the key TTL for the bucket. We are interested in how long it takes the bucket
+-- to leak all the way to 0, as this is the time when the values stay relevant. We pad with 1 second
+-- to have a little cushion.
+local key_lifetime = math.ceil((bucket_capacity / leak_rate) + 1)
+
+-- Take a timestamp
+local redis_time = redis.call("TIME") -- Array of [seconds, microseconds]
+local now = tonumber(redis_time[1]) + (tonumber(redis_time[2]) / 1000000)
+
+-- get current bucket level. The throttle key might not exist yet in which
+-- case we default to 0
+local bucket_level = tonumber(redis.call("GET", bucket_level_key)) or 0
+
+-- ...and then perform the leaky bucket fillup/leak. We need to do this also when the bucket has
+-- just been created because the initial fillup to add might be so high that it will
+-- immediately overflow the bucket and trigger the throttle, on the first call.
+local last_updated = tonumber(redis.call("GET", last_updated_key)) or now -- use sensible default of 'now' if the key does not exist
+
+-- Subtract the number of tokens leaked since last call
+local dt = now - last_updated
+local bucket_level_after_leaking = math.max(0, math.min(bucket_level - (leak_rate * dt), bucket_capacity))
+local bucket_level_after_fillup = bucket_level_after_leaking + fillup
+local did_accept = 0
+
+-- Figure out whether the fillup would overflow the bucket
+if conditional_fillup == 1 and bucket_level_after_fillup > bucket_capacity then
+  local at_capacity = bucket_level_after_leaking >= bucket_capacity
+  -- See below about string return
+  return {string.format("%.9f", bucket_level_after_leaking), at_capacity, did_accept}
+end
+
+-- ...and _then_ add the tokens we fillup with. Cap the value to be 0 < capacity
+local new_bucket_level = math.max(0, math.min(bucket_capacity, bucket_level_after_fillup))
+
+-- Since we return a floating point number string-formatted even if the bucket is full we
+-- have some loss of precision in the formatting, even if the bucket was actually full.
+-- This bit of information is useful to preserve.
+local at_capacity = 0
+if new_bucket_level == bucket_capacity then
+  at_capacity = 1
+end
+
+did_accept = 1
+
+-- If both the initial level was 0, and the level after putting tokens in is 0 we
+-- can avoid setting keys in Redis at all as this was only a level check.
+if new_bucket_level == 0 and bucket_level == 0 then
+  return {"0.0", at_capacity, did_accept}
+end
+
+-- Save the new bucket level
+redis.call("SETEX", bucket_level_key, key_lifetime, new_bucket_level)
+
+-- Record when we updated the bucket so that the amount of tokens leaked
+-- can be correctly determined on the next invocation
+redis.call("SETEX", last_updated_key, key_lifetime, now)
+
+-- Most Redis adapters when used with the Lua interface truncate floats
+-- to integers (at least in Python that is documented to be the case in
+-- the Redis ebook here
+-- https://redislabs.com/ebook/part-3-next-steps/chapter-11-scripting-redis-with-lua/11-1-adding-functionality-without-writing-c
+-- We need access to the bucket level as a float value since our leak rate might as well be floating point, and to achieve that
+-- we can go two ways. We can turn the float into a Lua string, and then parse it on the other side, or we can convert it to
+-- a tuple of two integer values - one for the integer component and one for fraction.
+-- Now, the unpleasant aspect is that when we do this we will lose precision - the number is not going to be
+-- exactly equal to capacity, thus we lose the bit of information which tells us whether we filled up the bucket or not.
+-- Also since the only moment we can register whether the bucket is above capacity is now - in this script, since
+-- by the next call some tokens will have leaked.
+return {string.format("%.9f", new_bucket_level), at_capacity, did_accept}
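The script's conditional branch and TTL sizing are plain arithmetic; a Ruby mirror of the rejection case, with illustrative numbers:

```ruby
capacity, level_after_leaking, fillup, leak_rate = 10.0, 9.5, 2.0, 1.0
rejected = (level_after_leaking + fillup) > capacity # 11.5 > 10.0 => true; the level stays at 9.5
key_lifetime = ((capacity / leak_rate) + 1).ceil     # 11 s: time for a full bucket to drain, plus padding
```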
data/lib/pecorino/adapters/redis_adapter.rb
ADDED
@@ -0,0 +1,95 @@
+# frozen_string_literal: true
+
+require_relative "base_adapter"
+require "digest"
+require "redis"
+
+# An adapter for storing Pecorino leaky buckets and blocks in Redis. It uses Lua
+# to enforce atomicity for leaky bucket operations
+class Pecorino::Adapters::RedisAdapter < Pecorino::Adapters::BaseAdapter
+  class RedisScript
+    def initialize(script_filename)
+      @script_body = File.read(File.dirname(__FILE__) + "/redis_adapter/" + script_filename)
+      @sha = Digest::SHA1.hexdigest(@script_body)
+    end
+
+    def load_and_eval(redis, keys, argv)
+      redis.evalsha(@sha, keys: keys, argv: argv)
+    rescue Redis::CommandError => e
+      if e.message.include? "NOSCRIPT"
+        redis.script(:load, @script_body)
+        retry
+      else
+        raise e
+      end
+    end
+  end
+
+  ADD_TOKENS_SCRIPT = RedisScript.new("add_tokens_conditionally.lua")
+
+  def initialize(redis_connection_or_connection_pool, key_prefix: "pecorino")
+    @redis_pool = redis_connection_or_connection_pool
+    @key_prefix = key_prefix
+  end
+
+  # Returns the state of a leaky bucket. The state should be a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  def state(key:, capacity:, leak_rate:)
+    add_tokens(key: key, capacity: capacity, leak_rate: leak_rate, n_tokens: 0)
+  end
+
+  # Adds tokens to the leaky bucket. The return value is a tuple of two
+  # values: the current level (Float) and whether the bucket is now at capacity (Boolean)
+  def add_tokens(key:, capacity:, leak_rate:, n_tokens:)
+    keys = ["#{@key_prefix}:leaky_bucket:#{key}:level", "#{@key_prefix}:leaky_bucket:#{key}:last_touched"]
+    argv = [leak_rate, n_tokens, capacity, _conditional = 0]
+    decimal_float_level, at_capacity_int, _ = with_redis do |redis|
+      ADD_TOKENS_SCRIPT.load_and_eval(redis, keys, argv)
+    end
+    [decimal_float_level.to_f, at_capacity_int == 1]
+  end
+
+  # Adds tokens to the leaky bucket conditionally. If there is capacity, the tokens will
+  # be added. If there isn't - the fillup will be rejected. The return value is a triplet of
+  # the current level (Float), whether the bucket is now at capacity (Boolean)
+  # and whether the fillup was accepted (Boolean)
+  def add_tokens_conditionally(key:, capacity:, leak_rate:, n_tokens:)
+    keys = ["#{@key_prefix}:leaky_bucket:#{key}:level", "#{@key_prefix}:leaky_bucket:#{key}:last_touched"]
+    argv = [leak_rate, n_tokens, capacity, _conditional = 1]
+    decimal_float_level, at_capacity_int, did_accept_int = with_redis do |redis|
+      ADD_TOKENS_SCRIPT.load_and_eval(redis, keys, argv)
+    end
+    [decimal_float_level.to_f, at_capacity_int == 1, did_accept_int == 1]
+  end
+
+  # Sets a timed block for the given key - this is used when a throttle fires. The return value
+  # is not defined - the call should always succeed.
+  def set_block(key:, block_for:)
+    raise ArgumentError, "block_for must be positive" unless block_for > 0
+    blocked_until = Time.now + block_for
+    with_redis do |r|
+      r.setex("#{@key_prefix}:leaky_bucket:#{key}:block", block_for.to_f.ceil, blocked_until.to_f)
+    end
+    blocked_until
+  end
+
+  # Returns the time until which a block for a given key is in effect. If there is no block in
+  # effect, the method should return `nil`. The return value is either a `Time` or `nil`
+  def blocked_until(key:)
+    seconds_from_epoch = with_redis do |r|
+      r.get("#{@key_prefix}:leaky_bucket:#{key}:block")
+    end
+    return unless seconds_from_epoch
+    Time.at(seconds_from_epoch.to_f).utc
+  end
+
+  private
+
+  def with_redis
+    if @redis_pool.respond_to?(:with)
+      @redis_pool.with { |conn| yield(conn) }
+    else
+      yield @redis_pool
+    end
+  end
+end
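A sketch of pointing Pecorino at Redis in an initializer. A bare connection or a ConnectionPool both work, since `with_redis` uses `#with` when the object responds to it (the URL and pool size are illustrative):

```ruby
require "redis"

Pecorino.adapter = Pecorino::Adapters::RedisAdapter.new(Redis.new(url: "redis://localhost:6379"))

# Or, with a pool:
# require "connection_pool"
# pool = ConnectionPool.new(size: 5) { Redis.new(url: "redis://localhost:6379") }
# Pecorino.adapter = Pecorino::Adapters::RedisAdapter.new(pool, key_prefix: "myapp-pecorino")
```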
data/lib/pecorino/{sqlite.rb → adapters/sqlite_adapter.rb}
RENAMED
@@ -1,6 +1,10 @@
 # frozen_string_literal: true
 
-Pecorino::Sqlite = Struct.new(:model_class) do
+class Pecorino::Adapters::SqliteAdapter
+  def initialize(model_class)
+    @model_class = model_class
+  end
+
   def state(key:, capacity:, leak_rate:)
     # With a server database, it is really important to use the clock of the database itself so
     # that concurrent requests will see consistent bucket level calculations. Since SQLite is
@@ -17,7 +21,7 @@ Pecorino::Sqlite = Struct.new(:model_class) do
     # The `level` of the bucket is what got stored at `last_touched_at` time, and we can
     # extrapolate from it to see how many tokens have leaked out since `last_touched_at` -
     # we don't need to UPDATE the value in the bucket here
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       SELECT
         MAX(
           0.0, MIN(
@@ -33,7 +37,7 @@ Pecorino::Sqlite = Struct.new(:model_class) do
 
     # If the return value of the query is a NULL it means no such bucket exists,
     # so we assume the bucket is empty
-    current_level = model_class.connection.uncached { model_class.connection.select_value(sql) } || 0.0
+    current_level = @model_class.connection.uncached { @model_class.connection.select_value(sql) } || 0.0
     [current_level, capacity - current_level.abs < 0.01]
   end
 
@@ -50,16 +54,14 @@ Pecorino::Sqlite = Struct.new(:model_class) do
       delete_after_s: may_be_deleted_after_seconds,
       leak_rate: leak_rate.to_f,
       now_s: Time.now.to_f, # See above as to why we are using a time value passed in
-      fillup: n_tokens.to_f,
-      id: SecureRandom.uuid # SQLite3 does not autogenerate UUIDs
+      fillup: n_tokens.to_f
     }
 
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       INSERT INTO pecorino_leaky_buckets AS t
-        (
+        (key, last_touched_at, may_be_deleted_after, level)
       VALUES
       (
-        :id,
         :key,
         :now_s, -- Precision loss must be avoided here as it is used for calculations
         DATETIME('now', '+:delete_after_s seconds'), -- Precision loss is acceptable here
@@ -89,7 +91,7 @@ Pecorino::Sqlite = Struct.new(:model_class) do
     # query as a repeat (since we use "select_one" for the RETURNING bit) and will not call into Postgres
     # correctly, thus the clock_timestamp() value would be frozen between calls. We don't want that here.
     # See https://stackoverflow.com/questions/73184531/why-would-postgres-clock-timestamp-freeze-inside-a-rails-unit-test
-    upserted = model_class.connection.uncached { model_class.connection.select_one(sql) }
+    upserted = @model_class.connection.uncached { @model_class.connection.select_one(sql) }
     capped_level_after_fillup, one_if_did_overflow = upserted.fetch("level"), upserted.fetch("did_overflow")
     [capped_level_after_fillup, one_if_did_overflow == 1]
   end
@@ -107,19 +109,17 @@ Pecorino::Sqlite = Struct.new(:model_class) do
       delete_after_s: may_be_deleted_after_seconds,
       leak_rate: leak_rate.to_f,
       now_s: Time.now.to_f, # See above as to why we are using a time value passed in
-      fillup: n_tokens.to_f,
-      id: SecureRandom.uuid # SQLite3 does not autogenerate UUIDs
+      fillup: n_tokens.to_f
    }
 
     # Sadly with SQLite we need to do an INSERT first, because otherwise the inserted row is visible
     # to the WITH clause, so we cannot combine the initial fillup and the update into one statement.
     # This should be fine however since we will suppress the INSERT on a key conflict
-    insert_sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    insert_sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       INSERT INTO pecorino_leaky_buckets AS t
-        (
+        (key, last_touched_at, may_be_deleted_after, level)
       VALUES
       (
-        :id,
         :key,
         :now_s, -- Precision loss must be avoided here as it is used for calculations
         DATETIME('now', '+:delete_after_s seconds'), -- Precision loss is acceptable here
@@ -130,9 +130,9 @@ Pecorino::Sqlite = Struct.new(:model_class) do
         -- so that it can't be deleted between our INSERT and our UPDATE
         may_be_deleted_after = EXCLUDED.may_be_deleted_after
     SQL
-    model_class.connection.execute(insert_sql)
+    @model_class.connection.execute(insert_sql)
 
-    sql = model_class.sanitize_sql_array([<<~SQL, query_params])
+    sql = @model_class.sanitize_sql_array([<<~SQL, query_params])
       -- With SQLite MATERIALIZED has to be used so that level_post is calculated before the UPDATE takes effect
       WITH pre(level_post_with_uncapped_fillup, level_post) AS MATERIALIZED (
         SELECT
@@ -156,30 +156,31 @@ Pecorino::Sqlite = Struct.new(:model_class) do
         level AS level_after
     SQL
 
-    upserted = model_class.connection.uncached { model_class.connection.select_one(sql) }
+    upserted = @model_class.connection.uncached { @model_class.connection.select_one(sql) }
     level_after = upserted.fetch("level_after")
     level_before = upserted.fetch("level_before")
     [level_after, level_after >= capacity, level_after != level_before]
   end
 
   def set_block(key:, block_for:)
-
-
+    raise ArgumentError, "block_for must be positive" unless block_for > 0
+    query_params = {key: key.to_s, block_for: block_for.to_f, now_s: Time.now.to_f}
+    block_set_query = @model_class.sanitize_sql_array([<<~SQL, query_params])
      INSERT INTO pecorino_blocks AS t
-        (
+        (key, blocked_until)
      VALUES
-      (:
+        (:key, :now_s + :block_for)
      ON CONFLICT (key) DO UPDATE SET
        blocked_until = MAX(EXCLUDED.blocked_until, t.blocked_until)
      RETURNING blocked_until;
    SQL
-    blocked_until_s = model_class.connection.uncached { model_class.connection.select_value(block_set_query) }
+    blocked_until_s = @model_class.connection.uncached { @model_class.connection.select_value(block_set_query) }
     Time.at(blocked_until_s)
   end
 
   def blocked_until(key:)
     now_s = Time.now.to_f
-    block_check_query = model_class.sanitize_sql_array([<<~SQL, {now_s: now_s, key: key}])
+    block_check_query = @model_class.sanitize_sql_array([<<~SQL, {now_s: now_s, key: key}])
       SELECT
         blocked_until
       FROM
@@ -187,7 +188,31 @@ Pecorino::Sqlite = Struct.new(:model_class) do
       WHERE
        key = :key AND blocked_until >= :now_s LIMIT 1
     SQL
-    blocked_until_s = model_class.connection.uncached { model_class.connection.select_value(block_check_query) }
+    blocked_until_s = @model_class.connection.uncached { @model_class.connection.select_value(block_check_query) }
     blocked_until_s && Time.at(blocked_until_s)
   end
+
+  def prune
+    now_s = Time.now.to_f
+    @model_class.connection.execute("DELETE FROM pecorino_blocks WHERE blocked_until < ?", now_s)
+    @model_class.connection.execute("DELETE FROM pecorino_leaky_buckets WHERE may_be_deleted_after < ?", now_s)
+  end
+
+  def create_tables(active_record_schema)
+    active_record_schema.create_table :pecorino_leaky_buckets do |t|
+      t.string :key, null: false
+      t.float :level, null: false
+      t.datetime :last_touched_at, null: false
+      t.datetime :may_be_deleted_after, null: false
+    end
+    active_record_schema.add_index :pecorino_leaky_buckets, [:key], unique: true
+    active_record_schema.add_index :pecorino_leaky_buckets, [:may_be_deleted_after]
+
+    active_record_schema.create_table :pecorino_blocks do |t|
+      t.string :key, null: false
+      t.datetime :blocked_until, null: false
+    end
+    active_record_schema.add_index :pecorino_blocks, [:key], unique: true
+    active_record_schema.add_index :pecorino_blocks, [:blocked_until]
+  end
 end
data/lib/pecorino/block.rb
CHANGED
@@ -8,17 +8,22 @@ class Pecorino::Block
   #
   # @param key[String] the key to set the block for
   # @param block_for[Float] the number of seconds or a time interval to block for
+  # @param adapter[Pecorino::Adapters::BaseAdapter] the adapter to set the value in.
   # @return [Time] the time when the block will be released
-  def self.set!(key:, block_for:)
-
+  def self.set!(key:, block_for:, adapter: Pecorino.adapter)
+    adapter.set_block(key: key, block_for: block_for)
     Time.now + block_for
+  rescue ArgumentError # negative block
+    nil
   end
 
   # Returns the time until a certain block is in effect
   #
+  # @param key[String] the key to get the expiry time for
+  # @param adapter[Pecorino::Adapters::BaseAdapter] the adapter to get the value from
   # @return [Time,nil] the time when the block will be released
-  def self.blocked_until(key:)
-    t =
+  def self.blocked_until(key:, adapter: Pecorino.adapter)
+    t = adapter.blocked_until(key: key)
     (t && t > Time.now) ? t : nil
   end
 end
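The `Block` API after this change, as a sketch - note that a non-positive duration now returns `nil` instead of raising, because the adapter's `ArgumentError` is rescued:

```ruby
Pecorino::Block.set!(key: "signups", block_for: 5 * 60) # => Time when the block lifts
Pecorino::Block.blocked_until(key: "signups")           # => Time, or nil once the block lapses
Pecorino::Block.set!(key: "signups", block_for: -1)     # => nil
```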
data/lib/pecorino/install_generator.rb
CHANGED
@@ -5,8 +5,8 @@ require "rails/generators/active_record"
 
 module Pecorino
   #
-  # Rails generator used for setting up
-  # Run it with +bin/rails g
+  # Rails generator used for setting up Pecorino in a Rails application.
+  # Run it with +bin/rails g pecorino:install+ in your console.
   #
   class InstallGenerator < Rails::Generators::Base
     include ActiveRecord::Generators::Migration
data/lib/pecorino/leaky_bucket.rb
CHANGED
@@ -90,12 +90,14 @@ class Pecorino::LeakyBucket
   # the bucket contents will then be capped at this value. So with
   # bucket_capacity set to 12 and a `fillup(14)` the bucket will reach the level
   # of 12, and will then immediately start leaking again.
-
+  # @param adapter[Pecorino::Adapters::BaseAdapter] a compatible adapter
+  def initialize(key:, capacity:, adapter: Pecorino.adapter, leak_rate: nil, over_time: nil)
     raise ArgumentError, "Either leak_rate: or over_time: must be specified" if leak_rate.nil? && over_time.nil?
     raise ArgumentError, "Either leak_rate: or over_time: may be specified, but not both" if leak_rate && over_time
     @leak_rate = leak_rate || (capacity / over_time.to_f)
     @key = key
     @capacity = capacity.to_f
+    @adapter = adapter
   end
 
@@ -109,7 +111,7 @@ class Pecorino::LeakyBucket
   # @param n_tokens[Float] How many tokens to fillup by
   # @return [State] the state of the bucket after the operation
   def fillup(n_tokens)
-    capped_level_after_fillup, is_full =
+    capped_level_after_fillup, is_full = @adapter.add_tokens(capacity: @capacity, key: @key, leak_rate: @leak_rate, n_tokens: n_tokens)
     State.new(capped_level_after_fillup, is_full)
   end
 
@@ -131,7 +133,7 @@ class Pecorino::LeakyBucket
   # @param n_tokens[Float] How many tokens to fillup by
   # @return [ConditionalFillupResult] the state of the bucket after the operation and whether the operation succeeded
   def fillup_conditionally(n_tokens)
-    capped_level_after_fillup, is_full, did_accept =
+    capped_level_after_fillup, is_full, did_accept = @adapter.add_tokens_conditionally(capacity: @capacity, key: @key, leak_rate: @leak_rate, n_tokens: n_tokens)
     ConditionalFillupResult.new(capped_level_after_fillup, is_full, did_accept)
   end
 
@@ -140,7 +142,7 @@ class Pecorino::LeakyBucket
   #
   # @return [State] the snapshotted state of the bucket at time of query
   def state
-    current_level, is_full =
+    current_level, is_full = @adapter.state(key: @key, capacity: @capacity, leak_rate: @leak_rate)
     State.new(current_level, is_full)
   end
 
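The docstrings above spell out the difference between the two fillup flavors; a sketch contrasting them on a fresh bucket (the memory adapter keeps the example self-contained):

```ruby
bucket = Pecorino::LeakyBucket.new(key: "demo", capacity: 12.0, leak_rate: 1.0,
  adapter: Pecorino::Adapters::MemoryAdapter.new)

bucket.fillup_conditionally(14.0) # conditional: rejected outright, the level stays at 0.0
bucket.fillup(14.0)               # unconditional: the level is capped at 12.0, reported as full
```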
data/lib/pecorino/throttle.rb
CHANGED
@@ -99,10 +99,13 @@ class Pecorino::Throttle
   # @param key[String] the key for both the block record and the leaky bucket
   # @param block_for[Numeric] the number of seconds to block any further requests for. Defaults to time it takes
   #   the bucket to leak out to the level of 0
+  # @param adapter[Pecorino::Adapters::BaseAdapter] a compatible adapter
   # @param leaky_bucket_options Options for `Pecorino::LeakyBucket.new`
   # @see PecorinoLeakyBucket.new
-  def initialize(key:, block_for: nil, **leaky_bucket_options)
-    @
+  def initialize(key:, block_for: nil, adapter: Pecorino.adapter, **leaky_bucket_options)
+    @adapter = adapter
+    leaky_bucket_options.delete(:adapter)
+    @bucket = Pecorino::LeakyBucket.new(key: key, adapter: @adapter, **leaky_bucket_options)
     @key = key.to_s
     @block_for = block_for ? block_for.to_f : (@bucket.capacity / @bucket.leak_rate)
   end
@@ -116,7 +119,7 @@ class Pecorino::Throttle
   # @param n_tokens[Float]
   # @return [boolean]
   def able_to_accept?(n_tokens = 1)
-
+    @adapter.blocked_until(key: @key).nil? && @bucket.able_to_accept?(n_tokens)
   end
 
   # Register that a request is being performed. Will raise Throttled
@@ -156,7 +159,7 @@ class Pecorino::Throttle
   #
   # @return [State] the state of the throttle after filling up the leaky bucket / trying to pass the block
   def request(n = 1)
-    existing_blocked_until = Pecorino::Block.blocked_until(key: @key)
+    existing_blocked_until = Pecorino::Block.blocked_until(key: @key, adapter: @adapter)
     return State.new(existing_blocked_until.utc) if existing_blocked_until
 
     # Topup the leaky bucket, and if the topup gets rejected - block the caller
@@ -165,7 +168,7 @@ class Pecorino::Throttle
       State.new(nil)
     else
       # and set the block if the fillup was rejected
-      fresh_blocked_until = Pecorino::Block.set!(key: @key, block_for: @block_for)
+      fresh_blocked_until = Pecorino::Block.set!(key: @key, block_for: @block_for, adapter: @adapter)
       State.new(fresh_blocked_until.utc)
    end
 end
data/lib/pecorino/version.rb
CHANGED
data/lib/pecorino.rb
CHANGED
@@ -7,24 +7,24 @@ require_relative "pecorino/version"
 require_relative "pecorino/railtie" if defined?(Rails::Railtie)
 
 module Pecorino
-  autoload :Postgres, "pecorino/postgres"
-  autoload :Sqlite, "pecorino/sqlite"
   autoload :LeakyBucket, "pecorino/leaky_bucket"
   autoload :Block, "pecorino/block"
   autoload :Throttle, "pecorino/throttle"
   autoload :CachedThrottle, "pecorino/cached_throttle"
 
+  module Adapters
+    autoload :MemoryAdapter, "pecorino/adapters/memory_adapter"
+    autoload :PostgresAdapter, "pecorino/adapters/postgres_adapter"
+    autoload :SqliteAdapter, "pecorino/adapters/sqlite_adapter"
+    autoload :RedisAdapter, "pecorino/adapters/redis_adapter"
+  end
+
   # Deletes stale leaky buckets and blocks which have expired. Run this method regularly to
   # avoid accumulating too many unused rows in your tables.
   #
   # @return void
   def self.prune!
-
-    # blocked it is probably better to avoid the big delete)
-    ActiveRecord::Base.connection.execute("DELETE FROM pecorino_blocks WHERE blocked_until < NOW()")
-
-    # Prune buckets which are no longer used. No "uncached" needed here since we are using "execute"
-    ActiveRecord::Base.connection.execute("DELETE FROM pecorino_leaky_buckets WHERE may_be_deleted_after < NOW()")
+    adapter.prune
   end
 
   # Creates the tables and indexes needed for Pecorino. Call this from your migrations like so:
@@ -38,36 +38,41 @@ module Pecorino
   # @param active_record_schema[ActiveRecord::SchemaMigration] the migration through which we will create the tables
   # @return void
   def self.create_tables(active_record_schema)
-    active_record_schema
-
-      t.float :level, null: false
-      t.datetime :last_touched_at, null: false
-      t.datetime :may_be_deleted_after, null: false
-    end
-    active_record_schema.add_index :pecorino_leaky_buckets, [:key], unique: true
-    active_record_schema.add_index :pecorino_leaky_buckets, [:may_be_deleted_after]
+    adapter.create_tables(active_record_schema)
+  end
 
-
-
-
-
-
-
+  # Allows assignment of an adapter for storing throttles. Normally this would be a subclass of `Pecorino::Adapters::BaseAdapter`, but
+  # you can assign anything you like. Set this in an initializer. By default Pecorino will use the adapter configured from your main
+  # database, but you can also create a separate database for it - or use Redis or memory storage.
+  #
+  # @param adapter[Pecorino::Adapters::BaseAdapter]
+  # @return [Pecorino::Adapters::BaseAdapter]
+  def self.adapter=(adapter)
+    @adapter = adapter
+  end
+
+  # Returns the currently configured adapter, or the default adapter from the main database
+  #
+  # @return [Pecorino::Adapters::BaseAdapter]
+  def self.adapter
+    @adapter || default_adapter_from_main_database
   end
 
   # Returns the database implementation for setting the values atomically. Since the implementation
   # differs per database, this method will return a different adapter depending on which database is
   # being used
-
+  #
+  # @param adapter[Pecorino::Adapters::BaseAdapter]
+  def self.default_adapter_from_main_database
     model_class = ActiveRecord::Base
     adapter_name = model_class.connection.adapter_name
     case adapter_name
     when /postgres/i
-      Pecorino::
+      Pecorino::Adapters::PostgresAdapter.new(model_class)
     when /sqlite/i
-      Pecorino::
+      Pecorino::Adapters::SqliteAdapter.new(model_class)
    else
-      raise "Pecorino does not support #{adapter_name} just yet"
+      raise "Pecorino does not support the #{adapter_name} database just yet"
    end
  end
 end
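The "like so" example referenced by the `create_tables` docstring is not shown in this diff; presumably it resembles this sketch (migration class name and Rails version are illustrative):

```ruby
class CreatePecorinoTables < ActiveRecord::Migration[7.0]
  def change
    Pecorino.create_tables(self)
  end
end
```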
data/pecorino.gemspec
CHANGED
@@ -30,15 +30,16 @@ Gem::Specification.new do |spec|
   spec.require_paths = ["lib"]
 
   # Uncomment to register a new dependency of your gem
-  spec.add_dependency "activerecord", "
+  spec.add_dependency "activerecord", ">= 7"
   spec.add_development_dependency "pg"
   spec.add_development_dependency "sqlite3"
-  spec.add_development_dependency "activesupport", "
+  spec.add_development_dependency "activesupport", ">= 7"
   spec.add_development_dependency "rake", "~> 13.0"
   spec.add_development_dependency "minitest", "~> 5.0"
   spec.add_development_dependency "standard"
   spec.add_development_dependency "magic_frozen_string_literal"
   spec.add_development_dependency "minitest-fail-fast"
+  spec.add_development_dependency "redis", "~> 5", "< 6"
 
   # For more information and examples about making a new gem, checkout our
   # guide at: https://bundler.io/guides/creating_gem.html
metadata
CHANGED
@@ -1,27 +1,27 @@
 --- !ruby/object:Gem::Specification
 name: pecorino
 version: !ruby/object:Gem::Version
-  version: 0.
+  version: 0.7.1
 platform: ruby
 authors:
 - Julik Tarkhanov
-autorequire:
+autorequire:
 bindir: exe
 cert_chain: []
-date:
+date: 2025-03-13 00:00:00.000000000 Z
 dependencies:
 - !ruby/object:Gem::Dependency
   name: activerecord
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
         version: '7'
   type: :runtime
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
         version: '7'
 - !ruby/object:Gem::Dependency
@@ -56,16 +56,16 @@ dependencies:
   name: activesupport
   requirement: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '7
+        version: '7'
   type: :development
   prerelease: false
   version_requirements: !ruby/object:Gem::Requirement
     requirements:
-    - - "
+    - - ">="
       - !ruby/object:Gem::Version
-        version: '7
+        version: '7'
 - !ruby/object:Gem::Dependency
   name: rake
   requirement: !ruby/object:Gem::Requirement
@@ -136,6 +136,26 @@ dependencies:
     - - ">="
       - !ruby/object:Gem::Version
         version: '0'
+- !ruby/object:Gem::Dependency
+  name: redis
+  requirement: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '5'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '6'
+  type: :development
+  prerelease: false
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - "~>"
+      - !ruby/object:Gem::Version
+        version: '5'
+    - - "<"
+      - !ruby/object:Gem::Version
+        version: '6'
 description: Pecorino allows you to define throttles and rate meters for your metered
   resources, all through your standard DB
 email:
@@ -154,14 +174,18 @@ files:
 - README.md
 - Rakefile
 - lib/pecorino.rb
+- lib/pecorino/adapters/base_adapter.rb
+- lib/pecorino/adapters/memory_adapter.rb
+- lib/pecorino/adapters/postgres_adapter.rb
+- lib/pecorino/adapters/redis_adapter.rb
+- lib/pecorino/adapters/redis_adapter/add_tokens_conditionally.lua
+- lib/pecorino/adapters/sqlite_adapter.rb
 - lib/pecorino/block.rb
 - lib/pecorino/cached_throttle.rb
 - lib/pecorino/install_generator.rb
 - lib/pecorino/leaky_bucket.rb
 - lib/pecorino/migrations/create_pecorino_tables.rb.erb
-- lib/pecorino/postgres.rb
 - lib/pecorino/railtie.rb
-- lib/pecorino/sqlite.rb
 - lib/pecorino/throttle.rb
 - lib/pecorino/version.rb
 - pecorino.gemspec
@@ -172,7 +196,7 @@ metadata:
   homepage_uri: https://github.com/cheddar-me/pecorino
   source_code_uri: https://github.com/cheddar-me/pecorino
   changelog_uri: https://github.com/cheddar-me/pecorino/CHANGELOG.md
-post_install_message:
+post_install_message:
 rdoc_options: []
 require_paths:
 - lib
@@ -187,8 +211,8 @@ required_rubygems_version: !ruby/object:Gem::Requirement
   - !ruby/object:Gem::Version
     version: '0'
 requirements: []
-rubygems_version: 3.
-signing_key:
+rubygems_version: 3.1.6
+signing_key:
 specification_version: 4
 summary: Database-based rate limiter using leaky buckets
 test_files: []