rapidity 0.0.6.312500 → 0.0.7.362805
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Gemfile.lock +15 -16
- data/README.md +50 -0
- data/lib/rapidity/share/base.rb +168 -0
- data/lib/rapidity/share/limit.rb +53 -0
- data/lib/rapidity/share/lua_scripts/acquire.lua +175 -0
- data/lib/rapidity/share/lua_scripts/acquire_queue.lua +149 -0
- data/lib/rapidity/share/lua_scripts/available_in.lua +147 -0
- data/lib/rapidity/share/lua_scripts/check_queue.lua +50 -0
- data/lib/rapidity/share/lua_scripts/delete.lua +31 -0
- data/lib/rapidity/share/lua_scripts/info.lua +37 -0
- data/lib/rapidity/share/lua_scripts/init.lua +82 -0
- data/lib/rapidity/share/lua_scripts/list.lua +59 -0
- data/lib/rapidity/share/lua_scripts/release_queue.lua +138 -0
- data/lib/rapidity/share/lua_scripts/reset.lua +61 -0
- data/lib/rapidity/share/producer.rb +77 -0
- data/lib/rapidity/share/sender.rb +97 -0
- data/lib/rapidity/version.rb +1 -1
- data/lib/rapidity.rb +10 -6
- metadata +52 -10
checksums.yaml
CHANGED
|
@@ -1,7 +1,7 @@
|
|
|
1
1
|
---
|
|
2
2
|
SHA256:
|
|
3
|
-
metadata.gz:
|
|
4
|
-
data.tar.gz:
|
|
3
|
+
metadata.gz: ec1c0e662f963921cf709fa7769897774ab2814b9c1c6b5017dc38379e25614a
|
|
4
|
+
data.tar.gz: 978b48d1149569859268054e0cbd8eeff7577c9b2fc5cceec2040a1361b974e1
|
|
5
5
|
SHA512:
|
|
6
|
-
metadata.gz:
|
|
7
|
-
data.tar.gz:
|
|
6
|
+
metadata.gz: ca19db773ea028961e6dd593faf29d27a0039b809e22b4be258cbbd1a237b5b14d306ef57928d53a5ffd7578e113e4701d933416bd1ed49ad86a29e65f9edd7b
|
|
7
|
+
data.tar.gz: 5d43836d52cda325188b2628e92037e54e44d58972c36ea2eefc024e64aa011191c16971326c01ad86b2eb7db84af6b8a8dc68e8a17374f5b901227202fec437
|
data/Gemfile.lock
CHANGED
|
@@ -1,19 +1,17 @@
|
|
|
1
1
|
PATH
|
|
2
2
|
remote: .
|
|
3
3
|
specs:
|
|
4
|
-
rapidity (0.0.
|
|
4
|
+
rapidity (0.0.7.362805)
|
|
5
5
|
activesupport
|
|
6
6
|
connection_pool
|
|
7
|
+
ostruct
|
|
7
8
|
redis
|
|
8
9
|
|
|
9
10
|
GEM
|
|
10
11
|
remote: https://rubygems.org/
|
|
11
12
|
specs:
|
|
12
|
-
activesupport (
|
|
13
|
-
|
|
14
|
-
i18n (>= 1.6, < 2)
|
|
15
|
-
minitest (>= 5.1)
|
|
16
|
-
tzinfo (~> 2.0)
|
|
13
|
+
activesupport (3.1.12)
|
|
14
|
+
multi_json (~> 1.0)
|
|
17
15
|
addressable (2.8.0)
|
|
18
16
|
public_suffix (>= 2.0.2, < 5.0)
|
|
19
17
|
ansi (1.5.0)
|
|
@@ -27,7 +25,6 @@ GEM
|
|
|
27
25
|
byebug (11.1.3)
|
|
28
26
|
coercible (1.0.0)
|
|
29
27
|
descendants_tracker (~> 0.0.1)
|
|
30
|
-
concurrent-ruby (1.1.10)
|
|
31
28
|
connection_pool (2.2.5)
|
|
32
29
|
descendants_tracker (0.0.4)
|
|
33
30
|
thread_safe (~> 0.3, >= 0.3.1)
|
|
@@ -44,13 +41,12 @@ GEM
|
|
|
44
41
|
path_expander (~> 1.0)
|
|
45
42
|
ruby_parser (~> 3.1, > 3.1.0)
|
|
46
43
|
sexp_processor (~> 4.8)
|
|
47
|
-
i18n (1.10.0)
|
|
48
|
-
concurrent-ruby (~> 1.0)
|
|
49
44
|
ice_nine (0.11.2)
|
|
50
45
|
kwalify (0.7.2)
|
|
51
46
|
launchy (2.5.0)
|
|
52
47
|
addressable (~> 2.7)
|
|
53
|
-
|
|
48
|
+
multi_json (1.19.1)
|
|
49
|
+
ostruct (0.6.3)
|
|
54
50
|
parser (3.1.2.0)
|
|
55
51
|
ast (~> 2.4.1)
|
|
56
52
|
path_expander (1.1.0)
|
|
@@ -64,6 +60,7 @@ GEM
|
|
|
64
60
|
kwalify (~> 0.7.0)
|
|
65
61
|
parser (~> 3.1.0)
|
|
66
62
|
rainbow (>= 2.0, < 4.0)
|
|
63
|
+
rexml (3.4.4)
|
|
67
64
|
rspec (3.11.0)
|
|
68
65
|
rspec-core (~> 3.11.0)
|
|
69
66
|
rspec-expectations (~> 3.11.0)
|
|
@@ -95,12 +92,15 @@ GEM
|
|
|
95
92
|
tty-which (~> 0.4.0)
|
|
96
93
|
virtus (~> 1.0)
|
|
97
94
|
sexp_processor (4.16.1)
|
|
98
|
-
shoulda-matchers (
|
|
99
|
-
activesupport (>=
|
|
95
|
+
shoulda-matchers (2.8.0)
|
|
96
|
+
activesupport (>= 3.0.0)
|
|
100
97
|
simplecov (0.21.2)
|
|
101
98
|
docile (~> 1.1)
|
|
102
99
|
simplecov-html (~> 0.11)
|
|
103
100
|
simplecov_json_formatter (~> 0.1)
|
|
101
|
+
simplecov-cobertura (3.1.0)
|
|
102
|
+
rexml
|
|
103
|
+
simplecov (~> 0.19)
|
|
104
104
|
simplecov-console (0.9.1)
|
|
105
105
|
ansi
|
|
106
106
|
simplecov
|
|
@@ -112,8 +112,6 @@ GEM
|
|
|
112
112
|
thread_safe (0.3.6)
|
|
113
113
|
timeouter (0.1.3.38794)
|
|
114
114
|
tty-which (0.4.2)
|
|
115
|
-
tzinfo (2.0.4)
|
|
116
|
-
concurrent-ruby (~> 1.0)
|
|
117
115
|
unicode-display_width (2.2.0)
|
|
118
116
|
virtus (1.0.5)
|
|
119
117
|
axiom-types (~> 0.1)
|
|
@@ -128,15 +126,16 @@ PLATFORMS
|
|
|
128
126
|
DEPENDENCIES
|
|
129
127
|
awesome_print
|
|
130
128
|
bump
|
|
131
|
-
bundler
|
|
129
|
+
bundler
|
|
132
130
|
byebug
|
|
133
131
|
rapidity!
|
|
134
|
-
rspec
|
|
132
|
+
rspec
|
|
135
133
|
rspec-collection_matchers
|
|
136
134
|
rspec_junit_formatter
|
|
137
135
|
rubycritic
|
|
138
136
|
shoulda-matchers
|
|
139
137
|
simplecov
|
|
138
|
+
simplecov-cobertura
|
|
140
139
|
simplecov-console
|
|
141
140
|
timeouter
|
|
142
141
|
|
data/README.md
CHANGED
|
@@ -79,6 +79,56 @@ loop do
|
|
|
79
79
|
end
|
|
80
80
|
```
|
|
81
81
|
|
|
82
|
+
## Share module expansion
|
|
83
|
+
If your message producer and message sender are independent services, and you want the sender to be agnostic of the business rules for rate limiting, use the classes in the Share module. The producer is responsible for initializing and configuring the rate limits (e.g., token bucket) with the correct business parameters in Redis. The sender then only consumes these pre-defined limits without knowing the underlying rules.
|
|
84
|
+
|
|
85
|
+
```mermaid
|
|
86
|
+
flowchart LR
|
|
87
|
+
G(producer)
|
|
88
|
+
B[Redis]
|
|
89
|
+
A(["message broker"])
|
|
90
|
+
T(sender)
|
|
91
|
+
E["external system with request limiting"]
|
|
92
|
+
G-- init limit -->B
|
|
93
|
+
B-- acquire limit -->T
|
|
94
|
+
G-- message [limit1, limit2] -->A
|
|
95
|
+
A--->T
|
|
96
|
+
T-- limited request -->E
|
|
97
|
+
```
|
|
98
|
+
|
|
99
|
+
### Base scenario
|
|
100
|
+
|
|
101
|
+
`Producer` ONLY creates limits and sends messages to the `Sender`. The `Sender` actually uses the limits to achieve overall rate limiting.
|
|
102
|
+
|
|
103
|
+
### Optional **Feedback-Driven Flow Control** scenario
|
|
104
|
+
|
|
105
|
+
Beyond basic rate limiting, the Share module offers **optional queue management capabilities** that enable sophisticated **Feedback-Driven Flow Control**. This feature allows systems to handle temporary load spikes more gracefully while maintaining communication between producers and consumers.
|
|
106
|
+
|
|
107
|
+
In this scenario, `Producer` additionally checks the optional `max_queue` attribute in the limit to understand whether it makes sense to send requests to `Sender` or whether it is already loaded with previous requests.
|
|
108
|
+
The `Producer` MUST acquire the `max_queue` semaphore on the limits, and the `Sender` MUST then release the `max_queue` semaphore after the actual send request.
|
|
109
|
+
|
|
110
|
+
### Workflow with Code Examples
|
|
111
|
+
1. Initializing Limits and Optional Queues (Producer Side)
|
|
112
|
+
The message producer initializes rate limits with specific business rules. Queues can be added for handling traffic spikes.
|
|
113
|
+
|
|
114
|
+
```ruby
|
|
115
|
+
@pool = ConnectionPool.new(size: 5, timeout: 5) { Redis.new }
|
|
116
|
+
@producer = Rapidity::Share::Producer.new(@pool)
|
|
117
|
+
api_day_limit = Limit.new("day_limit", max_tokens: 1000, period: 86400, namespace: 'api_v2')
|
|
118
|
+
api_hour_limit = Limit.new("hour_limit", max_tokens: 100, period: 3600, max_queue: 100, namespace: 'api_v2')
|
|
119
|
+
@producer.init(api_day_limit)
|
|
120
|
+
@producer.init(api_hour_limit)
|
|
121
|
+
```
|
|
122
|
+
2. Each message is tagged with the limits it should consume when processed.
|
|
123
|
+
3. Messages flow through your message broker to the sender service.
|
|
124
|
+
4. The message sender attempts to acquire tokens before sending. If unavailable, it waits according to the token bucket algorithm.
|
|
125
|
+
```ruby
|
|
126
|
+
@pool = ConnectionPool.new(size: 5, timeout: 5) { Redis.new }
|
|
127
|
+
@sender = Rapidity::Share::Sender.new(@pool)
|
|
128
|
+
@sender.acquire(message['api_v2:day_limit', 'api_v2:hour_limit'], tokens: 1)
|
|
129
|
+
```
|
|
130
|
+
5. For queue-backed limits, senders can release tokens back to the queue to signal max_queue availability.
|
|
131
|
+
|
|
82
132
|
## Installation
|
|
83
133
|
|
|
84
134
|
It's a gem:
|
|
@@ -0,0 +1,168 @@
|
|
|
1
|
+
require 'ostruct'
|
|
2
|
+
|
|
3
|
+
module Rapidity
|
|
4
|
+
module Share
|
|
5
|
+
class Base
|
|
6
|
+
|
|
7
|
+
LUA_SCRIPTS = []
|
|
8
|
+
BASE_SCRIPTS = [:list, :info, :reset, :delete]
|
|
9
|
+
DEFAULT_KEY_TTL = 6000
|
|
10
|
+
|
|
11
|
+
def initialize(pool, ttl: DEFAULT_KEY_TTL.to_i, logger: nil)
|
|
12
|
+
@pool = pool
|
|
13
|
+
@ttl = ttl
|
|
14
|
+
@logger = logger || Logger.new(STDOUT)
|
|
15
|
+
@logger.level = Logger::DEBUG
|
|
16
|
+
# load_redis_scripts
|
|
17
|
+
end
|
|
18
|
+
|
|
19
|
+
# Returns a list of limits matching the pattern
|
|
20
|
+
#
|
|
21
|
+
# @param match_pattern [String] pattern for key matching
|
|
22
|
+
# @param max_count [Integer] maximum number of records to return
|
|
23
|
+
# @return [Array<Limit>] array of Limit objects or empty array
|
|
24
|
+
def list(match_pattern, max_count: 1000)
|
|
25
|
+
response = wrap_executed_script do |r|
|
|
26
|
+
r.evalsha(@lua_list, argv: [match_pattern, max_count])
|
|
27
|
+
end
|
|
28
|
+
|
|
29
|
+
response = response.each_slice(2).to_h
|
|
30
|
+
if response["count"].to_i > 0
|
|
31
|
+
response["limits"].map do |data|
|
|
32
|
+
build_limit(data)
|
|
33
|
+
end
|
|
34
|
+
else
|
|
35
|
+
[]
|
|
36
|
+
end
|
|
37
|
+
end
|
|
38
|
+
|
|
39
|
+
# Resets limit values
|
|
40
|
+
#
|
|
41
|
+
# @param limit_or_str [Limit, String] limit object or its name
|
|
42
|
+
# @param ttl [Integer] key TTL after reset
|
|
43
|
+
# @return [OpenStruct] operation result with success and limit fields
|
|
44
|
+
def reset(limit_or_str, ttl: @ttl)
|
|
45
|
+
response = wrap_executed_script do |r|
|
|
46
|
+
r.evalsha(@lua_reset, keys: [get_name(limit_or_str)], argv: [ttl])
|
|
47
|
+
end
|
|
48
|
+
|
|
49
|
+
handle_response(response, with_limit: true)
|
|
50
|
+
end
|
|
51
|
+
|
|
52
|
+
# Deletes a limit from Redis
|
|
53
|
+
#
|
|
54
|
+
# @param limit_or_str [Limit, String] limit object or its name
|
|
55
|
+
# @return [OpenStruct] operation result with success field
|
|
56
|
+
def delete(limit_or_str)
|
|
57
|
+
response = wrap_executed_script do |r|
|
|
58
|
+
r.evalsha(@lua_delete, keys: [get_name(limit_or_str)])
|
|
59
|
+
end
|
|
60
|
+
|
|
61
|
+
handle_response(response)
|
|
62
|
+
end
|
|
63
|
+
|
|
64
|
+
# Retrieves information about a limit
|
|
65
|
+
#
|
|
66
|
+
# @param limit_or_str [Limit, String] limit object or its name
|
|
67
|
+
# @param ttl [Integer] key TTL
|
|
68
|
+
# @return [OpenStruct] operation result with success and limit fields
|
|
69
|
+
def info(limit_or_str, ttl: @ttl)
|
|
70
|
+
response = wrap_executed_script do |r|
|
|
71
|
+
r.evalsha(@lua_info, keys: [get_name(limit_or_str)], argv: [ttl])
|
|
72
|
+
end
|
|
73
|
+
|
|
74
|
+
handle_response(response, with_limit: true)
|
|
75
|
+
end
|
|
76
|
+
|
|
77
|
+
# Processes Redis response and converts it to OpenStruct
|
|
78
|
+
#
|
|
79
|
+
# @param response [Array] raw Redis response
|
|
80
|
+
# @param with_limit [Boolean] whether to include limit object in result
|
|
81
|
+
# @return [OpenStruct] structured response
|
|
82
|
+
def handle_response(response, with_limit: false)
|
|
83
|
+
response = response.each_slice(2).to_h
|
|
84
|
+
success = response["result"] == "true"
|
|
85
|
+
result_data = { success: success, **response }
|
|
86
|
+
result_data[:limit] = build_limit(response["info"]) if success && with_limit
|
|
87
|
+
OpenStruct.new(result_data)
|
|
88
|
+
end
|
|
89
|
+
|
|
90
|
+
def get_name(limit_or_str)
|
|
91
|
+
limit_or_str.is_a?(Limit) ? limit_or_str.name : limit_or_str
|
|
92
|
+
end
|
|
93
|
+
|
|
94
|
+
def build_limit(redis_data)
|
|
95
|
+
name = redis_data[0]
|
|
96
|
+
params = redis_data[1].each_slice(2).to_h
|
|
97
|
+
Limit.from_hash(name, **params.symbolize_keys)
|
|
98
|
+
end
|
|
99
|
+
|
|
100
|
+
# Wrapper for Redis script execution with retry logic
|
|
101
|
+
#
|
|
102
|
+
# @param max_retries [Integer] maximum number of retry attempts
|
|
103
|
+
# @param delay [Float] delay between retries in seconds
|
|
104
|
+
# @param block [Proc] block containing Redis operations
|
|
105
|
+
# @return [Array] Redis response or error response
|
|
106
|
+
def wrap_executed_script(max_retries: 5, delay: 0.1, &block)
|
|
107
|
+
retries_count = 0
|
|
108
|
+
begin
|
|
109
|
+
@pool.with do |conn|
|
|
110
|
+
conn.with do |r|
|
|
111
|
+
yield r
|
|
112
|
+
end
|
|
113
|
+
end
|
|
114
|
+
rescue Redis::CannotConnectError, Redis::TimeoutError, Errno::ECONNREFUSED => e
|
|
115
|
+
retries_count += 1
|
|
116
|
+
if retries_count < max_retries
|
|
117
|
+
@logger.warn("Redis connection error: #{e.message}.")
|
|
118
|
+
sleep(delay)
|
|
119
|
+
retry
|
|
120
|
+
else
|
|
121
|
+
@logger.error("Redis is not available: #{e.message}")
|
|
122
|
+
raise e
|
|
123
|
+
end
|
|
124
|
+
rescue ::Redis::CommandError => e
|
|
125
|
+
if e.message.include?('NOSCRIPT')
|
|
126
|
+
retries_count += 1
|
|
127
|
+
if retries_count < max_retries
|
|
128
|
+
@logger.warn("Get not script error from redis: #{e.message}. Reload lua scripts")
|
|
129
|
+
# существует вероятность что сервер мог быть перезагружен
|
|
130
|
+
# и нужно заново загрузить скрипты
|
|
131
|
+
load_redis_scripts
|
|
132
|
+
retry
|
|
133
|
+
end
|
|
134
|
+
end
|
|
135
|
+
raise e
|
|
136
|
+
rescue TypeError => e
|
|
137
|
+
if e.message.include?('Unsupported command argument type: NilClass')
|
|
138
|
+
retries_count += 1
|
|
139
|
+
if retries_count < max_retries
|
|
140
|
+
@logger.info("First time load lua scripts")
|
|
141
|
+
# При первом запуске instance_variable с lua скриптами не инициализированы, при этом
|
|
142
|
+
# evalsha райзит эту ошибку. Загружаем скрипты в redis
|
|
143
|
+
load_redis_scripts
|
|
144
|
+
retry
|
|
145
|
+
end
|
|
146
|
+
end
|
|
147
|
+
raise e
|
|
148
|
+
end
|
|
149
|
+
end
|
|
150
|
+
|
|
151
|
+
private
|
|
152
|
+
|
|
153
|
+
# Loads Lua scripts into Redis
|
|
154
|
+
#
|
|
155
|
+
# @return [void]
|
|
156
|
+
def load_redis_scripts
|
|
157
|
+
@pool.with do |conn|
|
|
158
|
+
(BASE_SCRIPTS + self.class::LUA_SCRIPTS).each do |script|
|
|
159
|
+
instance_variable_set("@lua_#{script}".to_sym,
|
|
160
|
+
conn.with {|r| r.script(:load, File.read(File.join(__dir__, 'lua_scripts', "#{script.to_s}.lua"))) }
|
|
161
|
+
)
|
|
162
|
+
end
|
|
163
|
+
end
|
|
164
|
+
end
|
|
165
|
+
|
|
166
|
+
end
|
|
167
|
+
end
|
|
168
|
+
end
|
|
@@ -0,0 +1,53 @@
|
|
|
1
|
+
module Rapidity
|
|
2
|
+
module Share
|
|
3
|
+
class Limit
|
|
4
|
+
|
|
5
|
+
attr_reader :name, :max_tokens, :tokens, :interval, :max_queue, :semaphore, :last_used, :rate
|
|
6
|
+
|
|
7
|
+
def initialize(name, max_tokens, interval,
|
|
8
|
+
namespace: nil, tokens: nil,
|
|
9
|
+
last_used: nil, rate: nil,
|
|
10
|
+
semaphore: nil, max_queue: nil,
|
|
11
|
+
validate: true, **kwargs)
|
|
12
|
+
@name = namespace.to_s.empty? ? name : [namespace, name].join(':')
|
|
13
|
+
@namespace = namespace.to_s
|
|
14
|
+
@max_tokens = max_tokens.to_i
|
|
15
|
+
@tokens = tokens.to_i
|
|
16
|
+
@interval = interval.to_i
|
|
17
|
+
@semaphore = semaphore.to_i
|
|
18
|
+
@max_queue = max_queue.to_i
|
|
19
|
+
@last_used = last_used.to_i
|
|
20
|
+
@rate = rate.to_i == 0 ? @max_tokens.to_f/@interval.to_f : rate
|
|
21
|
+
|
|
22
|
+
@kwargs = kwargs
|
|
23
|
+
validate_parameters! if validate
|
|
24
|
+
end
|
|
25
|
+
|
|
26
|
+
def self.from_hash(name, **kwargs)
|
|
27
|
+
max_tokens = kwargs.delete(:max_tokens)
|
|
28
|
+
interval = kwargs.delete(:interval)
|
|
29
|
+
self.new(name, max_tokens, interval, **kwargs)
|
|
30
|
+
end
|
|
31
|
+
|
|
32
|
+
def persisted?
|
|
33
|
+
@last_used > 0
|
|
34
|
+
end
|
|
35
|
+
|
|
36
|
+
def valid?
|
|
37
|
+
@max_tokens > 0 && @interval > 0
|
|
38
|
+
end
|
|
39
|
+
|
|
40
|
+
def base_params
|
|
41
|
+
[max_tokens, interval, max_queue]
|
|
42
|
+
end
|
|
43
|
+
|
|
44
|
+
private
|
|
45
|
+
|
|
46
|
+
def validate_parameters!
|
|
47
|
+
raise ArgumentError, "max_tokens must be greater than 0" unless @max_tokens > 0
|
|
48
|
+
raise ArgumentError, "interval must be greater than 0" unless @interval > 0
|
|
49
|
+
end
|
|
50
|
+
|
|
51
|
+
end
|
|
52
|
+
end
|
|
53
|
+
end
|
|
@@ -0,0 +1,175 @@
|
|
|
1
|
+
-------------------------------------------------------------------------------
|
|
2
|
+
-- СПЕЦИФИКАЦИЯ ФАЙЛА
|
|
3
|
+
-------------------------------------------------------------------------------
|
|
4
|
+
-- Этот скрипт отвечает за реализацию алгоритма Token Bucket (маркерная корзина) для Rate Limiter'а,
|
|
5
|
+
-- выполняемую прямо внутри Redis с помощью Lua.
|
|
6
|
+
-- Выполнение внутри Redis гарантирует атомарность операций (никто не сможет изменить
|
|
7
|
+
-- данные между чтением и записью) и высокую производительность.
|
|
8
|
+
--
|
|
9
|
+
-- Описание функционала файла acquire.lua
|
|
10
|
+
-- 1. Атомарный захват по нескольким лимитам ("Всё или ничего"):
|
|
11
|
+
-- Скрипт принимает список ключей (лимитов) и запрошенное количество токенов.
|
|
12
|
+
-- Он проверяет, есть ли нужное количество токенов во всех переданных лимитах одновременно.
|
|
13
|
+
-- 2. Ленивое пополнение (Lazy Refill): Токены не пополняются каким-то фоновым процессом.
|
|
14
|
+
-- Вместо этого при каждом обращении (в функции Limit:update) вычисляется разница во времени
|
|
15
|
+
-- с момента последнего обращения (current_time - self.last_used) и добавляется количество токенов, пропорциональное скорости rate.
|
|
16
|
+
-- 3. Проверка условий (Validation):
|
|
17
|
+
-- - Если запрашивается <= 0 токенов или передан пустой список ключей — сразу возвращается ошибка.
|
|
18
|
+
-- - Если хотя бы одного ключа не существует в Redis, возвращается key_not_found.
|
|
19
|
+
-- - Если хотя бы в одном лимите не хватает токенов, скрипт возвращает ошибку not_limits и ничего не списывает из других лимитов (откат транзакции).
|
|
20
|
+
-- 4. Фиксация состояния: Только если проверены и удовлетворены все лимиты, происходит
|
|
21
|
+
-- фактическое списание токенов (tokens - requested), сохранение новых значений
|
|
22
|
+
-- в Redis (HMSET) и обновление времени жизни ключей (EXPIRE). Возвращается успешный результат.
|
|
23
|
+
-------------------------------------------------------------------------------
|
|
24
|
+
|
|
25
|
+
-- Указываем Redis реплицировать сами эффекты от скрипта (HMSET), а не сам скрипт.
|
|
26
|
+
-- Это необходимо для использования команды TIME внутри скрипта в версиях Redis 3.2 - 5.0.
|
|
27
|
+
redis.replicate_commands()
|
|
28
|
+
|
|
29
|
+
-- Входящие аргументы
|
|
30
|
+
-- KEYS: список ключей (имен лимитов)
|
|
31
|
+
-- ARGV[1]: запрашиваемое количество токенов (requested) !ЧИСЛО!
|
|
32
|
+
-- ARGV[2]: время жизни ключей в секундах (key_ttl)
|
|
33
|
+
local limit_keys = KEYS
|
|
34
|
+
local requested = tonumber(ARGV[1]) or 0
|
|
35
|
+
local key_ttl = tonumber(ARGV[2]) or 0
|
|
36
|
+
|
|
37
|
+
-- Получаем текущее время сервера (в секундах)
|
|
38
|
+
local current_time = redis.call("TIME")[1]
|
|
39
|
+
|
|
40
|
+
-------------------------------------------------------------------------------
|
|
41
|
+
-- ООП обертка для работы с лимитом (Token Bucket)
|
|
42
|
+
-------------------------------------------------------------------------------
|
|
43
|
+
local Limit = {}
|
|
44
|
+
Limit.__index = Limit
|
|
45
|
+
|
|
46
|
+
-- Инициализация объекта Limit из Redis
|
|
47
|
+
function Limit:new(limit_key)
|
|
48
|
+
local obj = { key = limit_key, exists = false }
|
|
49
|
+
setmetatable(obj, self)
|
|
50
|
+
|
|
51
|
+
-- Оптимизация: вместо EXISTS + HMGET делаем только HMGET.
|
|
52
|
+
-- Если ключа нет, values[1] будет nil.
|
|
53
|
+
local values = redis.call("HMGET", limit_key,
|
|
54
|
+
"max_tokens",
|
|
55
|
+
"tokens",
|
|
56
|
+
"interval",
|
|
57
|
+
"last_used",
|
|
58
|
+
"rate"
|
|
59
|
+
)
|
|
60
|
+
|
|
61
|
+
if not values[1] then
|
|
62
|
+
return obj -- Ключ не существует
|
|
63
|
+
end
|
|
64
|
+
|
|
65
|
+
obj.max_tokens = tonumber(values[1]) or 0
|
|
66
|
+
obj.tokens = tonumber(values[2]) or 0
|
|
67
|
+
obj.interval = tonumber(values[3]) or 0
|
|
68
|
+
obj.last_used = tonumber(values[4]) or 0
|
|
69
|
+
obj.rate = tonumber(values[5]) or 0
|
|
70
|
+
|
|
71
|
+
obj.exists = (obj.max_tokens > 0) and (obj.interval > 0)
|
|
72
|
+
|
|
73
|
+
return obj
|
|
74
|
+
end
|
|
75
|
+
|
|
76
|
+
-- "Ленивое" пополнение корзины токенов на основе прошедшего времени
|
|
77
|
+
function Limit:update(current_time)
|
|
78
|
+
if not self.exists then return self end
|
|
79
|
+
|
|
80
|
+
local time_passed = current_time - self.last_used
|
|
81
|
+
if time_passed <= 0 then return self end
|
|
82
|
+
|
|
83
|
+
local tokens_to_add = math.floor(time_passed * self.rate)
|
|
84
|
+
self.tokens = math.min(self.tokens + tokens_to_add, self.max_tokens)
|
|
85
|
+
|
|
86
|
+
if tokens_to_add > 0 then
|
|
87
|
+
local time_consumed = tokens_to_add / self.rate
|
|
88
|
+
self.last_used = math.min(self.last_used + time_consumed, current_time)
|
|
89
|
+
end
|
|
90
|
+
|
|
91
|
+
return self
|
|
92
|
+
end
|
|
93
|
+
|
|
94
|
+
-- Сохранение обновленного стейта лимита в Redis
|
|
95
|
+
function Limit:save()
|
|
96
|
+
if not self.exists then return end
|
|
97
|
+
|
|
98
|
+
redis.call("HMSET", self.key,
|
|
99
|
+
"tokens", self.tokens,
|
|
100
|
+
"last_used", self.last_used
|
|
101
|
+
)
|
|
102
|
+
end
|
|
103
|
+
|
|
104
|
+
-- Проверка, достаточно ли токенов
|
|
105
|
+
-- requested - это количество (ЧИСЛО) запрошенных токенов
|
|
106
|
+
function Limit:can_acquire(requested)
|
|
107
|
+
return self.exists and self.tokens >= requested
|
|
108
|
+
end
|
|
109
|
+
|
|
110
|
+
-- Списание токенов из памяти объекта (без сохранения в БД)
|
|
111
|
+
-- requested - это количество (ЧИСЛО) запрошенных токенов
|
|
112
|
+
function Limit:acquire(requested)
|
|
113
|
+
if not self:can_acquire(requested) then return false end
|
|
114
|
+
self.tokens = self.tokens - requested
|
|
115
|
+
return true
|
|
116
|
+
end
|
|
117
|
+
|
|
118
|
+
-------------------------------------------------------------------------------
|
|
119
|
+
-- Основная логика обработки всех лимитов
|
|
120
|
+
-------------------------------------------------------------------------------
|
|
121
|
+
-- requested - это количество (ЧИСЛО) запрошенных токенов
|
|
122
|
+
local function process_all_limits(limit_keys, requested, current_time)
|
|
123
|
+
-- Базовые валидации
|
|
124
|
+
if requested <= 0 then
|
|
125
|
+
return { "result", "false", "retryable", "false", "error", "limits not requested" }
|
|
126
|
+
end
|
|
127
|
+
|
|
128
|
+
if #limit_keys == 0 then
|
|
129
|
+
return { "result", "false", "retryable", "false", "error", "no keys passed" }
|
|
130
|
+
end
|
|
131
|
+
|
|
132
|
+
local limits = {}
|
|
133
|
+
|
|
134
|
+
-- Фаза 1: Проверка всех лимитов (Read & Validate)
|
|
135
|
+
for i = 1, #limit_keys do
|
|
136
|
+
local key = limit_keys[i]
|
|
137
|
+
local limit = Limit:new(key)
|
|
138
|
+
|
|
139
|
+
if not limit.exists then
|
|
140
|
+
return { "result", "false", "retryable", "false", "error", "key_not_found", "key", key }
|
|
141
|
+
end
|
|
142
|
+
|
|
143
|
+
-- Пересчитываем количество токенов на текущий момент
|
|
144
|
+
limit:update(current_time)
|
|
145
|
+
|
|
146
|
+
-- Если хотя бы в одном лимите не хватает токенов - прерываем операцию для всех
|
|
147
|
+
if not limit:can_acquire(requested) then
|
|
148
|
+
return {
|
|
149
|
+
"result", "false",
|
|
150
|
+
"retryable", "true",
|
|
151
|
+
"error", "not_limits",
|
|
152
|
+
"tokens_available", limit.tokens,
|
|
153
|
+
"key", key
|
|
154
|
+
}
|
|
155
|
+
end
|
|
156
|
+
|
|
157
|
+
table.insert(limits, limit)
|
|
158
|
+
end
|
|
159
|
+
|
|
160
|
+
-- Фаза 2: Применение изменений (Write)
|
|
161
|
+
-- Выполняется только если всем лимитам хватило токенов
|
|
162
|
+
for i = 1, #limits do
|
|
163
|
+
local limit = limits[i]
|
|
164
|
+
limit:acquire(requested)
|
|
165
|
+
limit:save()
|
|
166
|
+
|
|
167
|
+
-- Продлеваем жизнь ключу, чтобы он не удалился, если к нему активно обращаются.
|
|
168
|
+
-- "GT" обновляет TTL только если новый TTL больше текущего.
|
|
169
|
+
redis.call("EXPIRE", limit.key, key_ttl, "GT")
|
|
170
|
+
end
|
|
171
|
+
|
|
172
|
+
return { "result", "true", "keys", limit_keys }
|
|
173
|
+
end
|
|
174
|
+
|
|
175
|
+
return process_all_limits(limit_keys, requested, current_time)
|