kubra-sidekiq-throttled 1.5.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/LICENSE.txt +23 -0
- data/README.adoc +416 -0
- data/lib/sidekiq/throttled/config.rb +66 -0
- data/lib/sidekiq/throttled/cooldown.rb +55 -0
- data/lib/sidekiq/throttled/errors.rb +8 -0
- data/lib/sidekiq/throttled/expirable_set.rb +70 -0
- data/lib/sidekiq/throttled/job.rb +143 -0
- data/lib/sidekiq/throttled/message.rb +32 -0
- data/lib/sidekiq/throttled/middlewares/server.rb +28 -0
- data/lib/sidekiq/throttled/patches/basic_fetch.rb +34 -0
- data/lib/sidekiq/throttled/patches/super_fetch.rb +39 -0
- data/lib/sidekiq/throttled/patches/throttled_retriever.rb +26 -0
- data/lib/sidekiq/throttled/registry.rb +120 -0
- data/lib/sidekiq/throttled/strategy/base.rb +25 -0
- data/lib/sidekiq/throttled/strategy/concurrency.lua +61 -0
- data/lib/sidekiq/throttled/strategy/concurrency.rb +127 -0
- data/lib/sidekiq/throttled/strategy/threshold.lua +14 -0
- data/lib/sidekiq/throttled/strategy/threshold.rb +104 -0
- data/lib/sidekiq/throttled/strategy.rb +213 -0
- data/lib/sidekiq/throttled/strategy_collection.rb +73 -0
- data/lib/sidekiq/throttled/version.rb +8 -0
- data/lib/sidekiq/throttled/web/stats.rb +75 -0
- data/lib/sidekiq/throttled/web/throttled.html.erb +35 -0
- data/lib/sidekiq/throttled/web.rb +43 -0
- data/lib/sidekiq/throttled/worker.rb +13 -0
- data/lib/sidekiq/throttled.rb +116 -0
- metadata +119 -0
--- /dev/null
+++ data/lib/sidekiq/throttled/strategy/threshold.rb
@@ -0,0 +1,104 @@
+# frozen_string_literal: true
+
+require "redis_prescription"
+
+require_relative "./base"
+
+module Sidekiq
+  module Throttled
+    class Strategy
+      # Threshold throttling strategy
+      # @todo Use redis TIME command instead of sending current timestamp from
+      #   sidekiq manager. See: http://redis.io/commands/time
+      class Threshold
+        include Base
+
+        # LUA script used to limit fetch threshold.
+        # Logic behind the scene can be described in following pseudo code:
+        #
+        #     def exceeded?
+        #       limit <= LLEN(@key) && NOW - LINDEX(@key, -1) < @period
+        #     end
+        #
+        #     def increase!
+        #       LPUSH(@key, NOW)
+        #       LTRIM(@key, 0, @limit - 1)
+        #       EXPIRE(@key, @period)
+        #     end
+        #
+        #     return 1 if exceeded?
+        #
+        #     increase!
+        #     return 0
+        SCRIPT = RedisPrescription.new(File.read("#{__dir__}/threshold.lua"))
+        private_constant :SCRIPT
+
+        # @param [#to_s] strategy_key
+        # @param [#to_i, #call] limit Amount of allowed concurrent jobs
+        #   per period running for given key.
+        # @param [#to_f, #call] :period Period in seconds.
+        # @param [Proc] key_suffix Dynamic key suffix generator.
+        def initialize(strategy_key, limit:, period:, key_suffix: nil)
+          @base_key = "#{strategy_key}:threshold"
+          @limit = limit
+          @period = period
+          @key_suffix = key_suffix
+        end
+
+        # @return [Float] Period in seconds
+        def period(job_args = nil)
+          return @period.to_f unless @period.respond_to? :call
+
+          @period.call(*job_args).to_f
+        end
+
+        # @return [Boolean] Whenever strategy has dynamic config
+        def dynamic?
+          @key_suffix || @limit.respond_to?(:call) || @period.respond_to?(:call)
+        end
+
+        # @return [Boolean] whenever job is throttled or not
+        def throttled?(*job_args)
+          job_limit = limit(job_args)
+          return false unless job_limit
+          return true if job_limit <= 0
+
+          keys = [key(job_args)]
+          argv = [job_limit, period(job_args), Time.now.to_f]
+
+          Sidekiq.redis { |redis| 1 == SCRIPT.call(redis, keys: keys, argv: argv) }
+        end
+
+        # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+        def retry_in(*job_args)
+          job_limit = limit(job_args)
+          return 0.0 if !job_limit || count(*job_args) < job_limit
+
+          job_period = period(job_args)
+          job_key = key(job_args)
+          time_since_oldest = Time.now.to_f - Sidekiq.redis { |redis| redis.lindex(job_key, -1) }.to_f
+          if time_since_oldest > job_period
+            # The oldest job on our list is from more than the throttling period ago,
+            # which means we have not hit the limit this period.
+            0.0
+          else
+            # If we can only have X jobs every Y minutes, then wait until Y minutes have elapsed
+            # since the oldest job on our list.
+            job_period - time_since_oldest
+          end
+        end
+
+        # @return [Integer] Current count of jobs
+        def count(*job_args)
+          Sidekiq.redis { |conn| conn.llen(key(job_args)) }.to_i
+        end
+
+        # Resets count of jobs
+        # @return [void]
+        def reset!(*job_args)
+          Sidekiq.redis { |conn| conn.del(key(job_args)) }
+        end
+      end
+    end
+  end
+end
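
For context, this threshold strategy is normally driven through the `sidekiq_throttle` class macro rather than instantiated directly. A minimal sketch of a job limited by a threshold window follows; the job class name and the limits are illustrative only, but the `:threshold` option shape matches the keyword args accepted by `Strategy::Threshold#initialize` above.

    require "sidekiq"
    require "sidekiq/throttled"

    # Hypothetical job used only to illustrate the :threshold options
    # (limit + period in seconds) consumed by the strategy above.
    class ImportRowJob
      include Sidekiq::Job
      include Sidekiq::Throttled::Job

      sidekiq_throttle(
        # At most 100 executions per 60-second window; extra fetches are throttled.
        threshold: { limit: 100, period: 60 }
      )

      def perform(row_id)
        # ...
      end
    end

Under the hood each execution pushes a timestamp onto the strategy's Redis list (see the pseudo code in the comment above), and the Lua script rejects work once the list holds `limit` entries younger than `period`.
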
--- /dev/null
+++ data/lib/sidekiq/throttled/strategy.rb
@@ -0,0 +1,213 @@
+# frozen_string_literal: true
+
+# internal
+require_relative "./errors"
+require_relative "./strategy_collection"
+require_relative "./strategy/concurrency"
+require_relative "./strategy/threshold"
+
+module Sidekiq
+  module Throttled
+    # Meta-strategy that couples {Concurrency} and {Threshold} strategies.
+    #
+    # @private
+    class Strategy # rubocop:disable Metrics/ClassLength
+      # :enqueue means put the job back at the end of the queue immediately
+      # :schedule means schedule enqueueing the job for a later time when we expect to have capacity
+      VALID_VALUES_FOR_REQUEUE_WITH = %i[enqueue schedule].freeze
+
+      # @!attribute [r] concurrency
+      #   @return [Strategy::Concurrency, nil]
+      attr_reader :concurrency
+
+      # @!attribute [r] threshold
+      #   @return [Strategy::Threshold, nil]
+      attr_reader :threshold
+
+      # @!attribute [r] observer
+      #   @return [Proc, nil]
+      attr_reader :observer
+
+      # @!attribute [r] requeue_options
+      #   @return [Hash, nil]
+      attr_reader :requeue_options
+
+      # @param [#to_s] name
+      # @param [Hash] concurrency Concurrency options.
+      #   See keyword args of {Strategy::Concurrency#initialize} for details.
+      # @param [Hash] threshold Threshold options.
+      #   See keyword args of {Strategy::Threshold#initialize} for details.
+      # @param [#call] key_suffix Dynamic key suffix generator.
+      # @param [#call] observer Process called after throttled.
+      # @param [#call] requeue What to do with jobs that are throttled.
+      def initialize(name, concurrency: nil, threshold: nil, key_suffix: nil, observer: nil, requeue: nil) # rubocop:disable Metrics/MethodLength, Metrics/ParameterLists
+        @observer = observer
+
+        @concurrency = StrategyCollection.new(concurrency,
+                                              strategy: Concurrency,
+                                              name: name,
+                                              key_suffix: key_suffix)
+
+        @threshold = StrategyCollection.new(threshold,
+                                            strategy: Threshold,
+                                            name: name,
+                                            key_suffix: key_suffix)
+
+        @requeue_options = Throttled.config.default_requeue_options.merge(requeue || {})
+
+        validate!
+      end
+
+      # @return [Boolean] whenever strategy has dynamic config
+      def dynamic?
+        return true if @concurrency&.dynamic?
+        return true if @threshold&.dynamic?
+
+        false
+      end
+
+      # @return [Boolean] whenever job is throttled or not.
+      def throttled?(jid, *job_args)
+        if @concurrency&.throttled?(jid, *job_args)
+          @observer&.call(:concurrency, *job_args)
+          return true
+        end
+
+        if @threshold&.throttled?(*job_args)
+          @observer&.call(:threshold, *job_args)
+
+          finalize!(jid, *job_args)
+          return true
+        end
+
+        false
+      end
+
+      # @return [Proc, Symbol] How to requeue the throttled job
+      def requeue_with
+        requeue_options[:with]
+      end
+
+      # @return [String, nil] Name of the queue to re-queue the job to.
+      def requeue_to
+        requeue_options[:to]
+      end
+
+      # Return throttled job to be executed later. Implementation depends on the strategy's `requeue` options.
+      # @return [void]
+      def requeue_throttled(work) # rubocop:disable Metrics/MethodLength
+        # Resolve :with and :to options, calling them if they are Procs
+        job_args = JSON.parse(work.job)["args"]
+        with = requeue_with.respond_to?(:call) ? requeue_with.call(*job_args) : requeue_with
+        target_queue = calc_target_queue(work)
+
+        case with
+        when :enqueue
+          re_enqueue_throttled(work, target_queue)
+        when :schedule
+          # Find out when we will next be able to execute this job, and reschedule for then.
+          reschedule_throttled(work, target_queue)
+        else
+          raise "unrecognized :with option #{with}"
+        end
+      end
+
+      # Marks job as being processed.
+      # @return [void]
+      def finalize!(jid, *job_args)
+        @concurrency&.finalize!(jid, *job_args)
+      end
+
+      # Resets count of jobs of all available strategies
+      # @return [void]
+      def reset!
+        @concurrency&.reset!
+        @threshold&.reset!
+      end
+
+      private
+
+      def validate!
+        unless VALID_VALUES_FOR_REQUEUE_WITH.include?(@requeue_options[:with]) ||
+               @requeue_options[:with].respond_to?(:call)
+          raise ArgumentError, "requeue: #{@requeue_options[:with]} is not a valid value for :with"
+        end
+
+        raise ArgumentError, "Neither :concurrency nor :threshold given" unless @concurrency.any? || @threshold.any?
+      end
+
+      def calc_target_queue(work) # rubocop:disable Metrics/MethodLength
+        target = case requeue_to
+                 when Proc, Method
+                   requeue_to.call(*JSON.parse(work.job)["args"])
+                 when NilClass
+                   work.queue
+                 when String, Symbol
+                   requeue_to
+                 else
+                   raise ArgumentError, "Invalid argument for `to`"
+                 end
+
+        target = work.queue if target.nil? || target.empty?
+
+        target.to_s
+      end
+
+      # Push the job back to the head of the queue.
+      # The queue name is expected to include the "queue:" prefix, so we add it if it's missing.
+      def re_enqueue_throttled(work, target_queue)
+        target_queue = "queue:#{target_queue}" unless target_queue.start_with?("queue:")
+
+        case work.class.name
+        when "Sidekiq::Pro::SuperFetch::UnitOfWork"
+          # Calls SuperFetch UnitOfWork's requeue to remove the job from the
+          # temporary queue and push job back to the head of the target queue, so that
+          # the job won't be tried immediately after it was requeued (in most cases).
+          work.queue = target_queue
+          work.requeue
+        else
+          # This is the same operation Sidekiq performs upon `Sidekiq::Worker.perform_async` call.
+          Sidekiq.redis { |conn| conn.lpush(target_queue, work.job) }
+        end
+      end
+
+      # Reschedule the job to be executed later in the target queue.
+      # The queue name should NOT include the "queue:" prefix, so we remove it if it's present.
+      def reschedule_throttled(work, target_queue)
+        target_queue = target_queue.delete_prefix("queue:")
+        message = JSON.parse(work.job)
+        job_class = message.fetch("wrapped") { message.fetch("class") { return false } }
+        job_args = message["args"]
+
+        # Re-enqueue the job to the target queue at another time as a NEW unit of work
+        # AND THEN mark this work as done, so SuperFetch doesn't think this instance is orphaned
+        # Technically, the job could be processed twice if the process dies between the two lines,
+        # but your job should be idempotent anyway, right?
+        # The job running twice was already a risk with SuperFetch anyway and this doesn't really increase that risk.
+        Sidekiq::Client.enqueue_to_in(target_queue, retry_in(work), Object.const_get(job_class), *job_args)
+
+        work.acknowledge
+      end
+
+      def retry_in(work)
+        message = JSON.parse(work.job)
+        jid = message.fetch("jid") { return false }
+        job_args = message["args"]
+
+        # Ask both concurrency and threshold, if relevant, how long minimum until we can retry.
+        # If we get two answers, take the longer one.
+        intervals = [@concurrency&.retry_in(jid, *job_args), @threshold&.retry_in(*job_args)].compact
+
+        raise "Cannot compute a valid retry interval" if intervals.empty?
+
+        interval = intervals.max
+
+        # Add a random amount of jitter, proportional to the length of the minimum retry time.
+        # This helps spread out jobs more evenly and avoid clumps of jobs on the queue.
+        interval += rand(interval / 5) if interval > 10
+
+        interval
+      end
+    end
+  end
+end
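
The `requeue_options` handling above is what a per-job `:requeue` setting feeds into. A hedged sketch of configuring it, assuming the `sidekiq_throttle` helper forwards the `:requeue` hash to `Strategy#initialize` as its signature suggests (the job class, queue name, and limits below are made up for illustration):

    class WebhookDeliveryJob
      include Sidekiq::Job
      include Sidekiq::Throttled::Job

      sidekiq_throttle(
        concurrency: { limit: 10 },
        threshold:   { limit: 1_000, period: 3600 },
        # When throttled, schedule the job for the moment capacity is expected
        # (Strategy#reschedule_throttled) instead of pushing it back immediately.
        requeue: { with: :schedule, to: :low_priority }
      )

      def perform(payload_id)
        # ...
      end
    end

With `:schedule`, the enqueue time comes from `retry_in` above (the larger of the concurrency and threshold estimates, plus jitter once the interval exceeds 10 seconds); with `:enqueue`, the job is pushed straight back onto the target queue.
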
--- /dev/null
+++ data/lib/sidekiq/throttled/strategy_collection.rb
@@ -0,0 +1,73 @@
+# frozen_string_literal: true
+
+# internal
+module Sidekiq
+  module Throttled
+    # Collection which transparently group several meta-strategies of one kind
+    #
+    # @private
+    class StrategyCollection
+      include Enumerable
+
+      attr_reader :strategies
+
+      # @param [Hash, Array, nil] strategies Concurrency or Threshold options
+      #   or array of options.
+      #   See keyword args of {Strategy::Concurrency#initialize} for details.
+      #   See keyword args of {Strategy::Threshold#initialize} for details.
+      # @param [Class] strategy class of strategy: Concurrency or Threshold
+      # @param [#to_s] name
+      # @param [#call] key_suffix Dynamic key suffix generator.
+      def initialize(strategies, strategy:, name:, key_suffix:)
+        @strategies = (strategies.is_a?(Hash) ? [strategies] : Array(strategies)).map do |options|
+          make_strategy(strategy, name, key_suffix, options)
+        end
+      end
+
+      # @param [#call] block
+      # Iterates each strategy in collection
+      def each(...)
+        @strategies.each(...)
+      end
+
+      # @return [Boolean] whenever any strategy in collection has dynamic config
+      def dynamic?
+        any?(&:dynamic?)
+      end
+
+      # @return [Boolean] whenever job is throttled or not
+      #   by any strategy in collection.
+      def throttled?(...)
+        any? { |s| s.throttled?(...) }
+      end
+
+      # @return [Float] How long, in seconds, before we'll next be able to take on jobs
+      def retry_in(*args)
+        map { |s| s.retry_in(*args) }.max
+      end
+
+      # Marks job as being processed.
+      # @return [void]
+      def finalize!(...)
+        each { |c| c.finalize!(...) }
+      end
+
+      # Resets count of jobs of all available strategies
+      # @return [void]
+      def reset!
+        each(&:reset!)
+      end
+
+      private
+
+      # @return [Base, nil]
+      def make_strategy(strategy, name, key_suffix, options)
+        return unless options
+
+        strategy.new("throttled:#{name}",
+                     key_suffix: key_suffix,
+                     **options)
+      end
+    end
+  end
+end
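
Because `StrategyCollection` accepts either a single options hash or an array of them, a job can declare several limits of the same kind and all must pass for the job to run. A sketch, assuming the options are forwarded unchanged to `Strategy#initialize` (job name and numbers are illustrative):

    class ApiCallJob
      include Sidekiq::Job
      include Sidekiq::Throttled::Job

      sidekiq_throttle(
        # Array form: one threshold collection with a burst limit and a sustained limit.
        threshold: [
          { limit: 10,    period: 1 },    # burst: 10 per second
          { limit: 1_000, period: 3600 }  # sustained: 1 000 per hour
        ]
      )

      def perform(endpoint)
        # ...
      end
    end
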
--- /dev/null
+++ data/lib/sidekiq/throttled/web/stats.rb
@@ -0,0 +1,75 @@
+# frozen_string_literal: true
+
+module Sidekiq
+  module Throttled
+    module Web
+      # Throttle strategy stats generation helper
+      class Stats
+        TIME_CONVERSION = [
+          [60 * 60 * 24, "day", "days"],
+          [60 * 60, "hour", "hours"],
+          [60, "minute", "minutes"],
+          [1, "second", "seconds"]
+        ].freeze
+
+        # @param [Strategy::Concurrency, Strategy::Threshold] strategy
+        def initialize(strategy)
+          raise ArgumentError, "Can't handle dynamic strategies" if strategy&.dynamic?
+
+          @strategy = strategy
+        end
+
+        # @return [String]
+        def to_html
+          return "" unless @strategy
+
+          html = humanize_integer(@strategy.limit) << " jobs"
+
+          html << " per " << humanize_duration(@strategy.period) if @strategy.respond_to?(:period)
+
+          html << "<br />" << colorize_count(@strategy.count, @strategy.limit)
+        end
+
+        private
+
+        # @return [String]
+        def colorize_count(int, max)
+          percentile = 100.00 * int / max
+          lvl = if 80 <= percentile then "danger"
+                elsif 60 <= percentile then "warning"
+                else
+                  "success"
+                end
+
+          %(<span class="label label-#{lvl}">#{int}</span>)
+        end
+
+        # @return [String]
+        def humanize_duration(int)
+          arr = []
+
+          TIME_CONVERSION.each do |(dimension, unit, units)|
+            count = (int / dimension).to_i
+
+            next unless count.positive?
+
+            int -= count * dimension
+            arr << "#{count} #{1 == count ? unit : units}"
+          end
+
+          arr.join " "
+        end
+
+        # @return [String]
+        def humanize_integer(int)
+          digits = int.to_s.chars
+          str = digits.shift(digits.count % 3).join
+
+          str << " " << digits.shift(3).join while digits.count.positive?
+
+          str.strip
+        end
+      end
+    end
+  end
+end
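
To make the helpers above concrete, here is roughly what they produce for a hypothetical static threshold strategy whose `limit` is 1_000, `period` is 3_725 seconds, and current `count` is 640 (values invented for illustration):

    stats = Sidekiq::Throttled::Web::Stats.new(threshold_strategy)
    stats.to_html
    # => "1 000 jobs per 1 hour 2 minutes 5 seconds<br /><span class=\"label label-warning\">640</span>"

`humanize_integer` groups digits in threes ("1 000"), `humanize_duration` decomposes the period using TIME_CONVERSION, and `colorize_count` picks the "warning" label because 640/1000 lands between the 60% and 80% thresholds.
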
--- /dev/null
+++ data/lib/sidekiq/throttled/web/throttled.html.erb
@@ -0,0 +1,35 @@
+<h3>Throttled</h3>
+
+<div class="table_container">
+  <table class="table table-hover table-bordered table-striped table-white">
+    <thead>
+      <tr>
+        <th>Name</th>
+        <th style="text-align:center;">Concurrency</th>
+        <th style="text-align:center;">Threshold</th>
+        <th style="text-align:center;">Actions</th>
+      </tr>
+    </thead>
+    <% Sidekiq::Throttled::Registry.each_with_static_keys do |name, strategy| %>
+      <tr>
+        <td style="vertical-align:middle;"><%= name %></td>
+        <td style="vertical-align:middle;text-align:center;">
+          <% strategy.concurrency.each do |concurrency| %>
+            <%= Sidekiq::Throttled::Web::Stats.new(concurrency).to_html %>
+          <% end %>
+        </td>
+        <td style="vertical-align:middle;text-align:center;">
+          <% strategy.threshold.each do |threshold| %>
+            <%= Sidekiq::Throttled::Web::Stats.new(threshold).to_html %>
+          <% end %>
+        </td>
+        <td style="vertical-align:middle;text-align:center;">
+          <form action="<%= root_path %>throttled/<%= CGI.escape name %>/reset" method="post">
+            <%= csrf_tag %>
+            <button class="btn btn-danger" type="submit">Reset</button>
+          </form>
+        </td>
+      </tr>
+    <% end %>
+  </table>
+</div>
--- /dev/null
+++ data/lib/sidekiq/throttled/web.rb
@@ -0,0 +1,43 @@
+# frozen_string_literal: true
+
+# stdlib
+require "pathname"
+
+# 3rd party
+require "sidekiq"
+require "sidekiq/web"
+
+# internal
+require_relative "./registry"
+require_relative "./web/stats"
+
+module Sidekiq
+  module Throttled
+    # Provides Sidekiq tab to monitor and reset throttled stats.
+    module Web
+      VIEWS = Pathname.new(__dir__).join("web")
+      THROTTLED_TPL = VIEWS.join("throttled.html.erb").read.freeze
+
+      class << self
+        # @api private
+        def registered(app)
+          register_throttled_tab app
+        end
+
+        private
+
+        def register_throttled_tab(app)
+          app.get("/throttled") { erb THROTTLED_TPL.dup }
+
+          app.post("/throttled/:id/reset") do
+            Registry.get(params[:id], &:reset!)
+            redirect "#{root_path}throttled"
+          end
+        end
+      end
+    end
+  end
+end
+
+Sidekiq::Web.register Sidekiq::Throttled::Web
+Sidekiq::Web.tabs["Throttled"] = "throttled"
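
Enabling the tab only requires loading this file wherever the Sidekiq web UI is mounted; requiring it triggers the `Sidekiq::Web.register` call at the bottom. A minimal Rack sketch (the config.ru placement is illustrative):

    # config.ru (illustrative)
    require "sidekiq/web"
    require "sidekiq/throttled/web" # registers the "Throttled" tab rendered by the ERB template above

    run Sidekiq::Web

The tab lists every statically keyed strategy, and the Reset button POSTs to /throttled/:id/reset, which calls `reset!` on the matching registry entry.
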
--- /dev/null
+++ data/lib/sidekiq/throttled/worker.rb
@@ -0,0 +1,13 @@
+# frozen_string_literal: true
+
+require_relative "./job"
+
+module Sidekiq
+  module Throttled
+    # A new module, Sidekiq::Job, was added in Sidekiq version 6.3.0 as a
+    # simple alias for Sidekiq::Worker as the term "worker" was considered
+    # too generic and confusing. Many people call a Sidekiq process a "worker"
+    # whereas others call the thread that executes jobs a "worker".
+    Worker = Job
+  end
+end
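
Since `Worker` is a plain constant alias, both spellings attach the same throttling DSL. A tiny sketch (class name and limit are illustrative):

    class LegacyStyleWorker
      include Sidekiq::Worker
      include Sidekiq::Throttled::Worker # same module object as Sidekiq::Throttled::Job

      sidekiq_throttle(concurrency: { limit: 5 })

      def perform; end
    end
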
--- /dev/null
+++ data/lib/sidekiq/throttled.rb
@@ -0,0 +1,116 @@
+# frozen_string_literal: true
+
+require "sidekiq"
+
+require_relative "./throttled/config"
+require_relative "./throttled/cooldown"
+require_relative "./throttled/job"
+require_relative "./throttled/message"
+require_relative "./throttled/middlewares/server"
+require_relative "./throttled/patches/basic_fetch"
+require_relative "./throttled/patches/super_fetch"
+require_relative "./throttled/registry"
+require_relative "./throttled/version"
+require_relative "./throttled/worker"
+
+# @see https://github.com/mperham/sidekiq/
+module Sidekiq
+  # Concurrency and threshold throttling for Sidekiq.
+  #
+  # Just add somewhere in your bootstrap:
+  #
+  #     require "sidekiq/throttled"
+  #
+  # Once you've done that you can include {Sidekiq::Throttled::Job} to your
+  # job classes and configure throttling:
+  #
+  #     class MyJob
+  #       include Sidekiq::Job
+  #       include Sidekiq::Throttled::Job
+  #
+  #       sidekiq_options :queue => :my_queue
+  #
+  #       sidekiq_throttle({
+  #         # Allow maximum 10 concurrent jobs of this class at a time.
+  #         :concurrency => { :limit => 10 },
+  #         # Allow maximum 1K jobs being processed within one hour window.
+  #         :threshold => { :limit => 1_000, :period => 1.hour }
+  #       })
+  #
+  #       def perform
+  #         # ...
+  #       end
+  #     end
+  module Throttled
+    MUTEX = Mutex.new
+    private_constant :MUTEX
+
+    @config = Config.new.freeze
+    @cooldown = Cooldown[@config]
+
+    class << self
+      # @api internal
+      #
+      # @return [Cooldown, nil]
+      attr_reader :cooldown
+
+      # @api internal
+      #
+      # @return [Config, nil]
+      attr_reader :config
+
+      # @example
+      #   Sidekiq::Throttled.configure do |config|
+      #     config.cooldown_period = nil # Disable queues cooldown manager
+      #   end
+      #
+      # @yieldparam config [Config]
+      def configure
+        MUTEX.synchronize do
+          config = @config.dup
+
+          yield config
+
+          @config = config.freeze
+          @cooldown = Cooldown[@config]
+        end
+      end
+
+      # Tells whenever job is throttled or not.
+      #
+      # @param [String] message Job's JSON payload
+      # @return [Boolean]
+      def throttled?(message)
+        message = Message.new(message)
+        return false unless message.job_class && message.job_id
+
+        Registry.get(message.job_class) do |strategy|
+          return strategy.throttled?(message.job_id, *message.job_args)
+        end
+
+        false
+      rescue StandardError
+        false
+      end
+
+      # Return throttled job to be executed later, delegating the details of how to do that
+      # to the Strategy for that job.
+      #
+      # @return [void]
+      def requeue_throttled(work)
+        message = JSON.parse(work.job)
+        job_class = Object.const_get(message.fetch("wrapped") { message.fetch("class") { return false } })
+
+        Registry.get job_class do |strategy|
+          strategy.requeue_throttled(work)
+        end
+      end
+    end
+  end
+
+  configure_server do |config|
+    config.server_middleware do |chain|
+      chain.add(Sidekiq::Throttled::Middlewares::Server)
+    end
+  end
+end
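
Tying it together, a typical boot sequence only needs the top-level require plus, optionally, a `configure` block; the cooldown line below mirrors the `@example` in the code above, while the initializer path is illustrative:

    # e.g. config/initializers/sidekiq_throttled.rb (illustrative location)
    require "sidekiq/throttled"

    Sidekiq::Throttled.configure do |config|
      config.cooldown_period = nil # disable the queues cooldown manager, per the @example above
    end

No manual middleware wiring is needed: the `configure_server` block at the bottom of the file adds `Sidekiq::Throttled::Middlewares::Server` to the server chain automatically when the file is required inside a Sidekiq process.
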