sidekiq 6.4.0 → 6.5.12
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/Changes.md +119 -1
- data/README.md +6 -1
- data/bin/sidekiq +3 -3
- data/bin/sidekiqload +70 -66
- data/bin/sidekiqmon +1 -1
- data/lib/sidekiq/api.rb +255 -100
- data/lib/sidekiq/cli.rb +60 -38
- data/lib/sidekiq/client.rb +44 -30
- data/lib/sidekiq/component.rb +65 -0
- data/lib/sidekiq/delay.rb +2 -2
- data/lib/sidekiq/extensions/action_mailer.rb +2 -2
- data/lib/sidekiq/extensions/active_record.rb +2 -2
- data/lib/sidekiq/extensions/class_methods.rb +2 -2
- data/lib/sidekiq/extensions/generic_proxy.rb +3 -3
- data/lib/sidekiq/fetch.rb +20 -18
- data/lib/sidekiq/job_logger.rb +15 -27
- data/lib/sidekiq/job_retry.rb +73 -52
- data/lib/sidekiq/job_util.rb +15 -9
- data/lib/sidekiq/launcher.rb +58 -54
- data/lib/sidekiq/logger.rb +8 -18
- data/lib/sidekiq/manager.rb +28 -25
- data/lib/sidekiq/metrics/deploy.rb +47 -0
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +94 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +82 -38
- data/lib/sidekiq/middleware/current_attributes.rb +18 -12
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +2 -2
- data/lib/sidekiq/paginator.rb +17 -9
- data/lib/sidekiq/processor.rb +47 -41
- data/lib/sidekiq/rails.rb +19 -13
- data/lib/sidekiq/redis_client_adapter.rb +154 -0
- data/lib/sidekiq/redis_connection.rb +80 -49
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +53 -24
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +37 -36
- data/lib/sidekiq/transaction_aware_client.rb +45 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +21 -5
- data/lib/sidekiq/web/csrf_protection.rb +2 -2
- data/lib/sidekiq/web/helpers.rb +21 -8
- data/lib/sidekiq/web.rb +8 -4
- data/lib/sidekiq/worker.rb +26 -20
- data/lib/sidekiq.rb +107 -31
- data/sidekiq.gemspec +2 -2
- data/web/assets/javascripts/application.js +59 -26
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard.js +0 -17
- data/web/assets/javascripts/graph.js +16 -0
- data/web/assets/javascripts/metrics.js +262 -0
- data/web/assets/stylesheets/application.css +45 -1
- data/web/locales/el.yml +43 -19
- data/web/locales/en.yml +7 -0
- data/web/locales/ja.yml +7 -0
- data/web/locales/pt-br.yml +27 -9
- data/web/locales/zh-cn.yml +36 -11
- data/web/locales/zh-tw.yml +32 -7
- data/web/views/_nav.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +9 -4
- data/web/views/dashboard.erb +1 -0
- data/web/views/metrics.erb +69 -0
- data/web/views/metrics_for_job.erb +87 -0
- data/web/views/queue.erb +5 -1
- metadata +34 -9
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/job_logger.rb
CHANGED
@@ -12,46 +12,34 @@ module Sidekiq
 
       yield
 
-
-
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("done")
     rescue Exception
-
-
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("fail")
 
       raise
     end
 
     def prepare(job_hash, &block)
-      level = job_hash["log_level"]
-      if level
-        @logger.log_at(level) do
-          Sidekiq::Context.with(job_hash_context(job_hash), &block)
-        end
-      else
-        Sidekiq::Context.with(job_hash_context(job_hash), &block)
-      end
-    end
-
-    def job_hash_context(job_hash)
       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
       # attribute to expose the underlying thing.
       h = {
         class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
         jid: job_hash["jid"]
       }
-      h[:bid] = job_hash["bid"] if job_hash
-      h[:tags] = job_hash["tags"] if job_hash
-      h
-    end
-
-    def with_elapsed_time_context(start, &block)
-      Sidekiq::Context.with(elapsed_time_context(start), &block)
-    end
+      h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+      h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
 
-
-
+      Thread.current[:sidekiq_context] = h
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level, &block)
+      else
+        yield
+      end
+    ensure
+      Thread.current[:sidekiq_context] = nil
     end
 
     private
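The rewritten `prepare` above attaches the job hash to `Thread.current[:sidekiq_context]` and, when the payload carries a "log_level" key, runs the job under `logger.log_at`. A minimal sketch of a job opting into that; `VerboseJob` is hypothetical, and passing "log_level" through `set` is an assumption about the enqueue-side API:

    class VerboseJob
      include Sidekiq::Job

      def perform(order_id)
        # only emitted when this particular run executes at :debug
        logger.debug { "inspecting order #{order_id}" }
      end
    end

    # Enqueue one run with debug-level logging; JobLogger#prepare reads
    # job_hash["log_level"] and wraps the run in logger.log_at(:debug).
    VerboseJob.set("log_level" => :debug).perform_async(123)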
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -25,11 +23,11 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => '
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
@@ -53,11 +51,11 @@ module Sidekiq
   #
   #   Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular
+  # or limit the number of retries for a particular job and send retries to
   # a low priority queue with:
   #
-  #   class
-  #     include Sidekiq::
+  #   class MyJob
+  #     include Sidekiq::Job
   #     sidekiq_options retry: 10, retry_queue: 'low'
   #   end
   #
@@ -66,17 +64,18 @@ module Sidekiq
 
     class Skip < Handled; end
 
-    include Sidekiq::
+    include Sidekiq::Component
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(options
-      @
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end
 
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the
+    # require the job to be instantiated.
     def global(jobstr, queue)
       yield
     rescue Handled => ex
@@ -90,7 +89,7 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
-
+        process_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
           handler.call(msg, e)
@@ -103,14 +102,14 @@ module Sidekiq
     end
 
     # The local retry support means that any errors that occur within
-    # this block can be associated with the given
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -123,11 +122,11 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] =
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end
 
       raise e unless msg["retry"]
-
+      process_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -135,10 +134,10 @@ module Sidekiq
 
     private
 
-    # Note that +
-    # instantiate the
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def
+    def process_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
       msg["queue"] = (msg["retry_queue"] || queue)
@@ -169,24 +168,54 @@ module Sidekiq
         msg["error_backtrace"] = compress_backtrace(lines)
       end
 
-
-
-
-
-
-
-
-
-
-
-
-
+      # Goodbye dear message, you (re)tried your best I'm sure.
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      strategy, delay = delay_for(jobinst, count, exception)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
+      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
+      end
+    end
+
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
+      end
+
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
       end
+
+      [:default, delay]
     end
 
-    def retries_exhausted(
+    def retries_exhausted(jobinst, msg, exception)
       begin
-        block =
+        block = jobinst&.sidekiq_retries_exhausted_block
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -194,7 +223,7 @@ module Sidekiq
 
       send_to_morgue(msg) unless msg["dead"] == false
 
-
+      config.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -204,7 +233,15 @@ module Sidekiq
     def send_to_morgue(msg)
       logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
-
+      now = Time.now.to_f
+
+      config.redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+        end
+      end
     end
 
     def retry_attempts_from(msg_retry, default)
@@ -215,22 +252,6 @@ module Sidekiq
       end
     end
 
-    def delay_for(worker, count, exception)
-      jitter = rand(10) * (count + 1)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in + jitter if custom_retry_in > 0
-      end
-      (count**4) + 15 + jitter
-    end
-
-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
-      nil
-    end
-
     def exception_caused_by_shutdown?(e, checked_causes = [])
       return false unless e.cause
 
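The retry rework above lets a job's `sidekiq_retry_in` block return either a delay in seconds or a routing symbol (`:discard`, `:kill`, `:default`), which `delay_for` turns into a strategy. A minimal sketch of a job using the new return values; `FlakyApiJob` and the error classes are hypothetical application code:

    class RateLimited < StandardError; end
    class InvalidRequest < StandardError; end
    class StaleRecord < StandardError; end

    class FlakyApiJob
      include Sidekiq::Job

      # count = number of retries so far, exception = the error that was raised
      sidekiq_retry_in do |count, exception|
        case exception
        when RateLimited    then 60 * (count + 1) # integer: retry after this many seconds
        when InvalidRequest then :kill            # stop retrying, move straight to the Dead set
        when StaleRecord    then :discard         # drop the job silently
        else :default                             # fall back to (count**4) + 15 plus jitter
        end
      end

      def perform(record_id)
        # ...
      end
    end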
data/lib/sidekiq/job_util.rb
CHANGED
@@ -4,7 +4,8 @@ require "time"
 module Sidekiq
   module JobUtil
     # These functions encapsulate various job utilities.
-
+
+    TRANSIENT_ATTRIBUTES = %w[]
 
     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -12,16 +13,19 @@ module Sidekiq
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end
 
-
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq[:on_complex_arguments] == :raise
         msg = <<~EOM
-          Job arguments to #{
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
        EOM
        raise(ArgumentError, msg) unless json_safe?(item)
-      elsif Sidekiq
+      elsif Sidekiq[:on_complex_arguments] == :warn
        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
-          Job arguments to #{
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
        EOM
@@ -39,20 +43,22 @@ module Sidekiq
 
       raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
 
+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
-      item["jid"] ||= SecureRandom.hex(12)
       item["created_at"] ||= Time.now.to_f
-
       item
     end
 
     def normalized_hash(item_class)
       if item_class.is_a?(Class)
-        raise(ArgumentError, "Message must include a Sidekiq::
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
         item_class.get_sidekiq_options
       else
-        Sidekiq.
+        Sidekiq.default_job_options
       end
     end
 
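The new `verify_json` above is driven by `Sidekiq[:on_complex_arguments]`, the setting behind the `Sidekiq.strict_args!` call referenced in its warning text. A sketch of what it changes at enqueue time, with a hypothetical `SampleJob`:

    # config/initializers/sidekiq.rb
    Sidekiq.strict_args!  # raise instead of warn when arguments are not native JSON types

    class SampleJob
      include Sidekiq::Job
      def perform(date_string); end
    end

    SampleJob.perform_async(Date.today)          # ArgumentError: a Date does not survive the JSON round trip
    SampleJob.perform_async(Date.today.iso8601)  # fine: a plain String is a native JSON type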
data/lib/sidekiq/launcher.rb
CHANGED
@@ -3,11 +3,12 @@
 require "sidekiq/manager"
 require "sidekiq/fetch"
 require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"
 
 module Sidekiq
   # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
-    include
+    include Sidekiq::Component
 
     STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
@@ -15,18 +16,18 @@ module Sidekiq
       proc { "sidekiq" },
       proc { Sidekiq::VERSION },
       proc { |me, data| data["tag"] },
-      proc { |me, data| "[#{Processor::
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
       proc { |me, data| "stopping" if me.stopping? }
     ]
 
     attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
+      @config = options
       options[:fetch] ||= BasicFetch.new(options)
       @manager = Sidekiq::Manager.new(options)
-      @poller = Sidekiq::Scheduled::Poller.new
+      @poller = Sidekiq::Scheduled::Poller.new(options)
       @done = false
-      @options = options
     end
 
     def run
@@ -43,11 +44,9 @@ module Sidekiq
       @poller.terminate
     end
 
-    # Shuts down
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
 
       @done = true
       @manager.quiet
@@ -55,10 +54,10 @@ module Sidekiq
 
       @manager.stop(deadline)
 
-      # Requeue everything in case there was a
+      # Requeue everything in case there was a thread which fetched a job while the process was stopped.
       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = @
-      strategy.bulk_requeue([], @
+      strategy = @config[:fetch]
+      strategy.bulk_requeue([], @config)
 
       clear_heartbeat
     end
@@ -76,17 +75,19 @@ module Sidekiq
         heartbeat
         sleep BEAT_PAUSE
       end
-
+      logger.info("Heartbeat stopping...")
     end
 
     def clear_heartbeat
+      flush_stats
+
       # Remove record from Redis since we are shutting down.
       # Note we don't stop the heartbeat thread; if the process
      # doesn't actually exit, it'll reappear in the Web UI.
-
-        conn.pipelined do
-
-
+      redis do |conn|
+        conn.pipelined do |pipeline|
+          pipeline.srem("processes", [identity])
+          pipeline.unlink("#{identity}:work")
        end
      end
    rescue
@@ -99,7 +100,7 @@ module Sidekiq
       ❤
     end
 
-    def
+    def flush_stats
       fails = Processor::FAILURE.reset
       procd = Processor::PROCESSED.reset
       return if fails + procd == 0
@@ -107,14 +108,14 @@ module Sidekiq
       nowdate = Time.now.utc.strftime("%Y-%m-%d")
       begin
         Sidekiq.redis do |conn|
-          conn.pipelined do
-
-
-
-
-
-
-
+          conn.pipelined do |pipeline|
+            pipeline.incrby("stat:processed", procd)
+            pipeline.incrby("stat:processed:#{nowdate}", procd)
+            pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            pipeline.incrby("stat:failed", fails)
+            pipeline.incrby("stat:failed:#{nowdate}", fails)
+            pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
          end
        end
      rescue => ex
@@ -123,7 +124,6 @@ module Sidekiq
         Sidekiq.logger.warn("Unable to flush stats: #{ex}")
       end
     end
-    at_exit(&method(:flush_stats))
 
     def ❤
       key = identity
@@ -132,26 +132,29 @@ module Sidekiq
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
-        curstate = Processor::
+        curstate = Processor::WORK_STATE.dup
 
-        workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
 
-
-          conn.multi do
-
-
-
+        redis do |conn|
+          conn.multi do |transaction|
+            transaction.incrby("stat:processed", procd)
+            transaction.incrby("stat:processed:#{nowdate}", procd)
+            transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
 
-
-
-
+            transaction.incrby("stat:failed", fails)
+            transaction.incrby("stat:failed:#{nowdate}", fails)
+            transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
 
-
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
            curstate.each_pair do |tid, hash|
-
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
            end
-
+            transaction.expire(work_key, 60)
          end
        end
 
@@ -160,23 +163,24 @@ module Sidekiq
         fails = procd = 0
         kb = memory_usage(::Process.pid)
 
-        _, exists, _, _, msg =
-          conn.multi {
-
-
-
+        _, exists, _, _, msg = redis { |conn|
+          conn.multi { |transaction|
+            transaction.sadd("processes", [key])
+            transaction.exists?(key)
+            transaction.hmset(key, "info", to_json,
              "busy", curstate.size,
              "beat", Time.now.to_f,
              "rtt_us", rtt,
-              "quiet", @done,
+              "quiet", @done.to_s,
              "rss", kb)
-
-
+            transaction.expire(key, 60)
+            transaction.rpop("#{key}-signals")
          }
        }
 
        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
        fire_event(:heartbeat) unless exists
+        fire_event(:beat, oneshot: false)
 
        return unless msg
 
@@ -198,7 +202,7 @@ module Sidekiq
 
     def check_rtt
       a = b = 0
-
+      redis do |x|
         a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
         x.ping
         b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +213,12 @@ module Sidekiq
       # Workable is < 10,000µs
       # Log a warning if it's a disaster.
       if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-
+        logger.warn <<~EOM
          Your Redis network connection is performing extremely poorly.
          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
          If these values are close to 100,000, that means your Sidekiq process may be
-          CPU
+          CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
        EOM
        RTT_READINGS.reset
      end
@@ -246,10 +250,10 @@ module Sidekiq
        "hostname" => hostname,
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
-        "tag" => @
-        "concurrency" => @
-        "queues" => @
-        "labels" => @
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config[:concurrency],
+        "queues" => @config[:queues].uniq,
+        "labels" => @config[:labels],
        "identity" => identity
      }
    end
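Much of the launcher churn above is mechanical: the heartbeat now sends commands to the block argument yielded by `pipelined`/`multi` (the style newer redis-rb versions expect) instead of calling them on the outer connection. The same pattern applies to application code that talks to Redis through `Sidekiq.redis`, as in this sketch:

    Sidekiq.redis do |conn|
      conn.pipelined do |pipeline|
        # both commands are buffered and sent in a single round trip
        pipeline.incrby("stat:processed", 10)
        pipeline.expire("stat:processed", 24 * 60 * 60)
      end
    end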
data/lib/sidekiq/logger.rb
CHANGED
@@ -16,6 +16,10 @@ module Sidekiq
     def self.current
       Thread.current[:sidekiq_context] ||= {}
     end
+
+    def self.add(k, v)
+      current[k] = v
+    end
   end
 
   module LoggingUtils
@@ -31,24 +35,10 @@ module Sidekiq
       nil
     end
 
-
-      level
-
-
-    def info?
-      level <= 1
-    end
-
-    def warn?
-      level <= 2
-    end
-
-    def error?
-      level <= 3
-    end
-
-    def fatal?
-      level <= 4
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
     end
 
     def local_level