sidekiq 6.3.1 → 6.5.9
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +134 -0
- data/LICENSE +3 -3
- data/README.md +7 -2
- data/bin/sidekiq +3 -3
- data/bin/sidekiqload +70 -66
- data/bin/sidekiqmon +1 -1
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +261 -104
- data/lib/sidekiq/cli.rb +62 -38
- data/lib/sidekiq/client.rb +47 -67
- data/lib/sidekiq/{util.rb → component.rb} +12 -42
- data/lib/sidekiq/delay.rb +3 -1
- data/lib/sidekiq/extensions/generic_proxy.rb +1 -1
- data/lib/sidekiq/fetch.rb +20 -18
- data/lib/sidekiq/job_logger.rb +15 -27
- data/lib/sidekiq/job_retry.rb +78 -55
- data/lib/sidekiq/job_util.rb +71 -0
- data/lib/sidekiq/launcher.rb +58 -54
- data/lib/sidekiq/logger.rb +8 -18
- data/lib/sidekiq/manager.rb +35 -34
- data/lib/sidekiq/metrics/deploy.rb +47 -0
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +94 -0
- data/lib/sidekiq/metrics/tracking.rb +134 -0
- data/lib/sidekiq/middleware/chain.rb +82 -38
- data/lib/sidekiq/middleware/current_attributes.rb +19 -8
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +2 -2
- data/lib/sidekiq/paginator.rb +17 -9
- data/lib/sidekiq/processor.rb +47 -41
- data/lib/sidekiq/rails.rb +15 -8
- data/lib/sidekiq/redis_client_adapter.rb +154 -0
- data/lib/sidekiq/redis_connection.rb +80 -49
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +66 -27
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +37 -36
- data/lib/sidekiq/transaction_aware_client.rb +45 -0
- data/lib/sidekiq/version.rb +1 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +26 -7
- data/lib/sidekiq/web/csrf_protection.rb +2 -2
- data/lib/sidekiq/web/helpers.rb +21 -8
- data/lib/sidekiq/web.rb +8 -4
- data/lib/sidekiq/worker.rb +78 -19
- data/lib/sidekiq.rb +111 -30
- data/sidekiq.gemspec +2 -2
- data/web/assets/javascripts/application.js +58 -26
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard.js +0 -17
- data/web/assets/javascripts/graph.js +16 -0
- data/web/assets/javascripts/metrics.js +262 -0
- data/web/assets/stylesheets/application-dark.css +13 -17
- data/web/assets/stylesheets/application.css +48 -6
- data/web/locales/el.yml +43 -19
- data/web/locales/en.yml +7 -0
- data/web/locales/ja.yml +7 -0
- data/web/locales/pt-br.yml +27 -9
- data/web/locales/zh-cn.yml +36 -11
- data/web/locales/zh-tw.yml +32 -7
- data/web/views/_nav.erb +1 -1
- data/web/views/_summary.erb +1 -1
- data/web/views/busy.erb +9 -4
- data/web/views/dashboard.erb +1 -0
- data/web/views/metrics.erb +69 -0
- data/web/views/metrics_for_job.erb +87 -0
- data/web/views/queue.erb +5 -1
- metadata +39 -13
- data/lib/generators/sidekiq/worker_generator.rb +0 -57
- data/lib/sidekiq/exception_handler.rb +0 -27
data/lib/sidekiq/job_logger.rb
CHANGED
@@ -12,46 +12,34 @@ module Sidekiq
 
       yield
 
-      with_elapsed_time_context(start) do
-        @logger.info("done")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("done")
     rescue Exception
-      with_elapsed_time_context(start) do
-        @logger.info("fail")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("fail")
 
       raise
     end
 
     def prepare(job_hash, &block)
-      level = job_hash["log_level"]
-      if level
-        @logger.log_at(level) do
-          Sidekiq::Context.with(job_hash_context(job_hash), &block)
-        end
-      else
-        Sidekiq::Context.with(job_hash_context(job_hash), &block)
-      end
-    end
-
-    def job_hash_context(job_hash)
       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
       # attribute to expose the underlying thing.
       h = {
         class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
         jid: job_hash["jid"]
       }
-      h[:bid] = job_hash["bid"] if job_hash["bid"]
-      h[:tags] = job_hash["tags"] if job_hash["tags"]
-      h
-    end
-
-    def with_elapsed_time_context(start, &block)
-      Sidekiq::Context.with(elapsed_time_context(start), &block)
-    end
+      h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+      h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
 
-    def elapsed_time_context(start)
-      {elapsed: elapsed(start).to_s}
+      Thread.current[:sidekiq_context] = h
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level, &block)
+      else
+        yield
+      end
+    ensure
+      Thread.current[:sidekiq_context] = nil
     end
 
     private
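The rewritten `prepare` above now stores the job context directly in `Thread.current[:sidekiq_context]` and honors a per-job `log_level`. A hedged sketch of what that enables from the job side; the job class and context key are illustrative:

    class VerboseJob
      include Sidekiq::Job
      # Surfaced to JobLogger#prepare as job_hash["log_level"].
      sidekiq_options log_level: :debug

      def perform(order_id)
        # Context.add is the helper introduced in this release (see the
        # logger.rb diff below); the pair appears in this job's log lines.
        Sidekiq::Context.add(:order_id, order_id)
        logger.debug "visible only because of the per-job log level"
      end
    end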
data/lib/sidekiq/job_retry.rb
CHANGED
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -25,18 +23,19 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
   #
-  # We'll add a bit more data to the job to support retries:
+  # Relevant options for job retries:
   #
-  # * 'queue' - the queue to use
+  # * 'queue' - the queue for the initial job
+  # * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
   # * 'retry_count' - number of times we've retried so far.
   # * 'error_message' - the message from the exception
   # * 'error_class' - the exception class
@@ -52,11 +51,12 @@ module Sidekiq
   #
   #   Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular worker with:
+  # or limit the number of retries for a particular job and send retries to
+  # a low priority queue with:
   #
-  #   class Worker
-  #     include Sidekiq::Worker
-  #     sidekiq_options :retry => 10
+  #   class MyJob
+  #     include Sidekiq::Job
+  #     sidekiq_options retry: 10, retry_queue: 'low'
   #   end
   #
   class JobRetry
@@ -64,17 +64,18 @@ module Sidekiq
 
     class Skip < Handled; end
 
-    include Sidekiq::Util
+    include Sidekiq::Component
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(options = {})
-      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end
 
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the worker to be instantiated.
+    # require the job to be instantiated.
    def global(jobstr, queue)
      yield
    rescue Handled => ex
@@ -88,7 +89,7 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
-        attempt_retry(nil, msg, queue, e)
+        process_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
           handler.call(msg, e)
@@ -101,14 +102,14 @@ module Sidekiq
     end
 
     # The local retry support means that any errors that occur within
-    # this block can be associated with the given worker instance.
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, jobstr, queue)
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -121,11 +122,11 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] = worker.class.get_sidekiq_options["retry"]
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end
 
       raise e unless msg["retry"]
-      attempt_retry(worker, msg, queue, e)
+      process_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -133,10 +134,10 @@ module Sidekiq
 
     private
 
-    # Note that +worker+ can be nil here if an error is raised before we can
-    # instantiate the worker instance.  All access must be guarded and
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def attempt_retry(worker, msg, queue, exception)
+    def process_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
       msg["queue"] = (msg["retry_queue"] || queue)
@@ -167,24 +168,54 @@ module Sidekiq
         msg["error_backtrace"] = compress_backtrace(lines)
       end
 
-      if count < max_retry_attempts
-        delay = delay_for(worker, count, exception)
-        # Logging here can break retries if the logging device raises ENOSPC #3979
-        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-        retry_at = Time.now.to_f + delay
-        payload = Sidekiq.dump_json(msg)
-        Sidekiq.redis do |conn|
-          conn.zadd("retry", retry_at.to_s, payload)
-        end
-      else
-        # Goodbye dear message, you (re)tried your best I'm sure.
-        retries_exhausted(worker, msg, exception)
+      # Goodbye dear message, you (re)tried your best I'm sure.
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      strategy, delay = delay_for(jobinst, count, exception)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
+      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
+      end
+    end
+
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
+      end
+
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
       end
+
+      [:default, delay]
     end
 
-    def retries_exhausted(worker, msg, exception)
+    def retries_exhausted(jobinst, msg, exception)
       begin
-        block = worker&.sidekiq_retries_exhausted_block
+        block = jobinst&.sidekiq_retries_exhausted_block
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -192,7 +223,7 @@ module Sidekiq
 
       send_to_morgue(msg) unless msg["dead"] == false
 
-      Sidekiq.death_handlers.each do |handler|
+      config.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -202,7 +233,15 @@ module Sidekiq
     def send_to_morgue(msg)
       logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
-      DeadSet.new.kill(payload, notify_failure: false)
+      now = Time.now.to_f
+
+      config.redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+        end
+      end
     end
 
     def retry_attempts_from(msg_retry, default)
@@ -213,22 +252,6 @@ module Sidekiq
       end
     end
 
-    def delay_for(worker, count, exception)
-      jitter = rand(10) * (count + 1)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in + jitter if custom_retry_in > 0
-      end
-      (count**4) + 15 + jitter
-    end
-
-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
-      nil
-    end
-
     def exception_caused_by_shutdown?(e, checked_causes = [])
       return false unless e.cause
 
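Taken together, the new `process_retry`/`delay_for` pair accepts either an integer delay or the `:discard`/`:kill`/`:default` symbols from a `sidekiq_retry_in` block. An illustrative sketch; the job class and the exception classes are hypothetical:

    class FlakyApiJob
      include Sidekiq::Job
      sidekiq_options retry: 10, retry_queue: "low"

      # May return seconds to wait, or a routing symbol handled by delay_for.
      sidekiq_retry_in do |count, exception|
        case exception
        when RateLimited then 60 * (count + 1) # explicit backoff in seconds
        when RecordGone  then :discard         # job goes poof, no death handlers
        else :default                          # (count**4) + 15, plus jitter
        end
      end

      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn "Giving up on #{msg["class"]} jid=#{msg["jid"]}"
      end

      def perform(id)
        # call the flaky API...
      end
    end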
data/lib/sidekiq/job_util.rb
ADDED
@@ -0,0 +1,71 @@
+require "securerandom"
+require "time"
+
+module Sidekiq
+  module JobUtil
+    # These functions encapsulate various job utilities.
+
+    TRANSIENT_ATTRIBUTES = %w[]
+
+    def validate(item)
+      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end
+
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq[:on_complex_arguments] == :raise
+        msg = <<~EOM
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
+        EOM
+        raise(ArgumentError, msg) unless json_safe?(item)
+      elsif Sidekiq[:on_complex_arguments] == :warn
+        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
+          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
+          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
+        EOM
+      end
+    end
+
+    def normalize_item(item)
+      validate(item)
+
+      # merge in the default sidekiq_options for the item's class and/or wrapped element
+      # this allows ActiveJobs to control sidekiq_options too.
+      defaults = normalized_hash(item["class"])
+      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
+      item = defaults.merge(item)
+
+      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
+      item["class"] = item["class"].to_s
+      item["queue"] = item["queue"].to_s
+      item["created_at"] ||= Time.now.to_f
+      item
+    end
+
+    def normalized_hash(item_class)
+      if item_class.is_a?(Class)
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+        item_class.get_sidekiq_options
+      else
+        Sidekiq.default_job_options
+      end
+    end
+
+    private
+
+    def json_safe?(item)
+      JSON.parse(JSON.dump(item["args"])) == item["args"]
+    end
+  end
+end
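`verify_json` enforces argument safety by round-tripping `args` through `JSON.dump`/`JSON.parse`. A quick sketch of the behavior it backs; `ReportJob` is illustrative:

    # In an initializer. strict_args! switches complex-argument handling
    # from :warn to :raise (the two modes verify_json checks above).
    Sidekiq.strict_args!

    class ReportJob
      include Sidekiq::Job

      def perform(user_id, options)
      end
    end

    ReportJob.perform_async(123, {"format" => "pdf"}) # fine: native JSON types
    ReportJob.perform_async(123, format: :pdf)        # raises ArgumentError:
                                                      # symbol keys/values don't
                                                      # survive the JSON round-trip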
data/lib/sidekiq/launcher.rb
CHANGED
@@ -3,11 +3,12 @@
 require "sidekiq/manager"
 require "sidekiq/fetch"
 require "sidekiq/scheduled"
+require "sidekiq/ring_buffer"
 
 module Sidekiq
   # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
   class Launcher
-    include Util
+    include Sidekiq::Component
 
     STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years
 
@@ -15,18 +16,18 @@ module Sidekiq
       proc { "sidekiq" },
       proc { Sidekiq::VERSION },
       proc { |me, data| data["tag"] },
-      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+      proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
       proc { |me, data| "stopping" if me.stopping? }
     ]
 
     attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
+      @config = options
       options[:fetch] ||= BasicFetch.new(options)
       @manager = Sidekiq::Manager.new(options)
-      @poller = Sidekiq::Scheduled::Poller.new
+      @poller = Sidekiq::Scheduled::Poller.new(options)
       @done = false
-      @options = options
     end
 
     def run
@@ -43,11 +44,9 @@ module Sidekiq
       @poller.terminate
     end
 
-    # Shuts down the process.  This method does not
-    # return until all work is complete and cleaned up.
-    # It can take up to the timeout to complete.
+    # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
     def stop
-      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]
 
       @done = true
       @manager.quiet
@@ -55,10 +54,10 @@ module Sidekiq
 
       @manager.stop(deadline)
 
-      # Requeue everything in case there was a worker who grabbed work while stopped.
+      # Requeue everything in case there was a thread which fetched a job while the process was stopped.
       # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-      strategy = @options[:fetch]
-      strategy.bulk_requeue([], @options)
+      strategy = @config[:fetch]
+      strategy.bulk_requeue([], @config)
 
       clear_heartbeat
     end
@@ -76,17 +75,19 @@ module Sidekiq
         heartbeat
         sleep BEAT_PAUSE
       end
-      Sidekiq.logger.info("Heartbeat stopping...")
+      logger.info("Heartbeat stopping...")
     end
 
     def clear_heartbeat
+      flush_stats
+
       # Remove record from Redis since we are shutting down.
       # Note we don't stop the heartbeat thread; if the process
       # doesn't actually exit, it'll reappear in the Web UI.
-      Sidekiq.redis do |conn|
-        conn.pipelined do
-          conn.srem("processes", identity)
-          conn.unlink("#{identity}:workers")
+      redis do |conn|
+        conn.pipelined do |pipeline|
+          pipeline.srem("processes", [identity])
+          pipeline.unlink("#{identity}:work")
        end
      end
    rescue
@@ -99,7 +100,7 @@ module Sidekiq
       ❤
     end
 
-    def self.flush_stats
+    def flush_stats
       fails = Processor::FAILURE.reset
       procd = Processor::PROCESSED.reset
       return if fails + procd == 0
@@ -107,14 +108,14 @@ module Sidekiq
       nowdate = Time.now.utc.strftime("%Y-%m-%d")
       begin
         Sidekiq.redis do |conn|
-          conn.pipelined do
-            conn.incrby("stat:processed", procd)
-            conn.incrby("stat:processed:#{nowdate}", procd)
-            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-            conn.incrby("stat:failed", fails)
-            conn.incrby("stat:failed:#{nowdate}", fails)
-            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+          conn.pipelined do |pipeline|
+            pipeline.incrby("stat:processed", procd)
+            pipeline.incrby("stat:processed:#{nowdate}", procd)
+            pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+            pipeline.incrby("stat:failed", fails)
+            pipeline.incrby("stat:failed:#{nowdate}", fails)
+            pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
           end
         end
       rescue => ex
@@ -123,7 +124,6 @@ module Sidekiq
         Sidekiq.logger.warn("Unable to flush stats: #{ex}")
       end
     end
-    at_exit(&method(:flush_stats))
 
     def ❤
       key = identity
@@ -132,26 +132,29 @@ module Sidekiq
       begin
         fails = Processor::FAILURE.reset
         procd = Processor::PROCESSED.reset
-        curstate = Processor::WORKER_STATE.dup
+        curstate = Processor::WORK_STATE.dup
 
-        workers_key = "#{key}:workers"
         nowdate = Time.now.utc.strftime("%Y-%m-%d")
 
-        Sidekiq.redis do |conn|
-          conn.multi do
-            conn.incrby("stat:processed", procd)
-            conn.incrby("stat:processed:#{nowdate}", procd)
-            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+        redis do |conn|
+          conn.multi do |transaction|
+            transaction.incrby("stat:processed", procd)
+            transaction.incrby("stat:processed:#{nowdate}", procd)
+            transaction.expire("stat:processed:#{nowdate}", STATS_TTL)
 
-            conn.incrby("stat:failed", fails)
-            conn.incrby("stat:failed:#{nowdate}", fails)
-            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+            transaction.incrby("stat:failed", fails)
+            transaction.incrby("stat:failed:#{nowdate}", fails)
+            transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+          end
 
-            conn.unlink(workers_key)
+          # work is the current set of executing jobs
+          work_key = "#{key}:work"
+          conn.pipelined do |transaction|
+            transaction.unlink(work_key)
             curstate.each_pair do |tid, hash|
-              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+              transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
             end
-            conn.expire(workers_key, 60)
+            transaction.expire(work_key, 60)
           end
         end
 
@@ -160,23 +163,24 @@ module Sidekiq
         fails = procd = 0
         kb = memory_usage(::Process.pid)
 
-        _, exists, _, _, msg = Sidekiq.redis { |conn|
-          conn.multi {
-            conn.sadd("processes", key)
-            conn.exists?(key)
-            conn.hmset(key, "info", to_json,
+        _, exists, _, _, msg = redis { |conn|
+          conn.multi { |transaction|
+            transaction.sadd("processes", [key])
+            transaction.exists?(key)
+            transaction.hmset(key, "info", to_json,
               "busy", curstate.size,
               "beat", Time.now.to_f,
               "rtt_us", rtt,
-              "quiet", @done,
+              "quiet", @done.to_s,
              "rss", kb)
-            conn.expire(key, 60)
-            conn.rpop("#{key}-signals")
+            transaction.expire(key, 60)
+            transaction.rpop("#{key}-signals")
          }
        }
 
         # first heartbeat or recovering from an outage and need to reestablish our heartbeat
         fire_event(:heartbeat) unless exists
+        fire_event(:beat, oneshot: false)
 
         return unless msg
 
@@ -198,7 +202,7 @@ module Sidekiq
 
     def check_rtt
       a = b = 0
-      Sidekiq.redis do |x|
+      redis do |x|
         a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
         x.ping
         b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +213,12 @@ module Sidekiq
       # Workable is < 10,000µs
       # Log a warning if it's a disaster.
       if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-        Sidekiq.logger.warn <<~EOM
+        logger.warn <<~EOM
           Your Redis network connection is performing extremely poorly.
           Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
           Ensure Redis is running in the same AZ or datacenter as Sidekiq.
           If these values are close to 100,000, that means your Sidekiq process may be
-          CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
+          CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
         EOM
         RTT_READINGS.reset
       end
@@ -246,10 +250,10 @@ module Sidekiq
         "hostname" => hostname,
         "started_at" => Time.now.to_f,
         "pid" => ::Process.pid,
-        "tag" => @options[:tag] || "",
-        "concurrency" => @options[:concurrency],
-        "queues" => @options[:queues].uniq,
-        "labels" => @options[:labels],
+        "tag" => @config[:tag] || "",
+        "concurrency" => @config[:concurrency],
+        "queues" => @config[:queues].uniq,
+        "labels" => @config[:labels],
         "identity" => identity
       }
     end
data/lib/sidekiq/logger.rb
CHANGED
@@ -16,6 +16,10 @@ module Sidekiq
     def self.current
       Thread.current[:sidekiq_context] ||= {}
     end
+
+    def self.add(k, v)
+      current[k] = v
+    end
   end
 
   module LoggingUtils
@@ -31,24 +35,10 @@ module Sidekiq
       nil
     end
 
-    def debug?
-      level <= 0
-    end
-
-    def info?
-      level <= 1
-    end
-
-    def warn?
-      level <= 2
-    end
-
-    def error?
-      level <= 3
-    end
-
-    def fatal?
-      level <= 4
+    LEVELS.each do |level, numeric_level|
+      define_method("#{level}?") do
+        local_level.nil? ? super() : local_level <= numeric_level
+      end
     end
 
     def local_level
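The metaprogrammed predicates now consult `local_level`, the thread-local level that `log_at` sets; a brief sketch:

    logger = Sidekiq.logger
    logger.debug? # => false at the default INFO level

    # log_at adjusts the level for this thread only, for the block's duration;
    # the generated predicates check the thread-local level before super().
    logger.log_at(:debug) do
      logger.debug? # => true
    end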