sidekiq 6.1.0
Potentially problematic release: this version of sidekiq has been flagged as possibly problematic.
- checksums.yaml +7 -0
- data/.circleci/config.yml +71 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +13 -0
- data/.standard.yml +20 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1718 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +269 -0
- data/Gemfile +24 -0
- data/Gemfile.lock +208 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +790 -0
- data/README.md +94 -0
- data/Rakefile +10 -0
- data/bin/sidekiq +42 -0
- data/bin/sidekiqload +157 -0
- data/bin/sidekiqmon +8 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +57 -0
- data/lib/sidekiq.rb +262 -0
- data/lib/sidekiq/api.rb +960 -0
- data/lib/sidekiq/cli.rb +401 -0
- data/lib/sidekiq/client.rb +263 -0
- data/lib/sidekiq/delay.rb +41 -0
- data/lib/sidekiq/exception_handler.rb +27 -0
- data/lib/sidekiq/extensions/action_mailer.rb +47 -0
- data/lib/sidekiq/extensions/active_record.rb +43 -0
- data/lib/sidekiq/extensions/class_methods.rb +43 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +82 -0
- data/lib/sidekiq/job_logger.rb +63 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +206 -0
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +135 -0
- data/lib/sidekiq/middleware/chain.rb +160 -0
- data/lib/sidekiq/middleware/i18n.rb +40 -0
- data/lib/sidekiq/monitor.rb +133 -0
- data/lib/sidekiq/paginator.rb +47 -0
- data/lib/sidekiq/processor.rb +280 -0
- data/lib/sidekiq/rails.rb +50 -0
- data/lib/sidekiq/redis_connection.rb +146 -0
- data/lib/sidekiq/scheduled.rb +173 -0
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing.rb +344 -0
- data/lib/sidekiq/testing/inline.rb +30 -0
- data/lib/sidekiq/util.rb +67 -0
- data/lib/sidekiq/version.rb +5 -0
- data/lib/sidekiq/web.rb +213 -0
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +357 -0
- data/lib/sidekiq/web/csrf_protection.rb +153 -0
- data/lib/sidekiq/web/helpers.rb +333 -0
- data/lib/sidekiq/web/router.rb +101 -0
- data/lib/sidekiq/worker.rb +244 -0
- data/sidekiq.gemspec +20 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +95 -0
- data/web/assets/javascripts/dashboard.js +296 -0
- data/web/assets/stylesheets/application-dark.css +133 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1158 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +81 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +83 -0
- data/web/locales/es.yml +70 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +83 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +20 -0
- data/web/views/_job_info.erb +89 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +101 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +41 -0
- data/web/views/morgue.erb +78 -0
- data/web/views/queue.erb +55 -0
- data/web/views/queues.erb +38 -0
- data/web/views/retries.erb +83 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +57 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +212 -0
data/lib/sidekiq/delay.rb
@@ -0,0 +1,41 @@
# frozen_string_literal: true

module Sidekiq
  module Extensions
    def self.enable_delay!
      if defined?(::ActiveSupport)
        require "sidekiq/extensions/active_record"
        require "sidekiq/extensions/action_mailer"

        # Need to patch Psych so it can autoload classes whose names are serialized
        # in the delayed YAML.
        Psych::Visitors::ToRuby.prepend(Sidekiq::Extensions::PsychAutoload)

        ActiveSupport.on_load(:active_record) do
          include Sidekiq::Extensions::ActiveRecord
        end
        ActiveSupport.on_load(:action_mailer) do
          extend Sidekiq::Extensions::ActionMailer
        end
      end

      require "sidekiq/extensions/class_methods"
      Module.__send__(:include, Sidekiq::Extensions::Klass)
    end

    module PsychAutoload
      def resolve_class(klass_name)
        return nil if !klass_name || klass_name.empty?
        # constantize
        names = klass_name.split("::")
        names.shift if names.empty? || names.first.empty?

        names.inject(Object) do |constant, name|
          constant.const_defined?(name) ? constant.const_get(name) : constant.const_missing(name)
        end
      rescue NameError
        super
      end
    end
  end
end
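The delay extensions above are opt-in; this file only defines `Sidekiq::Extensions.enable_delay!`. A minimal sketch of turning them on from an application initializer (the initializer path is an assumption for a Rails-style app):

# config/initializers/sidekiq.rb -- hypothetical initializer path
require "sidekiq"
require "sidekiq/delay" # defines Sidekiq::Extensions.enable_delay!

# Re-enables .delay / .delay_for / .delay_until on Module and, when
# ActiveSupport is loaded, on ActiveRecord and ActionMailer as well.
Sidekiq::Extensions.enable_delay!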
data/lib/sidekiq/exception_handler.rb
@@ -0,0 +1,27 @@
# frozen_string_literal: true

require "sidekiq"

module Sidekiq
  module ExceptionHandler
    class Logger
      def call(ex, ctx)
        Sidekiq.logger.warn(Sidekiq.dump_json(ctx)) unless ctx.empty?
        Sidekiq.logger.warn("#{ex.class.name}: #{ex.message}")
        Sidekiq.logger.warn(ex.backtrace.join("\n")) unless ex.backtrace.nil?
      end

      Sidekiq.error_handlers << Sidekiq::ExceptionHandler::Logger.new
    end

    def handle_exception(ex, ctx = {})
      Sidekiq.error_handlers.each do |handler|
        handler.call(ex, ctx)
      rescue => ex
        Sidekiq.logger.error "!!! ERROR HANDLER THREW AN ERROR !!!"
        Sidekiq.logger.error ex
        Sidekiq.logger.error ex.backtrace.join("\n") unless ex.backtrace.nil?
      end
    end
  end
end
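Because `handle_exception` simply walks `Sidekiq.error_handlers`, an application can append its own handler next to the default `ExceptionHandler::Logger`. A sketch, with an illustrative handler body:

Sidekiq.configure_server do |config|
  config.error_handlers << ->(ex, ctx) {
    # ctx is the context hash passed to handle_exception,
    # e.g. {context: "Error calling death handler", job: {...}}
    warn "[sidekiq] #{ex.class}: #{ex.message} (#{ctx.inspect})"
  }
end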
data/lib/sidekiq/extensions/action_mailer.rb
@@ -0,0 +1,47 @@
# frozen_string_literal: true

require "sidekiq/extensions/generic_proxy"

module Sidekiq
  module Extensions
    ##
    # Adds 'delay', 'delay_for' and `delay_until` methods to ActionMailer to offload arbitrary email
    # delivery to Sidekiq. Example:
    #
    #    UserMailer.delay.send_welcome_email(new_user)
    #    UserMailer.delay_for(5.days).send_welcome_email(new_user)
    #    UserMailer.delay_until(5.days.from_now).send_welcome_email(new_user)
    class DelayedMailer
      include Sidekiq::Worker

      def perform(yml)
        (target, method_name, args) = YAML.load(yml)
        msg = target.public_send(method_name, *args)
        # The email method can return nil, which causes ActionMailer to return
        # an undeliverable empty message.
        if msg
          msg.deliver_now
        else
          raise "#{target.name}##{method_name} returned an undeliverable mail object"
        end
      end
    end

    module ActionMailer
      def sidekiq_delay(options = {})
        Proxy.new(DelayedMailer, self, options)
      end

      def sidekiq_delay_for(interval, options = {})
        Proxy.new(DelayedMailer, self, options.merge("at" => Time.now.to_f + interval.to_f))
      end

      def sidekiq_delay_until(timestamp, options = {})
        Proxy.new(DelayedMailer, self, options.merge("at" => timestamp.to_f))
      end
      alias_method :delay, :sidekiq_delay
      alias_method :delay_for, :sidekiq_delay_for
      alias_method :delay_until, :sidekiq_delay_until
    end
  end
end
data/lib/sidekiq/extensions/active_record.rb
@@ -0,0 +1,43 @@
# frozen_string_literal: true

require "sidekiq/extensions/generic_proxy"

module Sidekiq
  module Extensions
    ##
    # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
    # execution to Sidekiq.
    #
    # @example
    #    User.recent_signups.each { |user| user.delay.mark_as_awesome }
    #
    # Please note, this is not recommended as this will serialize the entire
    # object to Redis.  Your Sidekiq jobs should pass IDs, not entire instances.
    # This is here for backwards compatibility with Delayed::Job only.
    class DelayedModel
      include Sidekiq::Worker

      def perform(yml)
        (target, method_name, args) = YAML.load(yml)
        target.__send__(method_name, *args)
      end
    end

    module ActiveRecord
      def sidekiq_delay(options = {})
        Proxy.new(DelayedModel, self, options)
      end

      def sidekiq_delay_for(interval, options = {})
        Proxy.new(DelayedModel, self, options.merge("at" => Time.now.to_f + interval.to_f))
      end

      def sidekiq_delay_until(timestamp, options = {})
        Proxy.new(DelayedModel, self, options.merge("at" => timestamp.to_f))
      end
      alias_method :delay, :sidekiq_delay
      alias_method :delay_for, :sidekiq_delay_for
      alias_method :delay_until, :sidekiq_delay_until
    end
  end
end
data/lib/sidekiq/extensions/class_methods.rb
@@ -0,0 +1,43 @@
# frozen_string_literal: true

require "sidekiq/extensions/generic_proxy"

module Sidekiq
  module Extensions
    ##
    # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
    # execution to Sidekiq.
    #
    # @example
    #    User.delay.delete_inactive
    #    Wikipedia.delay.download_changes_for(Date.today)
    #
    class DelayedClass
      include Sidekiq::Worker

      def perform(yml)
        (target, method_name, args) = YAML.load(yml)
        target.__send__(method_name, *args)
      end
    end

    module Klass
      def sidekiq_delay(options = {})
        Proxy.new(DelayedClass, self, options)
      end

      def sidekiq_delay_for(interval, options = {})
        Proxy.new(DelayedClass, self, options.merge("at" => Time.now.to_f + interval.to_f))
      end

      def sidekiq_delay_until(timestamp, options = {})
        Proxy.new(DelayedClass, self, options.merge("at" => timestamp.to_f))
      end
      alias_method :delay, :sidekiq_delay
      alias_method :delay_for, :sidekiq_delay_for
      alias_method :delay_until, :sidekiq_delay_until
    end
  end
end

Module.__send__(:include, Sidekiq::Extensions::Klass) unless defined?(::Rails)
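Once `enable_delay!` has run, any class method can be deferred through these proxies. A small hypothetical example (class and method names are invented):

class ReportBuilder
  def self.rebuild_all
    # expensive work...
  end
end

ReportBuilder.delay.rebuild_all                    # enqueue a DelayedClass job now
ReportBuilder.sidekiq_delay_for(3600).rebuild_all  # schedule it roughly an hour out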
data/lib/sidekiq/extensions/generic_proxy.rb
@@ -0,0 +1,31 @@
# frozen_string_literal: true

require "yaml"

module Sidekiq
  module Extensions
    SIZE_LIMIT = 8_192

    class Proxy < BasicObject
      def initialize(performable, target, options = {})
        @performable = performable
        @target = target
        @opts = options
      end

      def method_missing(name, *args)
        # Sidekiq has a limitation in that its message must be JSON.
        # JSON can't round trip real Ruby objects so we use YAML to
        # serialize the objects to a String.  The YAML will be converted
        # to JSON and then deserialized on the other side back into a
        # Ruby object.
        obj = [@target, name, args]
        marshalled = ::YAML.dump(obj)
        if marshalled.size > SIZE_LIMIT
          ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
        end
        @performable.client_push({"class" => @performable, "args" => [marshalled]}.merge(@opts))
      end
    end
  end
end
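The proxy's comment describes a YAML-inside-JSON round trip; the sketch below spells it out with a throwaway class, assuming Psych 3 semantics where `YAML.load` revives the `!ruby/class` tag (as it did when this release shipped):

require "yaml"

class Greeter
  def self.hello(id)
    puts "hello ##{id}"
  end
end

# Roughly what Proxy#method_missing builds for Greeter.delay.hello(42):
payload = YAML.dump([Greeter, :hello, [42]])
# => "---\n- !ruby/class 'Greeter'\n- :hello\n- - 42\n"
# That string becomes the single JSON-safe job argument; DelayedClass#perform
# reverses it on the worker side:
target, method_name, args = YAML.load(payload)
target.__send__(method_name, *args)   # prints "hello #42"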
data/lib/sidekiq/fetch.rb
@@ -0,0 +1,82 @@
# frozen_string_literal: true

require "sidekiq"

module Sidekiq
  class BasicFetch
    # We want the fetch operation to timeout every few seconds so the thread
    # can check if the process is shutting down.
    TIMEOUT = 2

    UnitOfWork = Struct.new(:queue, :job) {
      def acknowledge
        # nothing to do
      end

      def queue_name
        queue.delete_prefix("queue:")
      end

      def requeue
        Sidekiq.redis do |conn|
          conn.rpush(queue, job)
        end
      end
    }

    def initialize(options)
      raise ArgumentError, "missing queue list" unless options[:queues]
      @options = options
      @strictly_ordered_queues = !!@options[:strict]
      @queues = @options[:queues].map { |q| "queue:#{q}" }
      if @strictly_ordered_queues
        @queues.uniq!
        @queues << TIMEOUT
      end
    end

    def retrieve_work
      work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
      UnitOfWork.new(*work) if work
    end

    # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
    # an instance method will make it async to the Fetcher actor
    def bulk_requeue(inprogress, options)
      return if inprogress.empty?

      Sidekiq.logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end

      Sidekiq.redis do |conn|
        conn.pipelined do
          jobs_to_requeue.each do |queue, jobs|
            conn.rpush(queue, jobs)
          end
        end
      end
      Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
      Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end

    # Creating the Redis#brpop command takes into account any
    # configured queue weights. By default Redis#brpop returns
    # data from the first queue that has pending elements. We
    # recreate the queue command each time we invoke Redis#brpop
    # to honor weights and avoid queue starvation.
    def queues_cmd
      if @strictly_ordered_queues
        @queues
      else
        queues = @queues.shuffle!.uniq
        queues << TIMEOUT
        queues
      end
    end
  end
end
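The `queues_cmd` comment is easiest to see with concrete numbers. Assuming the usual CLI behaviour of expanding a weighted queue list such as `-q critical,3 -q default` into repeated entries, a quick sketch:

require "sidekiq/fetch"

# Weighted config, already expanded into repeats before BasicFetch sees it (assumption).
opts = {queues: %w[critical critical critical default]}
fetch = Sidekiq::BasicFetch.new(opts)

# Each call reshuffles and dedupes, so "critical" lands first roughly 3x as often,
# and the BRPOP timeout is always appended last:
fetch.queues_cmd
# => e.g. ["queue:critical", "queue:default", 2]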
data/lib/sidekiq/job_logger.rb
@@ -0,0 +1,63 @@
# frozen_string_literal: true

module Sidekiq
  class JobLogger
    def initialize(logger = Sidekiq.logger)
      @logger = logger
    end

    def call(item, queue)
      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      @logger.info("start")

      yield

      with_elapsed_time_context(start) do
        @logger.info("done")
      end
    rescue Exception
      with_elapsed_time_context(start) do
        @logger.info("fail")
      end

      raise
    end

    def prepare(job_hash, &block)
      level = job_hash["log_level"]
      if level
        @logger.log_at(level) do
          Sidekiq::Context.with(job_hash_context(job_hash), &block)
        end
      else
        Sidekiq::Context.with(job_hash_context(job_hash), &block)
      end
    end

    def job_hash_context(job_hash)
      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
      # attribute to expose the underlying thing.
      h = {
        class: job_hash["wrapped"] || job_hash["class"],
        jid: job_hash["jid"]
      }
      h[:bid] = job_hash["bid"] if job_hash["bid"]
      h[:tags] = job_hash["tags"] if job_hash["tags"]
      h
    end

    def with_elapsed_time_context(start, &block)
      Sidekiq::Context.with(elapsed_time_context(start), &block)
    end

    def elapsed_time_context(start)
      {elapsed: elapsed(start).to_s}
    end

    private

    def elapsed(start)
      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
    end
  end
end
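`prepare` reads a "log_level" key out of the job hash. A sketch of the worker-side option that ends up there, assuming the `sidekiq_options log_level:` spelling (worker name invented):

class ChattyWorker
  include Sidekiq::Worker
  # Serialized into the job payload as "log_level" and picked up by
  # JobLogger#prepare, so debug output is scoped to this job only.
  sidekiq_options log_level: :debug

  def perform(record_id)
    logger.debug "processing #{record_id}"
  end
end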
data/lib/sidekiq/job_retry.rb
@@ -0,0 +1,262 @@
# frozen_string_literal: true

require "sidekiq/scheduled"
require "sidekiq/api"

require "zlib"
require "base64"

module Sidekiq
  ##
  # Automatically retry jobs that fail in Sidekiq.
  # Sidekiq's retry support assumes a typical development lifecycle:
  #
  #   0. Push some code changes with a bug in it.
  #   1. Bug causes job processing to fail, Sidekiq's middleware captures
  #      the job and pushes it onto a retry queue.
  #   2. Sidekiq retries jobs in the retry queue multiple times with
  #      an exponential delay, the job continues to fail.
  #   3. After a few days, a developer deploys a fix. The job is
  #      reprocessed successfully.
  #   4. Once retries are exhausted, Sidekiq will give up and move the
  #      job to the Dead Job Queue (aka morgue) where it must be dealt with
  #      manually in the Web UI.
  #   5. After 6 months on the DJQ, Sidekiq will discard the job.
  #
  # A job looks like:
  #
  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
  #
  # The 'retry' option also accepts a number (in place of 'true'):
  #
  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
  #
  # The job will be retried this number of times before giving up. (If simply
  # 'true', Sidekiq retries 25 times)
  #
  # We'll add a bit more data to the job to support retries:
  #
  # * 'queue' - the queue to use
  # * 'retry_count' - number of times we've retried so far.
  # * 'error_message' - the message from the exception
  # * 'error_class' - the exception class
  # * 'failed_at' - the first time it failed
  # * 'retried_at' - the last time it was retried
  # * 'backtrace' - the number of lines of error backtrace to store
  #
  # We don't store the backtrace by default as that can add a lot of overhead
  # to the job and everyone is using an error service, right?
  #
  # The default number of retries is 25 which works out to about 3 weeks
  # You can change the default maximum number of retries in your initializer:
  #
  #   Sidekiq.options[:max_retries] = 7
  #
  # or limit the number of retries for a particular worker with:
  #
  #    class MyWorker
  #      include Sidekiq::Worker
  #      sidekiq_options :retry => 10
  #    end
  #
  class JobRetry
    class Handled < ::RuntimeError; end
    class Skip < Handled; end

    include Sidekiq::Util

    DEFAULT_MAX_RETRY_ATTEMPTS = 25

    def initialize(options = {})
      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
    end

    # The global retry handler requires only the barest of data.
    # We want to be able to retry as much as possible so we don't
    # require the worker to be instantiated.
    def global(jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown
      raise ey
    rescue Exception => e
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

      msg = Sidekiq.load_json(jobstr)
      if msg["retry"]
        attempt_retry(nil, msg, queue, e)
      else
        Sidekiq.death_handlers.each do |handler|
          handler.call(msg, e)
        rescue => handler_ex
          handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
        end
      end

      raise Handled
    end

    # The local retry support means that any errors that occur within
    # this block can be associated with the given worker instance.
    # This is required to support the `sidekiq_retries_exhausted` block.
    #
    # Note that any exception from the block is wrapped in the Skip
    # exception so the global block does not reprocess the error.  The
    # Skip exception is unwrapped within Sidekiq::Processor#process before
    # calling the handle_exception handlers.
    def local(worker, jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown
      raise ey
    rescue Exception => e
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

      msg = Sidekiq.load_json(jobstr)
      if msg["retry"].nil?
        msg["retry"] = worker.class.get_sidekiq_options["retry"]
      end

      raise e unless msg["retry"]
      attempt_retry(worker, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
      raise Skip
    end

    private

    # Note that +worker+ can be nil here if an error is raised before we can
    # instantiate the worker instance.  All access must be guarded and
    # best effort.
    def attempt_retry(worker, msg, queue, exception)
      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

      msg["queue"] = (msg["retry_queue"] || queue)

      m = exception_message(exception)
      if m.respond_to?(:scrub!)
        m.force_encoding("utf-8")
        m.scrub!
      end

      msg["error_message"] = m
      msg["error_class"] = exception.class.name
      count = if msg["retry_count"]
        msg["retried_at"] = Time.now.to_f
        msg["retry_count"] += 1
      else
        msg["failed_at"] = Time.now.to_f
        msg["retry_count"] = 0
      end

      if msg["backtrace"]
        lines = if msg["backtrace"] == true
          exception.backtrace
        else
          exception.backtrace[0...msg["backtrace"].to_i]
        end

        msg["error_backtrace"] = compress_backtrace(lines)
      end

      if count < max_retry_attempts
        delay = delay_for(worker, count, exception)
        # Logging here can break retries if the logging device raises ENOSPC #3979
        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
        payload = Sidekiq.dump_json(msg)
        Sidekiq.redis do |conn|
          conn.zadd("retry", retry_at.to_s, payload)
        end
      else
        # Goodbye dear message, you (re)tried your best I'm sure.
        retries_exhausted(worker, msg, exception)
      end
    end

    def retries_exhausted(worker, msg, exception)
      begin
        block = worker&.sidekiq_retries_exhausted_block
        block&.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
      end

      send_to_morgue(msg) unless msg["dead"] == false

      Sidekiq.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
      end
    end

    def send_to_morgue(msg)
      logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
      payload = Sidekiq.dump_json(msg)
      DeadSet.new.kill(payload, notify_failure: false)
    end

    def retry_attempts_from(msg_retry, default)
      if msg_retry.is_a?(Integer)
        msg_retry
      else
        default
      end
    end

    def delay_for(worker, count, exception)
      if worker&.sidekiq_retry_in_block
        custom_retry_in = retry_in(worker, count, exception).to_i
        return custom_retry_in if custom_retry_in > 0
      end
      seconds_to_delay(count)
    end

    # delayed_job uses the same basic formula
    def seconds_to_delay(count)
      (count**4) + 15 + (rand(30) * (count + 1))
    end

    def retry_in(worker, count, exception)
      worker.sidekiq_retry_in_block.call(count, exception)
    rescue Exception => e
      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
      nil
    end

    def exception_caused_by_shutdown?(e, checked_causes = [])
      return false unless e.cause

      # Handle circular causes
      checked_causes << e.object_id
      return false if checked_causes.include?(e.cause.object_id)

      e.cause.instance_of?(Sidekiq::Shutdown) ||
        exception_caused_by_shutdown?(e.cause, checked_causes)
    end

    # Extract message from exception.
    # Set a default if the message raises an error
    def exception_message(exception)
      # App code can stuff all sorts of crazy binary data into the error message
      # that won't convert to JSON.
      exception.message.to_s[0, 10_000]
    rescue
      +"!!! ERROR MESSAGE THREW AN ERROR !!!"
    end

    def compress_backtrace(backtrace)
      serialized = Sidekiq.dump_json(backtrace)
      compressed = Zlib::Deflate.deflate(serialized)
      Base64.encode64(compressed)
    end
  end
end
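`delay_for` and `retries_exhausted` both defer to per-worker blocks, which are set with the standard `sidekiq_retry_in` and `sidekiq_retries_exhausted` hooks. A brief sketch (worker name and timings are invented):

class FlakyApiWorker
  include Sidekiq::Worker
  sidekiq_options retry: 5

  # Consulted by JobRetry#delay_for; returning nil or 0 falls back to the
  # default count**4 + 15 + jitter schedule.
  sidekiq_retry_in do |count, exception|
    60 * (count + 1)   # 1 min, 2 min, 3 min, ...
  end

  # Called by JobRetry#retries_exhausted just before the job goes to the morgue.
  sidekiq_retries_exhausted do |msg, exception|
    Sidekiq.logger.warn "Dropping #{msg["class"]} #{msg["jid"]}: #{exception.message}"
  end

  def perform(url)
    # call the flaky API...
  end
end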