sidekiq 6.0.0 → 6.5.7

Files changed (124)
  1. checksums.yaml +4 -4
  2. data/Changes.md +383 -2
  3. data/LICENSE +3 -3
  4. data/README.md +13 -10
  5. data/bin/sidekiq +27 -3
  6. data/bin/sidekiqload +74 -66
  7. data/bin/sidekiqmon +5 -6
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +446 -221
  13. data/lib/sidekiq/cli.rb +112 -63
  14. data/lib/sidekiq/client.rb +57 -60
  15. data/lib/sidekiq/{util.rb → component.rb} +12 -16
  16. data/lib/sidekiq/delay.rb +3 -1
  17. data/lib/sidekiq/extensions/action_mailer.rb +3 -2
  18. data/lib/sidekiq/extensions/active_record.rb +4 -3
  19. data/lib/sidekiq/extensions/class_methods.rb +5 -4
  20. data/lib/sidekiq/extensions/generic_proxy.rb +4 -2
  21. data/lib/sidekiq/fetch.rb +48 -37
  22. data/lib/sidekiq/job.rb +13 -0
  23. data/lib/sidekiq/job_logger.rb +19 -23
  24. data/lib/sidekiq/job_retry.rb +100 -67
  25. data/lib/sidekiq/job_util.rb +71 -0
  26. data/lib/sidekiq/launcher.rb +145 -59
  27. data/lib/sidekiq/logger.rb +99 -12
  28. data/lib/sidekiq/manager.rb +35 -34
  29. data/lib/sidekiq/metrics/deploy.rb +47 -0
  30. data/lib/sidekiq/metrics/query.rb +153 -0
  31. data/lib/sidekiq/metrics/shared.rb +94 -0
  32. data/lib/sidekiq/metrics/tracking.rb +134 -0
  33. data/lib/sidekiq/middleware/chain.rb +99 -44
  34. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  35. data/lib/sidekiq/middleware/i18n.rb +6 -4
  36. data/lib/sidekiq/middleware/modules.rb +21 -0
  37. data/lib/sidekiq/monitor.rb +4 -19
  38. data/lib/sidekiq/paginator.rb +13 -8
  39. data/lib/sidekiq/processor.rb +64 -60
  40. data/lib/sidekiq/rails.rb +38 -22
  41. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  42. data/lib/sidekiq/redis_connection.rb +91 -54
  43. data/lib/sidekiq/ring_buffer.rb +29 -0
  44. data/lib/sidekiq/scheduled.rb +93 -28
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +4 -4
  48. data/lib/sidekiq/testing.rb +51 -40
  49. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  50. data/lib/sidekiq/version.rb +1 -1
  51. data/lib/sidekiq/web/action.rb +3 -3
  52. data/lib/sidekiq/web/application.rb +57 -34
  53. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  54. data/lib/sidekiq/web/helpers.rb +77 -36
  55. data/lib/sidekiq/web/router.rb +6 -5
  56. data/lib/sidekiq/web.rb +41 -73
  57. data/lib/sidekiq/worker.rb +144 -21
  58. data/lib/sidekiq.rb +129 -32
  59. data/sidekiq.gemspec +14 -7
  60. data/web/assets/images/apple-touch-icon.png +0 -0
  61. data/web/assets/javascripts/application.js +112 -61
  62. data/web/assets/javascripts/chart.min.js +13 -0
  63. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  64. data/web/assets/javascripts/dashboard.js +52 -69
  65. data/web/assets/javascripts/graph.js +16 -0
  66. data/web/assets/javascripts/metrics.js +262 -0
  67. data/web/assets/stylesheets/application-dark.css +143 -0
  68. data/web/assets/stylesheets/application-rtl.css +0 -4
  69. data/web/assets/stylesheets/application.css +88 -233
  70. data/web/locales/ar.yml +8 -2
  71. data/web/locales/de.yml +14 -2
  72. data/web/locales/el.yml +43 -19
  73. data/web/locales/en.yml +13 -1
  74. data/web/locales/es.yml +18 -2
  75. data/web/locales/fr.yml +10 -3
  76. data/web/locales/ja.yml +12 -0
  77. data/web/locales/lt.yml +83 -0
  78. data/web/locales/pl.yml +4 -4
  79. data/web/locales/pt-br.yml +27 -9
  80. data/web/locales/ru.yml +4 -0
  81. data/web/locales/vi.yml +83 -0
  82. data/web/locales/zh-cn.yml +36 -11
  83. data/web/locales/zh-tw.yml +32 -7
  84. data/web/views/_footer.erb +1 -1
  85. data/web/views/_job_info.erb +3 -2
  86. data/web/views/_nav.erb +1 -1
  87. data/web/views/_poll_link.erb +2 -5
  88. data/web/views/_summary.erb +7 -7
  89. data/web/views/busy.erb +56 -22
  90. data/web/views/dashboard.erb +23 -14
  91. data/web/views/dead.erb +3 -3
  92. data/web/views/layout.erb +3 -1
  93. data/web/views/metrics.erb +69 -0
  94. data/web/views/metrics_for_job.erb +87 -0
  95. data/web/views/morgue.erb +9 -6
  96. data/web/views/queue.erb +23 -10
  97. data/web/views/queues.erb +10 -2
  98. data/web/views/retries.erb +11 -8
  99. data/web/views/retry.erb +3 -3
  100. data/web/views/scheduled.erb +5 -2
  101. metadata +57 -58
  102. data/.circleci/config.yml +0 -61
  103. data/.github/contributing.md +0 -32
  104. data/.github/issue_template.md +0 -11
  105. data/.gitignore +0 -13
  106. data/.standard.yml +0 -20
  107. data/3.0-Upgrade.md +0 -70
  108. data/4.0-Upgrade.md +0 -53
  109. data/5.0-Upgrade.md +0 -56
  110. data/6.0-Upgrade.md +0 -70
  111. data/COMM-LICENSE +0 -97
  112. data/Ent-2.0-Upgrade.md +0 -37
  113. data/Ent-Changes.md +0 -250
  114. data/Gemfile +0 -24
  115. data/Gemfile.lock +0 -196
  116. data/Pro-2.0-Upgrade.md +0 -138
  117. data/Pro-3.0-Upgrade.md +0 -44
  118. data/Pro-4.0-Upgrade.md +0 -35
  119. data/Pro-5.0-Upgrade.md +0 -25
  120. data/Pro-Changes.md +0 -768
  121. data/Rakefile +0 -10
  122. data/code_of_conduct.md +0 -50
  123. data/lib/generators/sidekiq/worker_generator.rb +0 -47
  124. data/lib/sidekiq/exception_handler.rb +0 -27
data/lib/sidekiq/extensions/active_record.rb CHANGED
@@ -5,10 +5,11 @@ require "sidekiq/extensions/generic_proxy"
  module Sidekiq
  module Extensions
  ##
- # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
- # execution to Sidekiq. Examples:
+ # Adds +delay+, +delay_for+ and +delay_until+ methods to ActiveRecord to offload instance method
+ # execution to Sidekiq.
  #
- # User.recent_signups.each { |user| user.delay.mark_as_awesome }
+ # @example
+ # User.recent_signups.each { |user| user.delay.mark_as_awesome }
  #
  # Please note, this is not recommended as this will serialize the entire
  # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.
data/lib/sidekiq/extensions/class_methods.rb CHANGED
@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
  module Sidekiq
  module Extensions
  ##
- # Adds 'delay', 'delay_for' and `delay_until` methods to all Classes to offload class method
- # execution to Sidekiq. Examples:
+ # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
+ # execution to Sidekiq.
  #
- # User.delay.delete_inactive
- # Wikipedia.delay.download_changes_for(Date.today)
+ # @example
+ # User.delay.delete_inactive
+ # Wikipedia.delay.download_changes_for(Date.today)
  #
  class DelayedClass
  include Sidekiq::Worker
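
The comments above mention `delay_for` and `delay_until` alongside `delay`. As a quick, hedged illustration of the difference (the methods called on `User` are made up, and this assumes the delay extensions have been switched on, which Sidekiq 6.x requires via `Sidekiq::Extensions.enable_delay!`):

    # delay       - enqueue the method call immediately
    # delay_for   - enqueue it to run after a relative interval (seconds)
    # delay_until - enqueue it to run at an absolute time
    User.delay.delete_inactive
    User.delay_for(5 * 60).send_weekly_digest
    User.delay_until(Time.now + 3600).send_reminder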
data/lib/sidekiq/extensions/generic_proxy.rb CHANGED
@@ -10,7 +10,7 @@ module Sidekiq
  def initialize(performable, target, options = {})
  @performable = performable
  @target = target
- @opts = options
+ @opts = options.transform_keys(&:to_s)
  end

  def method_missing(name, *args)
@@ -24,7 +24,9 @@ module Sidekiq
  if marshalled.size > SIZE_LIMIT
  ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
  end
- @performable.client_push({"class" => @performable, "args" => [marshalled]}.merge(@opts))
+ @performable.client_push({"class" => @performable,
+ "args" => [marshalled],
+ "display_class" => "#{@target}.#{name}"}.merge(@opts))
  end
  end
  end
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,80 +1,91 @@
  # frozen_string_literal: true

  require "sidekiq"
+ require "sidekiq/component"

- module Sidekiq
+ module Sidekiq # :nodoc:
  class BasicFetch
+ include Sidekiq::Component
  # We want the fetch operation to timeout every few seconds so the thread
  # can check if the process is shutting down.
  TIMEOUT = 2

- UnitOfWork = Struct.new(:queue, :job) {
+ UnitOfWork = Struct.new(:queue, :job, :config) {
  def acknowledge
  # nothing to do
  end

  def queue_name
- queue.sub(/.*queue:/, "")
+ queue.delete_prefix("queue:")
  end

  def requeue
- Sidekiq.redis do |conn|
- conn.rpush("queue:#{queue_name}", job)
+ config.redis do |conn|
+ conn.rpush(queue, job)
  end
  end
  }

- def initialize(options)
- @strictly_ordered_queues = !!options[:strict]
- @queues = options[:queues].map { |q| "queue:#{q}" }
+ def initialize(config)
+ raise ArgumentError, "missing queue list" unless config[:queues]
+ @config = config
+ @strictly_ordered_queues = !!@config[:strict]
+ @queues = @config[:queues].map { |q| "queue:#{q}" }
  if @strictly_ordered_queues
- @queues = @queues.uniq
- @queues << TIMEOUT
+ @queues.uniq!
+ @queues << {timeout: TIMEOUT}
  end
  end

  def retrieve_work
- work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
- UnitOfWork.new(*work) if work
- end
-
- # Creating the Redis#brpop command takes into account any
- # configured queue weights. By default Redis#brpop returns
- # data from the first queue that has pending elements. We
- # recreate the queue command each time we invoke Redis#brpop
- # to honor weights and avoid queue starvation.
- def queues_cmd
- if @strictly_ordered_queues
- @queues
- else
- queues = @queues.shuffle.uniq
- queues << TIMEOUT
- queues
+ qs = queues_cmd
+ # 4825 Sidekiq Pro with all queues paused will return an
+ # empty set of queues with a trailing TIMEOUT value.
+ if qs.size <= 1
+ sleep(TIMEOUT)
+ return nil
  end
+
+ queue, job = redis { |conn| conn.brpop(*qs) }
+ UnitOfWork.new(queue, job, config) if queue
  end

- # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
- # an instance method will make it async to the Fetcher actor
- def self.bulk_requeue(inprogress, options)
+ def bulk_requeue(inprogress, options)
  return if inprogress.empty?

- Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+ logger.debug { "Re-queueing terminated jobs" }
  jobs_to_requeue = {}
  inprogress.each do |unit_of_work|
- jobs_to_requeue[unit_of_work.queue_name] ||= []
- jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.job
+ jobs_to_requeue[unit_of_work.queue] ||= []
+ jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
  end

- Sidekiq.redis do |conn|
- conn.pipelined do
+ redis do |conn|
+ conn.pipelined do |pipeline|
  jobs_to_requeue.each do |queue, jobs|
- conn.rpush("queue:#{queue}", jobs)
+ pipeline.rpush(queue, jobs)
  end
  end
  end
- Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+ logger.info("Pushed #{inprogress.size} jobs back to Redis")
  rescue => ex
- Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+ logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+ end
+
+ # Creating the Redis#brpop command takes into account any
+ # configured queue weights. By default Redis#brpop returns
+ # data from the first queue that has pending elements. We
+ # recreate the queue command each time we invoke Redis#brpop
+ # to honor weights and avoid queue starvation.
+ def queues_cmd
+ if @strictly_ordered_queues
+ @queues
+ else
+ permute = @queues.shuffle
+ permute.uniq!
+ permute << {timeout: TIMEOUT}
+ permute
+ end
  end
  end
  end
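
The restored `queues_cmd` comment describes the weighting trick: a queue's weight is expressed by repeating its name in the configured list (as a CLI invocation like `sidekiq -q critical,2 -q default` produces), and every fetch shuffles then de-duplicates that list so heavier queues tend to be polled first without starving the rest. A standalone sketch of that selection logic, in plain Ruby with hypothetical queue names (not the Sidekiq API itself):

    # weighted list: "critical" appears twice, so it carries weight 2
    queues = %w[queue:critical queue:critical queue:default]

    tally = Hash.new(0)
    10_000.times do
      permute = queues.shuffle   # same steps as BasicFetch#queues_cmd
      permute.uniq!
      tally[permute.first] += 1  # which queue BRPOP would check first
    end
    p tally  # roughly {"queue:critical"=>6700, "queue:default"=>3300}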
data/lib/sidekiq/job.rb ADDED
@@ -0,0 +1,13 @@
+ require "sidekiq/worker"
+
+ module Sidekiq
+ # Sidekiq::Job is a new alias for Sidekiq::Worker as of Sidekiq 6.3.0.
+ # Use `include Sidekiq::Job` rather than `include Sidekiq::Worker`.
+ #
+ # The term "worker" is too generic and overly confusing, used in several
+ # different contexts meaning different things. Many people call a Sidekiq
+ # process a "worker". Some people call the thread that executes jobs a
+ # "worker". This change brings Sidekiq closer to ActiveJob where your job
+ # classes extend ApplicationJob.
+ Job = Worker
+ end
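
Since `Sidekiq::Job` is a plain alias, existing job classes can adopt it without any behavioural change. A minimal sketch (the `ProcessOrderJob` class and its arguments are illustrative, not part of this diff):

    class ProcessOrderJob
      include Sidekiq::Job   # identical to `include Sidekiq::Worker`
      sidekiq_options queue: "default", retry: 5

      def perform(order_id)
        # pass IDs, not whole objects, per the serialization note above
        puts "processing order #{order_id}"
      end
    end

    ProcessOrderJob.perform_async(1234)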
data/lib/sidekiq/job_logger.rb CHANGED
@@ -12,38 +12,34 @@ module Sidekiq

  yield

- with_elapsed_time_context(start) do
- @logger.info("done")
- end
+ Sidekiq::Context.add(:elapsed, elapsed(start))
+ @logger.info("done")
  rescue Exception
- with_elapsed_time_context(start) do
- @logger.info("fail")
- end
+ Sidekiq::Context.add(:elapsed, elapsed(start))
+ @logger.info("fail")

  raise
  end

- def with_job_hash_context(job_hash, &block)
- @logger.with_context(job_hash_context(job_hash), &block)
- end
-
- def job_hash_context(job_hash)
+ def prepare(job_hash, &block)
  # If we're using a wrapper class, like ActiveJob, use the "wrapped"
  # attribute to expose the underlying thing.
  h = {
- class: job_hash["wrapped"] || job_hash["class"],
- jid: job_hash["jid"],
+ class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
+ jid: job_hash["jid"]
  }
- h[:bid] = job_hash["bid"] if job_hash["bid"]
- h
- end
-
- def with_elapsed_time_context(start, &block)
- @logger.with_context(elapsed_time_context(start), &block)
- end
-
- def elapsed_time_context(start)
- {elapsed: elapsed(start).to_s}
+ h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+ h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
+
+ Thread.current[:sidekiq_context] = h
+ level = job_hash["log_level"]
+ if level
+ @logger.log_at(level, &block)
+ else
+ yield
+ end
+ ensure
+ Thread.current[:sidekiq_context] = nil
  end

  private
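
The new `prepare` hook reads an optional "log_level" key from the job hash and wraps the job in `log_at`, so a single job class can run at a more verbose level than the rest of the process. A hedged sketch of how that might be used (the class name is made up; passing `log_level` through `sidekiq_options` relies on the option merging shown in the `job_util.rb` diff further down):

    class SlowReportJob
      include Sidekiq::Job
      sidekiq_options log_level: :debug   # ends up as "log_level" in the job hash

      def perform(report_id)
        logger.debug "crunching report #{report_id}"   # emitted only while this job runs
      end
    end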
data/lib/sidekiq/job_retry.rb CHANGED
@@ -1,7 +1,8 @@
  # frozen_string_literal: true

- require "sidekiq/scheduled"
- require "sidekiq/api"
+ require "zlib"
+ require "base64"
+ require "sidekiq/component"

  module Sidekiq
  ##
@@ -22,18 +23,19 @@ module Sidekiq
  #
  # A job looks like:
  #
- # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+ # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
  #
  # The 'retry' option also accepts a number (in place of 'true'):
  #
- # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+ # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
  #
  # The job will be retried this number of times before giving up. (If simply
  # 'true', Sidekiq retries 25 times)
  #
- # We'll add a bit more data to the job to support retries:
+ # Relevant options for job retries:
  #
- # * 'queue' - the queue to use
+ # * 'queue' - the queue for the initial job
+ # * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
  # * 'retry_count' - number of times we've retried so far.
  # * 'error_message' - the message from the exception
  # * 'error_class' - the exception class
@@ -49,29 +51,32 @@ module Sidekiq
  #
  # Sidekiq.options[:max_retries] = 7
  #
- # or limit the number of retries for a particular worker with:
+ # or limit the number of retries for a particular job and send retries to
+ # a low priority queue with:
  #
- # class MyWorker
- # include Sidekiq::Worker
- # sidekiq_options :retry => 10
+ # class MyJob
+ # include Sidekiq::Job
+ # sidekiq_options retry: 10, retry_queue: 'low'
  # end
  #
  class JobRetry
  class Handled < ::RuntimeError; end
+
  class Skip < Handled; end

- include Sidekiq::Util
+ include Sidekiq::Component

  DEFAULT_MAX_RETRY_ATTEMPTS = 25

- def initialize(options = {})
- @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+ def initialize(options)
+ @config = options
+ @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
  end

  # The global retry handler requires only the barest of data.
  # We want to be able to retry as much as possible so we don't
- # require the worker to be instantiated.
- def global(msg, queue)
+ # require the job to be instantiated.
+ def global(jobstr, queue)
  yield
  rescue Handled => ex
  raise ex
@@ -82,8 +87,9 @@ module Sidekiq
  # ignore, will be pushed back onto queue during hard_shutdown
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

+ msg = Sidekiq.load_json(jobstr)
  if msg["retry"]
- attempt_retry(nil, msg, queue, e)
+ process_retry(nil, msg, queue, e)
  else
  Sidekiq.death_handlers.each do |handler|
  handler.call(msg, e)
@@ -96,14 +102,14 @@ module Sidekiq
  end

  # The local retry support means that any errors that occur within
- # this block can be associated with the given worker instance.
+ # this block can be associated with the given job instance.
  # This is required to support the `sidekiq_retries_exhausted` block.
  #
  # Note that any exception from the block is wrapped in the Skip
  # exception so the global block does not reprocess the error. The
  # Skip exception is unwrapped within Sidekiq::Processor#process before
  # calling the handle_exception handlers.
- def local(worker, msg, queue)
+ def local(jobinst, jobstr, queue)
  yield
  rescue Handled => ex
  raise ex
@@ -114,12 +120,13 @@ module Sidekiq
  # ignore, will be pushed back onto queue during hard_shutdown
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

+ msg = Sidekiq.load_json(jobstr)
  if msg["retry"].nil?
- msg["retry"] = worker.class.get_sidekiq_options["retry"]
+ msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
  end

  raise e unless msg["retry"]
- attempt_retry(worker, msg, queue, e)
+ process_retry(jobinst, msg, queue, e)
  # We've handled this error associated with this job, don't
  # need to handle it at the global level
  raise Skip
@@ -127,10 +134,10 @@ module Sidekiq

  private

- # Note that +worker+ can be nil here if an error is raised before we can
- # instantiate the worker instance. All access must be guarded and
+ # Note that +jobinst+ can be nil here if an error is raised before we can
+ # instantiate the job instance. All access must be guarded and
  # best effort.
- def attempt_retry(worker, msg, queue, exception)
+ def process_retry(jobinst, msg, queue, exception)
  max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

  msg["queue"] = (msg["retry_queue"] || queue)
@@ -151,50 +158,90 @@ module Sidekiq
  msg["retry_count"] = 0
  end

- if msg["backtrace"] == true
- msg["error_backtrace"] = exception.backtrace
- elsif !msg["backtrace"]
- # do nothing
- elsif msg["backtrace"].to_i != 0
- msg["error_backtrace"] = exception.backtrace[0...msg["backtrace"].to_i]
+ if msg["backtrace"]
+ lines = if msg["backtrace"] == true
+ exception.backtrace
+ else
+ exception.backtrace[0...msg["backtrace"].to_i]
+ end
+
+ msg["error_backtrace"] = compress_backtrace(lines)
  end

- if count < max_retry_attempts
- delay = delay_for(worker, count, exception)
- # Logging here can break retries if the logging device raises ENOSPC #3979
- # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
- retry_at = Time.now.to_f + delay
- payload = Sidekiq.dump_json(msg)
- Sidekiq.redis do |conn|
- conn.zadd("retry", retry_at.to_s, payload)
- end
- else
- # Goodbye dear message, you (re)tried your best I'm sure.
- retries_exhausted(worker, msg, exception)
+ # Goodbye dear message, you (re)tried your best I'm sure.
+ return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+ strategy, delay = delay_for(jobinst, count, exception)
+ case strategy
+ when :discard
+ return # poof!
+ when :kill
+ return retries_exhausted(jobinst, msg, exception)
+ end
+
+ # Logging here can break retries if the logging device raises ENOSPC #3979
+ # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+ jitter = rand(10) * (count + 1)
+ retry_at = Time.now.to_f + delay + jitter
+ payload = Sidekiq.dump_json(msg)
+ redis do |conn|
+ conn.zadd("retry", retry_at.to_s, payload)
+ end
+ end
+
+ # returns (strategy, seconds)
+ def delay_for(jobinst, count, exception)
+ rv = begin
+ # sidekiq_retry_in can return two different things:
+ # 1. When to retry next, as an integer of seconds
+ # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+ jobinst&.sidekiq_retry_in_block&.call(count, exception)
+ rescue Exception => e
+ handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+ nil
  end
+
+ delay = (count**4) + 15
+ if Integer === rv && rv > 0
+ delay = rv
+ elsif rv == :discard
+ return [:discard, nil] # do nothing, job goes poof
+ elsif rv == :kill
+ return [:kill, nil]
+ end
+
+ [:default, delay]
  end

- def retries_exhausted(worker, msg, exception)
+ def retries_exhausted(jobinst, msg, exception)
  begin
- block = worker&.sidekiq_retries_exhausted_block
+ block = jobinst&.sidekiq_retries_exhausted_block
  block&.call(msg, exception)
  rescue => e
  handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
  end

- Sidekiq.death_handlers.each do |handler|
+ send_to_morgue(msg) unless msg["dead"] == false
+
+ config.death_handlers.each do |handler|
  handler.call(msg, exception)
  rescue => e
  handle_exception(e, {context: "Error calling death handler", job: msg})
  end
-
- send_to_morgue(msg) unless msg["dead"] == false
  end

  def send_to_morgue(msg)
  logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
  payload = Sidekiq.dump_json(msg)
- DeadSet.new.kill(payload, notify_failure: false)
+ now = Time.now.to_f
+
+ config.redis do |conn|
+ conn.multi do |xa|
+ xa.zadd("dead", now.to_s, payload)
+ xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+ xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+ end
+ end
  end

  def retry_attempts_from(msg_retry, default)
@@ -205,26 +252,6 @@ module Sidekiq
  end
  end

- def delay_for(worker, count, exception)
- if worker&.sidekiq_retry_in_block
- custom_retry_in = retry_in(worker, count, exception).to_i
- return custom_retry_in if custom_retry_in > 0
- end
- seconds_to_delay(count)
- end
-
- # delayed_job uses the same basic formula
- def seconds_to_delay(count)
- (count**4) + 15 + (rand(30) * (count + 1))
- end
-
- def retry_in(worker, count, exception)
- worker.sidekiq_retry_in_block.call(count, exception)
- rescue Exception => e
- handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
- nil
- end
-
  def exception_caused_by_shutdown?(e, checked_causes = [])
  return false unless e.cause

@@ -245,5 +272,11 @@ module Sidekiq
  rescue
  +"!!! ERROR MESSAGE THREW AN ERROR !!!"
  end
+
+ def compress_backtrace(backtrace)
+ serialized = Sidekiq.dump_json(backtrace)
+ compressed = Zlib::Deflate.deflate(serialized)
+ Base64.encode64(compressed)
+ end
  end
  end
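
The rewritten `delay_for` above documents that a `sidekiq_retry_in` block may now return either a number of seconds or one of the symbols `:discard`, `:kill` or `:default`. A hedged sketch of a job using that contract (the class, exception choices and interval are illustrative only):

    class FlakyApiJob
      include Sidekiq::Job

      sidekiq_retry_in do |count, exception|
        case exception
        when ArgumentError then :discard        # bad input: drop silently, no retry, no dead set
        when RuntimeError then 10 * (count + 1) # transient: retry sooner than the default count**4 + 15 backoff
        else :kill                              # anything else goes straight to the dead set
        end
      end

      def perform(url)
        # call the flaky API here
      end
    end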
data/lib/sidekiq/job_util.rb ADDED
@@ -0,0 +1,71 @@
+ require "securerandom"
+ require "time"
+
+ module Sidekiq
+ module JobUtil
+ # These functions encapsulate various job utilities.
+
+ TRANSIENT_ATTRIBUTES = %w[]
+
+ def validate(item)
+ raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+ raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+ raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+ raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+ raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+ end
+
+ def verify_json(item)
+ job_class = item["wrapped"] || item["class"]
+ if Sidekiq[:on_complex_arguments] == :raise
+ msg = <<~EOM
+ Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+ To disable this error, remove `Sidekiq.strict_args!` from your initializer.
+ EOM
+ raise(ArgumentError, msg) unless json_safe?(item)
+ elsif Sidekiq[:on_complex_arguments] == :warn
+ Sidekiq.logger.warn <<~EOM unless json_safe?(item)
+ Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
+ Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
+ by calling `Sidekiq.strict_args!` during Sidekiq initialization.
+ EOM
+ end
+ end
+
+ def normalize_item(item)
+ validate(item)
+
+ # merge in the default sidekiq_options for the item's class and/or wrapped element
+ # this allows ActiveJobs to control sidekiq_options too.
+ defaults = normalized_hash(item["class"])
+ defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
+ item = defaults.merge(item)
+
+ raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
+ # remove job attributes which aren't necessary to persist into Redis
+ TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+ item["jid"] ||= SecureRandom.hex(12)
+ item["class"] = item["class"].to_s
+ item["queue"] = item["queue"].to_s
+ item["created_at"] ||= Time.now.to_f
+ item
+ end
+
+ def normalized_hash(item_class)
+ if item_class.is_a?(Class)
+ raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+ item_class.get_sidekiq_options
+ else
+ Sidekiq.default_job_options
+ end
+ end
+
+ private
+
+ def json_safe?(item)
+ JSON.parse(JSON.dump(item["args"])) == item["args"]
+ end
+ end
+ end
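
The `verify_json` check above is keyed off the `on_complex_arguments` setting referenced in its own warning text, which `Sidekiq.strict_args!` switches on from an initializer. A brief hedged sketch of what that looks like in an application (the job class and argument values are illustrative):

    # config/initializers/sidekiq.rb
    Sidekiq.strict_args!   # make non-JSON-native job arguments raise instead of warn

    SomeJob.perform_async(123, "ok", {"key" => "value"})  # fine: all native JSON types
    SomeJob.perform_async(Date.today)                     # fails json_safe? and raises ArgumentError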