sidekiq 5.2.10 → 6.5.6

Files changed (124)
  1. checksums.yaml +4 -4
  2. data/Changes.md +391 -1
  3. data/LICENSE +3 -3
  4. data/README.md +24 -35
  5. data/bin/sidekiq +27 -3
  6. data/bin/sidekiqload +79 -67
  7. data/bin/sidekiqmon +8 -0
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +504 -307
  13. data/lib/sidekiq/cli.rb +190 -206
  14. data/lib/sidekiq/client.rb +77 -81
  15. data/lib/sidekiq/component.rb +65 -0
  16. data/lib/sidekiq/delay.rb +8 -7
  17. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  18. data/lib/sidekiq/extensions/active_record.rb +13 -10
  19. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  20. data/lib/sidekiq/extensions/generic_proxy.rb +7 -5
  21. data/lib/sidekiq/fetch.rb +50 -40
  22. data/lib/sidekiq/job.rb +13 -0
  23. data/lib/sidekiq/job_logger.rb +33 -7
  24. data/lib/sidekiq/job_retry.rb +126 -106
  25. data/lib/sidekiq/job_util.rb +71 -0
  26. data/lib/sidekiq/launcher.rb +177 -83
  27. data/lib/sidekiq/logger.rb +156 -0
  28. data/lib/sidekiq/manager.rb +40 -41
  29. data/lib/sidekiq/metrics/deploy.rb +47 -0
  30. data/lib/sidekiq/metrics/query.rb +153 -0
  31. data/lib/sidekiq/metrics/shared.rb +94 -0
  32. data/lib/sidekiq/metrics/tracking.rb +134 -0
  33. data/lib/sidekiq/middleware/chain.rb +102 -46
  34. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  35. data/lib/sidekiq/middleware/i18n.rb +7 -7
  36. data/lib/sidekiq/middleware/modules.rb +21 -0
  37. data/lib/sidekiq/monitor.rb +133 -0
  38. data/lib/sidekiq/paginator.rb +20 -16
  39. data/lib/sidekiq/processor.rb +104 -97
  40. data/lib/sidekiq/rails.rb +47 -37
  41. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  42. data/lib/sidekiq/redis_connection.rb +108 -77
  43. data/lib/sidekiq/ring_buffer.rb +29 -0
  44. data/lib/sidekiq/scheduled.rb +64 -35
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +6 -5
  48. data/lib/sidekiq/testing.rb +68 -58
  49. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  50. data/lib/sidekiq/version.rb +2 -1
  51. data/lib/sidekiq/web/action.rb +15 -11
  52. data/lib/sidekiq/web/application.rb +100 -77
  53. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  54. data/lib/sidekiq/web/helpers.rb +134 -94
  55. data/lib/sidekiq/web/router.rb +23 -19
  56. data/lib/sidekiq/web.rb +65 -105
  57. data/lib/sidekiq/worker.rb +253 -106
  58. data/lib/sidekiq.rb +170 -62
  59. data/sidekiq.gemspec +23 -16
  60. data/web/assets/images/apple-touch-icon.png +0 -0
  61. data/web/assets/javascripts/application.js +112 -61
  62. data/web/assets/javascripts/chart.min.js +13 -0
  63. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  64. data/web/assets/javascripts/dashboard.js +53 -89
  65. data/web/assets/javascripts/graph.js +16 -0
  66. data/web/assets/javascripts/metrics.js +262 -0
  67. data/web/assets/stylesheets/application-dark.css +143 -0
  68. data/web/assets/stylesheets/application-rtl.css +0 -4
  69. data/web/assets/stylesheets/application.css +88 -233
  70. data/web/locales/ar.yml +8 -2
  71. data/web/locales/de.yml +14 -2
  72. data/web/locales/el.yml +43 -19
  73. data/web/locales/en.yml +13 -1
  74. data/web/locales/es.yml +18 -2
  75. data/web/locales/fr.yml +10 -3
  76. data/web/locales/ja.yml +7 -1
  77. data/web/locales/lt.yml +83 -0
  78. data/web/locales/pl.yml +4 -4
  79. data/web/locales/pt-br.yml +27 -9
  80. data/web/locales/ru.yml +4 -0
  81. data/web/locales/vi.yml +83 -0
  82. data/web/views/_footer.erb +1 -1
  83. data/web/views/_job_info.erb +3 -2
  84. data/web/views/_nav.erb +1 -1
  85. data/web/views/_poll_link.erb +2 -5
  86. data/web/views/_summary.erb +7 -7
  87. data/web/views/busy.erb +56 -22
  88. data/web/views/dashboard.erb +23 -14
  89. data/web/views/dead.erb +3 -3
  90. data/web/views/layout.erb +3 -1
  91. data/web/views/metrics.erb +69 -0
  92. data/web/views/metrics_for_job.erb +87 -0
  93. data/web/views/morgue.erb +9 -6
  94. data/web/views/queue.erb +23 -10
  95. data/web/views/queues.erb +10 -2
  96. data/web/views/retries.erb +11 -8
  97. data/web/views/retry.erb +3 -3
  98. data/web/views/scheduled.erb +5 -2
  99. metadata +53 -64
  100. data/.circleci/config.yml +0 -61
  101. data/.github/contributing.md +0 -32
  102. data/.github/issue_template.md +0 -11
  103. data/.gitignore +0 -15
  104. data/.travis.yml +0 -11
  105. data/3.0-Upgrade.md +0 -70
  106. data/4.0-Upgrade.md +0 -53
  107. data/5.0-Upgrade.md +0 -56
  108. data/COMM-LICENSE +0 -97
  109. data/Ent-Changes.md +0 -238
  110. data/Gemfile +0 -19
  111. data/Pro-2.0-Upgrade.md +0 -138
  112. data/Pro-3.0-Upgrade.md +0 -44
  113. data/Pro-4.0-Upgrade.md +0 -35
  114. data/Pro-Changes.md +0 -759
  115. data/Rakefile +0 -9
  116. data/bin/sidekiqctl +0 -20
  117. data/code_of_conduct.md +0 -50
  118. data/lib/generators/sidekiq/worker_generator.rb +0 -49
  119. data/lib/sidekiq/core_ext.rb +0 -1
  120. data/lib/sidekiq/ctl.rb +0 -221
  121. data/lib/sidekiq/exception_handler.rb +0 -29
  122. data/lib/sidekiq/logging.rb +0 -122
  123. data/lib/sidekiq/middleware/server/active_record.rb +0 -23
  124. data/lib/sidekiq/util.rb +0 -66
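To move an application onto this release, the usual path is to relax the Gemfile pin and let Bundler re-resolve; a minimal sketch, assuming a standard Bundler setup (the constraint shown is illustrative, not taken from this gem's metadata):

    # Gemfile (hypothetical application, not part of this gem)
    source "https://rubygems.org"

    gem "sidekiq", "~> 6.5"

    # then re-resolve just this gem:
    #   bundle update sidekiq

The 5.x to 6.x jump is a major-version upgrade; the bundled data/Changes.md (updated above) lists the 6.x changes in detail.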
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,81 +1,91 @@
  # frozen_string_literal: true
- require 'sidekiq'

- module Sidekiq
+ require "sidekiq"
+ require "sidekiq/component"
+
+ module Sidekiq # :nodoc:
    class BasicFetch
+     include Sidekiq::Component
      # We want the fetch operation to timeout every few seconds so the thread
      # can check if the process is shutting down.
      TIMEOUT = 2

-     UnitOfWork = Struct.new(:queue, :job) do
+     UnitOfWork = Struct.new(:queue, :job, :config) {
        def acknowledge
          # nothing to do
        end

        def queue_name
-         queue.sub(/.*queue:/, '')
+         queue.delete_prefix("queue:")
        end

        def requeue
-         Sidekiq.redis do |conn|
-           conn.rpush("queue:#{queue_name}", job)
+         config.redis do |conn|
+           conn.rpush(queue, job)
          end
        end
-     end
+     }

-     def initialize(options)
-       @strictly_ordered_queues = !!options[:strict]
-       @queues = options[:queues].map { |q| "queue:#{q}" }
+     def initialize(config)
+       raise ArgumentError, "missing queue list" unless config[:queues]
+       @config = config
+       @strictly_ordered_queues = !!@config[:strict]
+       @queues = @config[:queues].map { |q| "queue:#{q}" }
        if @strictly_ordered_queues
-         @queues = @queues.uniq
-         @queues << TIMEOUT
+         @queues.uniq!
+         @queues << {timeout: TIMEOUT}
        end
      end

      def retrieve_work
-       work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
-       UnitOfWork.new(*work) if work
-     end
-
-     # Creating the Redis#brpop command takes into account any
-     # configured queue weights. By default Redis#brpop returns
-     # data from the first queue that has pending elements. We
-     # recreate the queue command each time we invoke Redis#brpop
-     # to honor weights and avoid queue starvation.
-     def queues_cmd
-       if @strictly_ordered_queues
-         @queues
-       else
-         queues = @queues.shuffle.uniq
-         queues << TIMEOUT
-         queues
+       qs = queues_cmd
+       # 4825 Sidekiq Pro with all queues paused will return an
+       # empty set of queues with a trailing TIMEOUT value.
+       if qs.size <= 1
+         sleep(TIMEOUT)
+         return nil
        end
-     end

+       queue, job = redis { |conn| conn.brpop(*qs) }
+       UnitOfWork.new(queue, job, config) if queue
+     end

-     # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
-     # an instance method will make it async to the Fetcher actor
-     def self.bulk_requeue(inprogress, options)
+     def bulk_requeue(inprogress, options)
        return if inprogress.empty?

-       Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+       logger.debug { "Re-queueing terminated jobs" }
        jobs_to_requeue = {}
        inprogress.each do |unit_of_work|
-         jobs_to_requeue[unit_of_work.queue_name] ||= []
-         jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.job
+         jobs_to_requeue[unit_of_work.queue] ||= []
+         jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
        end

-       Sidekiq.redis do |conn|
-         conn.pipelined do
+       redis do |conn|
+         conn.pipelined do |pipeline|
            jobs_to_requeue.each do |queue, jobs|
-             conn.rpush("queue:#{queue}", jobs)
+             pipeline.rpush(queue, jobs)
            end
          end
        end
-       Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+       logger.info("Pushed #{inprogress.size} jobs back to Redis")
      rescue => ex
-       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+       logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
      end

+     # Creating the Redis#brpop command takes into account any
+     # configured queue weights. By default Redis#brpop returns
+     # data from the first queue that has pending elements. We
+     # recreate the queue command each time we invoke Redis#brpop
+     # to honor weights and avoid queue starvation.
+     def queues_cmd
+       if @strictly_ordered_queues
+         @queues
+       else
+         permute = @queues.shuffle
+         permute.uniq!
+         permute << {timeout: TIMEOUT}
+         permute
+       end
+     end
    end
  end
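The fetcher is now an ordinary component: `bulk_requeue` becomes an instance method, `UnitOfWork` carries the config it needs for `requeue`, and `initialize` rejects a missing queue list. A rough sketch of driving it directly, useful mainly for seeing the new contract; this is not a public API, it assumes a local Redis, and it assumes (as 6.5's own processes appear to do) that the global `Sidekiq` module, which responds to `[]` and `redis`, is what gets passed in as `config`:

    require "sidekiq"
    require "sidekiq/fetch"

    # Assumption: Sidekiq[:key] read/write exists in 6.5, as used by
    # verify_json elsewhere in this release.
    Sidekiq[:queues] = %w[critical default low]
    Sidekiq[:strict] = false # false => weighted shuffle in queues_cmd

    fetcher = Sidekiq::BasicFetch.new(Sidekiq)
    work = fetcher.retrieve_work # BRPOPs for up to TIMEOUT (2s); may return nil
    if work
      puts "#{work.queue_name}: #{work.job}"
      work.requeue # pushes the raw JSON payload back onto its queue
    end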
data/lib/sidekiq/job.rb ADDED
@@ -0,0 +1,13 @@
+ require "sidekiq/worker"
+
+ module Sidekiq
+   # Sidekiq::Job is a new alias for Sidekiq::Worker as of Sidekiq 6.3.0.
+   # Use `include Sidekiq::Job` rather than `include Sidekiq::Worker`.
+   #
+   # The term "worker" is too generic and overly confusing, used in several
+   # different contexts meaning different things. Many people call a Sidekiq
+   # process a "worker". Some people call the thread that executes jobs a
+   # "worker". This change brings Sidekiq closer to ActiveJob where your job
+   # classes extend ApplicationJob.
+   Job = Worker
+ end
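Since `Sidekiq::Job` is a plain alias, new job classes can use the preferred name while existing `Sidekiq::Worker` classes keep working unchanged; for example:

    class HardJob
      include Sidekiq::Job # same module as Sidekiq::Worker, new name
      sidekiq_options queue: "default", retry: 5

      def perform(user_id, note)
        # do the work for user_id...
      end
    end

    HardJob.perform_async(1234, "welcome")       # enqueue now
    HardJob.perform_in(5 * 60, 1234, "reminder") # enqueue in five minutes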
data/lib/sidekiq/job_logger.rb CHANGED
@@ -1,25 +1,51 @@
  # frozen_string_literal: true
+
  module Sidekiq
    class JobLogger
+     def initialize(logger = Sidekiq.logger)
+       @logger = logger
+     end

      def call(item, queue)
        start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-       logger.info("start")
+       @logger.info("start")
+
        yield
-       logger.info("done: #{elapsed(start)} sec")
+
+       Sidekiq::Context.add(:elapsed, elapsed(start))
+       @logger.info("done")
      rescue Exception
-       logger.info("fail: #{elapsed(start)} sec")
+       Sidekiq::Context.add(:elapsed, elapsed(start))
+       @logger.info("fail")
+
        raise
      end

+     def prepare(job_hash, &block)
+       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
+       # attribute to expose the underlying thing.
+       h = {
+         class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
+         jid: job_hash["jid"]
+       }
+       h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+       h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
+
+       Thread.current[:sidekiq_context] = h
+       level = job_hash["log_level"]
+       if level
+         @logger.log_at(level, &block)
+       else
+         yield
+       end
+     ensure
+       Thread.current[:sidekiq_context] = nil
+     end
+
      private

      def elapsed(start)
        (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
      end
-
-     def logger
-       Sidekiq.logger
-     end
    end
  end
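`prepare` is what lets a single job run at its own log level: the job hash's "log_level" value is honored for the duration of the job via the logger's `log_at`. Because `sidekiq_options` are merged into the job payload (see `normalize_item` in job_util.rb further down), a per-class level can be declared roughly like this; a sketch, assuming the option name maps straight onto the "log_level" key read above:

    class NoisyJob
      include Sidekiq::Job
      # "log_level" ends up in the job hash, so JobLogger#prepare wraps this
      # class's jobs in @logger.log_at(:debug) while they execute.
      sidekiq_options log_level: :debug

      def perform
        logger.debug "verbose detail visible only while this job runs"
      end
    end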
data/lib/sidekiq/job_retry.rb CHANGED
@@ -1,6 +1,8 @@
  # frozen_string_literal: true
- require 'sidekiq/scheduled'
- require 'sidekiq/api'
+
+ require "zlib"
+ require "base64"
+ require "sidekiq/component"

  module Sidekiq
    ##
@@ -21,18 +23,19 @@ module Sidekiq
    #
    # A job looks like:
    #
-   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+   #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
    #
    # The 'retry' option also accepts a number (in place of 'true'):
    #
-   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+   #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
    #
    # The job will be retried this number of times before giving up. (If simply
    # 'true', Sidekiq retries 25 times)
    #
-   # We'll add a bit more data to the job to support retries:
+   # Relevant options for job retries:
    #
-   #  * 'queue' - the queue to use
+   #  * 'queue' - the queue for the initial job
+   #  * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
    #  * 'retry_count' - number of times we've retried so far.
    #  * 'error_message' - the message from the exception
    #  * 'error_class' - the exception class
@@ -48,29 +51,32 @@ module Sidekiq
    #
    #   Sidekiq.options[:max_retries] = 7
    #
-   # or limit the number of retries for a particular worker with:
+   # or limit the number of retries for a particular job and send retries to
+   # a low priority queue with:
    #
-   #   class MyWorker
-   #     include Sidekiq::Worker
-   #     sidekiq_options :retry => 10
+   #   class MyJob
+   #     include Sidekiq::Job
+   #     sidekiq_options retry: 10, retry_queue: 'low'
    #   end
    #
    class JobRetry
      class Handled < ::RuntimeError; end
+
      class Skip < Handled; end

-     include Sidekiq::Util
+     include Sidekiq::Component

      DEFAULT_MAX_RETRY_ATTEMPTS = 25

-     def initialize(options = {})
-       @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+     def initialize(options)
+       @config = options
+       @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
      end

      # The global retry handler requires only the barest of data.
      # We want to be able to retry as much as possible so we don't
-     # require the worker to be instantiated.
-     def global(msg, queue)
+     # require the job to be instantiated.
+     def global(jobstr, queue)
        yield
      rescue Handled => ex
        raise ex
@@ -81,31 +87,29 @@ module Sidekiq
        # ignore, will be pushed back onto queue during hard_shutdown
        raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

-       if msg['retry']
-         attempt_retry(nil, msg, queue, e)
+       msg = Sidekiq.load_json(jobstr)
+       if msg["retry"]
+         process_retry(nil, msg, queue, e)
        else
          Sidekiq.death_handlers.each do |handler|
-           begin
-             handler.call(msg, e)
-           rescue => handler_ex
-             handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
-           end
+           handler.call(msg, e)
+         rescue => handler_ex
+           handle_exception(handler_ex, {context: "Error calling death handler", job: msg})
          end
        end

        raise Handled
      end

-
      # The local retry support means that any errors that occur within
-     # this block can be associated with the given worker instance.
+     # this block can be associated with the given job instance.
      # This is required to support the `sidekiq_retries_exhausted` block.
      #
      # Note that any exception from the block is wrapped in the Skip
      # exception so the global block does not reprocess the error. The
      # Skip exception is unwrapped within Sidekiq::Processor#process before
      # calling the handle_exception handlers.
-     def local(worker, msg, queue)
+     def local(jobinst, jobstr, queue)
        yield
      rescue Handled => ex
        raise ex
@@ -116,12 +120,13 @@ module Sidekiq
        # ignore, will be pushed back onto queue during hard_shutdown
        raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)

-       if msg['retry'] == nil
-         msg['retry'] = worker.class.get_sidekiq_options['retry']
+       msg = Sidekiq.load_json(jobstr)
+       if msg["retry"].nil?
+         msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
        end

-       raise e unless msg['retry']
-       attempt_retry(worker, msg, queue, e)
+       raise e unless msg["retry"]
+       process_retry(jobinst, msg, queue, e)
        # We've handled this error associated with this job, don't
        # need to handle it at the global level
        raise Skip
@@ -129,17 +134,13 @@

      private

-     # Note that +worker+ can be nil here if an error is raised before we can
-     # instantiate the worker instance. All access must be guarded and
+     # Note that +jobinst+ can be nil here if an error is raised before we can
+     # instantiate the job instance. All access must be guarded and
      # best effort.
-     def attempt_retry(worker, msg, queue, exception)
-       max_retry_attempts = retry_attempts_from(msg['retry'], @max_retries)
+     def process_retry(jobinst, msg, queue, exception)
+       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

-       msg['queue'] = if msg['retry_queue']
-         msg['retry_queue']
-       else
-         queue
-       end
+       msg["queue"] = (msg["retry_queue"] || queue)

        m = exception_message(exception)
        if m.respond_to?(:scrub!)
@@ -147,62 +148,100 @@
          m.scrub!
        end

-       msg['error_message'] = m
-       msg['error_class'] = exception.class.name
-       count = if msg['retry_count']
-         msg['retried_at'] = Time.now.to_f
-         msg['retry_count'] += 1
+       msg["error_message"] = m
+       msg["error_class"] = exception.class.name
+       count = if msg["retry_count"]
+         msg["retried_at"] = Time.now.to_f
+         msg["retry_count"] += 1
        else
-         msg['failed_at'] = Time.now.to_f
-         msg['retry_count'] = 0
+         msg["failed_at"] = Time.now.to_f
+         msg["retry_count"] = 0
        end

-       if msg['backtrace'] == true
-         msg['error_backtrace'] = exception.backtrace
-       elsif !msg['backtrace']
-         # do nothing
-       elsif msg['backtrace'].to_i != 0
-         msg['error_backtrace'] = exception.backtrace[0...msg['backtrace'].to_i]
+       if msg["backtrace"]
+         lines = if msg["backtrace"] == true
+           exception.backtrace
+         else
+           exception.backtrace[0...msg["backtrace"].to_i]
+         end
+
+         msg["error_backtrace"] = compress_backtrace(lines)
        end

-       if count < max_retry_attempts
-         delay = delay_for(worker, count, exception)
-         # Logging here can break retries if the logging device raises ENOSPC #3979
-         #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-         retry_at = Time.now.to_f + delay
-         payload = Sidekiq.dump_json(msg)
-         Sidekiq.redis do |conn|
-           conn.zadd('retry', retry_at.to_s, payload)
-         end
-       else
-         # Goodbye dear message, you (re)tried your best I'm sure.
-         retries_exhausted(worker, msg, exception)
+       # Goodbye dear message, you (re)tried your best I'm sure.
+       return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+       strategy, delay = delay_for(jobinst, count, exception)
+       case strategy
+       when :discard
+         return # poof!
+       when :kill
+         return retries_exhausted(jobinst, msg, exception)
        end
+
+       # Logging here can break retries if the logging device raises ENOSPC #3979
+       # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+       jitter = rand(10) * (count + 1)
+       retry_at = Time.now.to_f + delay + jitter
+       payload = Sidekiq.dump_json(msg)
+       redis do |conn|
+         conn.zadd("retry", retry_at.to_s, payload)
+       end
+     end
+
+     # returns (strategy, seconds)
+     def delay_for(jobinst, count, exception)
+       rv = begin
+         # sidekiq_retry_in can return two different things:
+         # 1. When to retry next, as an integer of seconds
+         # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+         jobinst&.sidekiq_retry_in_block&.call(count, exception)
+       rescue Exception => e
+         handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+         nil
+       end
+
+       delay = (count**4) + 15
+       if Integer === rv && rv > 0
+         delay = rv
+       elsif rv == :discard
+         return [:discard, nil] # do nothing, job goes poof
+       elsif rv == :kill
+         return [:kill, nil]
+       end
+
+       [:default, delay]
      end

-     def retries_exhausted(worker, msg, exception)
+     def retries_exhausted(jobinst, msg, exception)
        begin
-         block = worker && worker.sidekiq_retries_exhausted_block
-         block.call(msg, exception) if block
+         block = jobinst&.sidekiq_retries_exhausted_block
+         block&.call(msg, exception)
        rescue => e
-         handle_exception(e, { context: "Error calling retries_exhausted", job: msg })
+         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
        end

-       Sidekiq.death_handlers.each do |handler|
-         begin
-           handler.call(msg, exception)
-         rescue => e
-           handle_exception(e, { context: "Error calling death handler", job: msg })
-         end
-       end
+       send_to_morgue(msg) unless msg["dead"] == false

-       send_to_morgue(msg) unless msg['dead'] == false
+       config.death_handlers.each do |handler|
+         handler.call(msg, exception)
+       rescue => e
+         handle_exception(e, {context: "Error calling death handler", job: msg})
+       end
      end

      def send_to_morgue(msg)
-       logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+       logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
        payload = Sidekiq.dump_json(msg)
-       DeadSet.new.kill(payload, notify_failure: false)
+       now = Time.now.to_f
+
+       config.redis do |conn|
+         conn.multi do |xa|
+           xa.zadd("dead", now.to_s, payload)
+           xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+           xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+         end
+       end
      end

      def retry_attempts_from(msg_retry, default)
@@ -213,28 +252,6 @@
        end
      end

-     def delay_for(worker, count, exception)
-       if worker && worker.sidekiq_retry_in_block
-         custom_retry_in = retry_in(worker, count, exception).to_i
-         return custom_retry_in if custom_retry_in > 0
-       end
-       seconds_to_delay(count)
-     end
-
-     # delayed_job uses the same basic formula
-     def seconds_to_delay(count)
-       (count ** 4) + 15 + (rand(30)*(count+1))
-     end
-
-     def retry_in(worker, count, exception)
-       begin
-         worker.sidekiq_retry_in_block.call(count, exception)
-       rescue Exception => e
-         handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
-         nil
-       end
-     end
-
      def exception_caused_by_shutdown?(e, checked_causes = [])
        return false unless e.cause

@@ -249,14 +266,17 @@
      # Extract message from exception.
      # Set a default if the message raises an error
      def exception_message(exception)
-       begin
-         # App code can stuff all sorts of crazy binary data into the error message
-         # that won't convert to JSON.
-         exception.message.to_s[0, 10_000]
-       rescue
-         "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
-       end
+       # App code can stuff all sorts of crazy binary data into the error message
+       # that won't convert to JSON.
+       exception.message.to_s[0, 10_000]
+     rescue
+       +"!!! ERROR MESSAGE THREW AN ERROR !!!"
      end

+     def compress_backtrace(backtrace)
+       serialized = Sidekiq.dump_json(backtrace)
+       compressed = Zlib::Deflate.deflate(serialized)
+       Base64.encode64(compressed)
+     end
    end
  end
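The retry flow the diff settles on: `process_retry` records the error metadata, asks `delay_for` for a (strategy, seconds) pair, and then either reschedules with jitter, discards the job, or sends it to the dead set. From the job's side this surfaces through `sidekiq_retry_in`, which may now return a number of seconds or one of the symbols handled above. A hedged example; the exception classes are placeholders for whatever an application actually raises:

    # Placeholder error types for the example.
    class RateLimited < StandardError; end
    class InvalidRecord < StandardError; end

    class UnreliableApiJob
      include Sidekiq::Job
      sidekiq_options retry: 10, retry_queue: "low" # retries land on the 'low' queue

      # Integer => retry that many seconds out (plus the jitter added above),
      # :discard => drop the job silently, :kill => straight to the dead set.
      sidekiq_retry_in do |count, exception|
        case exception
        when RateLimited then 60 * (count + 1)
        when InvalidRecord then :discard
        else :kill
        end
      end

      sidekiq_retries_exhausted do |msg, ex|
        Sidekiq.logger.warn "Giving up on #{msg["class"]} #{msg["jid"]}: #{ex.message}"
      end

      def perform(record_id)
        # call the flaky upstream API...
      end
    end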
data/lib/sidekiq/job_util.rb ADDED
@@ -0,0 +1,71 @@
+ require "securerandom"
+ require "time"
+
+ module Sidekiq
+   module JobUtil
+     # These functions encapsulate various job utilities.
+
+     TRANSIENT_ATTRIBUTES = %w[]
+
+     def validate(item)
+       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+       raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+     end
+
+     def verify_json(item)
+       job_class = item["wrapped"] || item["class"]
+       if Sidekiq[:on_complex_arguments] == :raise
+         msg = <<~EOM
+           Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+           To disable this error, remove `Sidekiq.strict_args!` from your initializer.
+         EOM
+         raise(ArgumentError, msg) unless json_safe?(item)
+       elsif Sidekiq[:on_complex_arguments] == :warn
+         Sidekiq.logger.warn <<~EOM unless json_safe?(item)
+           Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
+           Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
+           by calling `Sidekiq.strict_args!` during Sidekiq initialization.
+         EOM
+       end
+     end
+
+     def normalize_item(item)
+       validate(item)
+
+       # merge in the default sidekiq_options for the item's class and/or wrapped element
+       # this allows ActiveJobs to control sidekiq_options too.
+       defaults = normalized_hash(item["class"])
+       defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
+       item = defaults.merge(item)
+
+       raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
+       # remove job attributes which aren't necessary to persist into Redis
+       TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+       item["jid"] ||= SecureRandom.hex(12)
+       item["class"] = item["class"].to_s
+       item["queue"] = item["queue"].to_s
+       item["created_at"] ||= Time.now.to_f
+       item
+     end
+
+     def normalized_hash(item_class)
+       if item_class.is_a?(Class)
+         raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+         item_class.get_sidekiq_options
+       else
+         Sidekiq.default_job_options
+       end
+     end
+
+     private
+
+     def json_safe?(item)
+       JSON.parse(JSON.dump(item["args"])) == item["args"]
+     end
+   end
+ end
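`verify_json` is the enforcement point for `Sidekiq.strict_args!`: it round-trips the job's args through JSON and either raises or warns depending on `Sidekiq[:on_complex_arguments]`. A small sketch of what that means for callers; the job class is made up:

    # Typically in config/initializers/sidekiq.rb:
    Sidekiq.strict_args!   # per the message above, this enables the :raise behavior

    class WelcomeEmailJob
      include Sidekiq::Job

      def perform(user_id)
        puts "emailing user #{user_id}"
      end
    end

    WelcomeEmailJob.perform_async(42)       # fine: an Integer survives the JSON round-trip
    WelcomeEmailJob.perform_async(:urgent)  # raises ArgumentError: the Symbol parses back as "urgent"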