sidekiq 6.1.2 → 6.5.6

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.

This release of sidekiq has been flagged as potentially problematic.

Files changed (113)
  1. checksums.yaml +4 -4
  2. data/Changes.md +215 -2
  3. data/LICENSE +3 -3
  4. data/README.md +9 -4
  5. data/bin/sidekiq +3 -3
  6. data/bin/sidekiqload +70 -66
  7. data/bin/sidekiqmon +1 -1
  8. data/lib/generators/sidekiq/job_generator.rb +57 -0
  9. data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
  10. data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
  11. data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
  12. data/lib/sidekiq/api.rb +321 -145
  13. data/lib/sidekiq/cli.rb +73 -40
  14. data/lib/sidekiq/client.rb +48 -72
  15. data/lib/sidekiq/{util.rb → component.rb} +12 -14
  16. data/lib/sidekiq/delay.rb +3 -1
  17. data/lib/sidekiq/extensions/generic_proxy.rb +4 -2
  18. data/lib/sidekiq/fetch.rb +31 -20
  19. data/lib/sidekiq/job.rb +13 -0
  20. data/lib/sidekiq/job_logger.rb +16 -28
  21. data/lib/sidekiq/job_retry.rb +79 -59
  22. data/lib/sidekiq/job_util.rb +71 -0
  23. data/lib/sidekiq/launcher.rb +126 -65
  24. data/lib/sidekiq/logger.rb +11 -20
  25. data/lib/sidekiq/manager.rb +35 -34
  26. data/lib/sidekiq/metrics/deploy.rb +47 -0
  27. data/lib/sidekiq/metrics/query.rb +153 -0
  28. data/lib/sidekiq/metrics/shared.rb +94 -0
  29. data/lib/sidekiq/metrics/tracking.rb +134 -0
  30. data/lib/sidekiq/middleware/chain.rb +87 -41
  31. data/lib/sidekiq/middleware/current_attributes.rb +63 -0
  32. data/lib/sidekiq/middleware/i18n.rb +6 -4
  33. data/lib/sidekiq/middleware/modules.rb +21 -0
  34. data/lib/sidekiq/monitor.rb +1 -1
  35. data/lib/sidekiq/paginator.rb +8 -8
  36. data/lib/sidekiq/processor.rb +47 -41
  37. data/lib/sidekiq/rails.rb +22 -4
  38. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  39. data/lib/sidekiq/redis_connection.rb +84 -55
  40. data/lib/sidekiq/ring_buffer.rb +29 -0
  41. data/lib/sidekiq/scheduled.rb +55 -25
  42. data/lib/sidekiq/testing/inline.rb +4 -4
  43. data/lib/sidekiq/testing.rb +38 -39
  44. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  45. data/lib/sidekiq/version.rb +1 -1
  46. data/lib/sidekiq/web/action.rb +3 -3
  47. data/lib/sidekiq/web/application.rb +37 -13
  48. data/lib/sidekiq/web/csrf_protection.rb +30 -8
  49. data/lib/sidekiq/web/helpers.rb +60 -28
  50. data/lib/sidekiq/web/router.rb +4 -1
  51. data/lib/sidekiq/web.rb +38 -78
  52. data/lib/sidekiq/worker.rb +136 -13
  53. data/lib/sidekiq.rb +114 -31
  54. data/sidekiq.gemspec +12 -4
  55. data/web/assets/images/apple-touch-icon.png +0 -0
  56. data/web/assets/javascripts/application.js +113 -60
  57. data/web/assets/javascripts/chart.min.js +13 -0
  58. data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
  59. data/web/assets/javascripts/dashboard.js +50 -67
  60. data/web/assets/javascripts/graph.js +16 -0
  61. data/web/assets/javascripts/metrics.js +262 -0
  62. data/web/assets/stylesheets/application-dark.css +36 -36
  63. data/web/assets/stylesheets/application-rtl.css +0 -4
  64. data/web/assets/stylesheets/application.css +82 -237
  65. data/web/locales/ar.yml +8 -2
  66. data/web/locales/el.yml +43 -19
  67. data/web/locales/en.yml +11 -1
  68. data/web/locales/es.yml +18 -2
  69. data/web/locales/fr.yml +8 -1
  70. data/web/locales/ja.yml +3 -0
  71. data/web/locales/lt.yml +1 -1
  72. data/web/locales/pt-br.yml +27 -9
  73. data/web/views/_footer.erb +1 -1
  74. data/web/views/_job_info.erb +1 -1
  75. data/web/views/_nav.erb +1 -1
  76. data/web/views/_poll_link.erb +2 -5
  77. data/web/views/_summary.erb +7 -7
  78. data/web/views/busy.erb +50 -19
  79. data/web/views/dashboard.erb +23 -14
  80. data/web/views/dead.erb +1 -1
  81. data/web/views/layout.erb +2 -1
  82. data/web/views/metrics.erb +69 -0
  83. data/web/views/metrics_for_job.erb +87 -0
  84. data/web/views/morgue.erb +6 -6
  85. data/web/views/queue.erb +15 -11
  86. data/web/views/queues.erb +3 -3
  87. data/web/views/retries.erb +7 -7
  88. data/web/views/retry.erb +1 -1
  89. data/web/views/scheduled.erb +1 -1
  90. metadata +43 -36
  91. data/.github/ISSUE_TEMPLATE/bug_report.md +0 -20
  92. data/.github/contributing.md +0 -32
  93. data/.github/workflows/ci.yml +0 -41
  94. data/.gitignore +0 -13
  95. data/.standard.yml +0 -20
  96. data/3.0-Upgrade.md +0 -70
  97. data/4.0-Upgrade.md +0 -53
  98. data/5.0-Upgrade.md +0 -56
  99. data/6.0-Upgrade.md +0 -72
  100. data/COMM-LICENSE +0 -97
  101. data/Ent-2.0-Upgrade.md +0 -37
  102. data/Ent-Changes.md +0 -281
  103. data/Gemfile +0 -24
  104. data/Gemfile.lock +0 -192
  105. data/Pro-2.0-Upgrade.md +0 -138
  106. data/Pro-3.0-Upgrade.md +0 -44
  107. data/Pro-4.0-Upgrade.md +0 -35
  108. data/Pro-5.0-Upgrade.md +0 -25
  109. data/Pro-Changes.md +0 -805
  110. data/Rakefile +0 -10
  111. data/code_of_conduct.md +0 -50
  112. data/lib/generators/sidekiq/worker_generator.rb +0 -57
  113. data/lib/sidekiq/exception_handler.rb +0 -27
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,14 +1,16 @@
 # frozen_string_literal: true
 
 require "sidekiq"
+require "sidekiq/component"
 
-module Sidekiq
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2
 
-    UnitOfWork = Struct.new(:queue, :job) {
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
         # nothing to do
       end
@@ -18,48 +20,56 @@ module Sidekiq
       end
 
       def requeue
-        Sidekiq.redis do |conn|
+        config.redis do |conn|
          conn.rpush(queue, job)
        end
      end
    }
 
-    def initialize(options)
-      raise ArgumentError, "missing queue list" unless options[:queues]
-      @options = options
-      @strictly_ordered_queues = !!@options[:strict]
-      @queues = @options[:queues].map { |q| "queue:#{q}" }
+    def initialize(config)
+      raise ArgumentError, "missing queue list" unless config[:queues]
+      @config = config
+      @strictly_ordered_queues = !!@config[:strict]
+      @queues = @config[:queues].map { |q| "queue:#{q}" }
       if @strictly_ordered_queues
         @queues.uniq!
-        @queues << TIMEOUT
+        @queues << {timeout: TIMEOUT}
       end
     end
 
     def retrieve_work
-      work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
-      UnitOfWork.new(*work) if work
+      qs = queues_cmd
+      # 4825 Sidekiq Pro with all queues paused will return an
+      # empty set of queues with a trailing TIMEOUT value.
+      if qs.size <= 1
+        sleep(TIMEOUT)
+        return nil
+      end
+
+      queue, job = redis { |conn| conn.brpop(*qs) }
+      UnitOfWork.new(queue, job, config) if queue
     end
 
     def bulk_requeue(inprogress, options)
       return if inprogress.empty?
 
-      Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+      logger.debug { "Re-queueing terminated jobs" }
       jobs_to_requeue = {}
       inprogress.each do |unit_of_work|
         jobs_to_requeue[unit_of_work.queue] ||= []
         jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
       end
 
-      Sidekiq.redis do |conn|
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
           jobs_to_requeue.each do |queue, jobs|
-            conn.rpush(queue, jobs)
+            pipeline.rpush(queue, jobs)
           end
         end
       end
-      Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
     rescue => ex
-      Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
 
     # Creating the Redis#brpop command takes into account any
@@ -71,9 +81,10 @@ module Sidekiq
       if @strictly_ordered_queues
         @queues
       else
-        queues = @queues.shuffle!.uniq
-        queues << TIMEOUT
-        queues
+        permute = @queues.shuffle
+        permute.uniq!
+        permute << {timeout: TIMEOUT}
+        permute
       end
     end
   end
data/lib/sidekiq/job.rb ADDED
@@ -0,0 +1,13 @@
+require "sidekiq/worker"
+
+module Sidekiq
+  # Sidekiq::Job is a new alias for Sidekiq::Worker as of Sidekiq 6.3.0.
+  # Use `include Sidekiq::Job` rather than `include Sidekiq::Worker`.
+  #
+  # The term "worker" is too generic and overly confusing, used in several
+  # different contexts meaning different things. Many people call a Sidekiq
+  # process a "worker". Some people call the thread that executes jobs a
+  # "worker". This change brings Sidekiq closer to ActiveJob where your job
+  # classes extend ApplicationJob.
+  Job = Worker
+end
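
Because Job is a plain constant alias for Worker, existing classes keep working unchanged and new classes can adopt the shorter name right away. A minimal sketch of the intended usage (the class name and arguments are illustrative, not part of the gem):

    class HardJob
      include Sidekiq::Job            # identical to `include Sidekiq::Worker`
      sidekiq_options queue: "default", retry: 5

      def perform(user_id, mode)
        # do the actual work for user_id in the given mode
      end
    end

    HardJob.perform_async(1234, "fast")   # enqueues exactly as before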
data/lib/sidekiq/job_logger.rb CHANGED
@@ -12,46 +12,34 @@ module Sidekiq
 
       yield
 
-      with_elapsed_time_context(start) do
-        @logger.info("done")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("done")
     rescue Exception
-      with_elapsed_time_context(start) do
-        @logger.info("fail")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("fail")
 
       raise
     end
 
     def prepare(job_hash, &block)
-      level = job_hash["log_level"]
-      if level
-        @logger.log_at(level) do
-          Sidekiq::Context.with(job_hash_context(job_hash), &block)
-        end
-      else
-        Sidekiq::Context.with(job_hash_context(job_hash), &block)
-      end
-    end
-
-    def job_hash_context(job_hash)
       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
       # attribute to expose the underlying thing.
       h = {
-        class: job_hash["wrapped"] || job_hash["class"],
+        class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
         jid: job_hash["jid"]
       }
-      h[:bid] = job_hash["bid"] if job_hash["bid"]
-      h[:tags] = job_hash["tags"] if job_hash["tags"]
-      h
-    end
-
-    def with_elapsed_time_context(start, &block)
-      Sidekiq::Context.with(elapsed_time_context(start), &block)
-    end
+      h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+      h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")
 
-    def elapsed_time_context(start)
-      {elapsed: elapsed(start).to_s}
+      Thread.current[:sidekiq_context] = h
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level, &block)
+      else
+        yield
+      end
+    ensure
+      Thread.current[:sidekiq_context] = nil
     end
 
     private
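
The "log_level" key that prepare honors comes straight from the job payload, so a single class can opt into more verbose logging without changing the global level. A sketch of how that is typically set, assuming the per-job log_level option on a standard job class (the class name is illustrative):

    class VerboseJob
      include Sidekiq::Job
      # Stored in the job hash as "log_level" and applied via @logger.log_at
      # only while this job executes.
      sidekiq_options log_level: :debug

      def perform(order_id)
        logger.debug { "processing order #{order_id}" }   # emitted even when the global level is INFO
      end
    end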
data/lib/sidekiq/job_retry.rb CHANGED
@@ -1,10 +1,8 @@
 # frozen_string_literal: true
 
-require "sidekiq/scheduled"
-require "sidekiq/api"
-
 require "zlib"
 require "base64"
+require "sidekiq/component"
 
 module Sidekiq
   ##
@@ -25,18 +23,19 @@ module Sidekiq
   #
   # A job looks like:
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
   #
-  # We'll add a bit more data to the job to support retries:
+  # Relevant options for job retries:
   #
-  #  * 'queue' - the queue to use
+  #  * 'queue' - the queue for the initial job
+  #  * 'retry_queue' - if job retries should be pushed to a different (e.g. lower priority) queue
   #  * 'retry_count' - number of times we've retried so far.
   #  * 'error_message' - the message from the exception
   #  * 'error_class' - the exception class
@@ -52,28 +51,31 @@ module Sidekiq
   #
   #     Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular worker with:
+  # or limit the number of retries for a particular job and send retries to
+  # a low priority queue with:
   #
-  #    class MyWorker
-  #      include Sidekiq::Worker
-  #      sidekiq_options :retry => 10
+  #    class MyJob
+  #      include Sidekiq::Job
+  #      sidekiq_options retry: 10, retry_queue: 'low'
   #    end
   #
   class JobRetry
     class Handled < ::RuntimeError; end
+
     class Skip < Handled; end
 
-    include Sidekiq::Util
+    include Sidekiq::Component
 
     DEFAULT_MAX_RETRY_ATTEMPTS = 25
 
-    def initialize(options = {})
-      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end
 
     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the worker to be instantiated.
+    # require the job to be instantiated.
     def global(jobstr, queue)
       yield
     rescue Handled => ex
@@ -87,7 +89,7 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"]
-        attempt_retry(nil, msg, queue, e)
+        process_retry(nil, msg, queue, e)
       else
         Sidekiq.death_handlers.each do |handler|
           handler.call(msg, e)
@@ -100,14 +102,14 @@ module Sidekiq
     end
 
     # The local retry support means that any errors that occur within
-    # this block can be associated with the given worker instance.
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, jobstr, queue)
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -120,11 +122,11 @@ module Sidekiq
 
       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] = worker.class.get_sidekiq_options["retry"]
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end
 
       raise e unless msg["retry"]
-      attempt_retry(worker, msg, queue, e)
+      process_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -132,10 +134,10 @@ module Sidekiq
 
     private
 
-    # Note that +worker+ can be nil here if an error is raised before we can
-    # instantiate the worker instance. All access must be guarded and
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def attempt_retry(worker, msg, queue, exception)
+    def process_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)
 
       msg["queue"] = (msg["retry_queue"] || queue)
@@ -166,24 +168,54 @@ module Sidekiq
         msg["error_backtrace"] = compress_backtrace(lines)
       end
 
-      if count < max_retry_attempts
-        delay = delay_for(worker, count, exception)
-        # Logging here can break retries if the logging device raises ENOSPC #3979
-        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
-        retry_at = Time.now.to_f + delay
-        payload = Sidekiq.dump_json(msg)
-        Sidekiq.redis do |conn|
-          conn.zadd("retry", retry_at.to_s, payload)
-        end
-      else
-        # Goodbye dear message, you (re)tried your best I'm sure.
-        retries_exhausted(worker, msg, exception)
+      # Goodbye dear message, you (re)tried your best I'm sure.
+      return retries_exhausted(jobinst, msg, exception) if count >= max_retry_attempts
+
+      strategy, delay = delay_for(jobinst, count, exception)
+      case strategy
+      when :discard
+        return # poof!
+      when :kill
+        return retries_exhausted(jobinst, msg, exception)
+      end
+
+      # Logging here can break retries if the logging device raises ENOSPC #3979
+      # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+      jitter = rand(10) * (count + 1)
+      retry_at = Time.now.to_f + delay + jitter
+      payload = Sidekiq.dump_json(msg)
+      redis do |conn|
+        conn.zadd("retry", retry_at.to_s, payload)
       end
     end
 
-    def retries_exhausted(worker, msg, exception)
+    # returns (strategy, seconds)
+    def delay_for(jobinst, count, exception)
+      rv = begin
+        # sidekiq_retry_in can return two different things:
+        # 1. When to retry next, as an integer of seconds
+        # 2. A symbol which re-routes the job elsewhere, e.g. :discard, :kill, :default
+        jobinst&.sidekiq_retry_in_block&.call(count, exception)
+      rescue Exception => e
+        handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
+        nil
+      end
+
+      delay = (count**4) + 15
+      if Integer === rv && rv > 0
+        delay = rv
+      elsif rv == :discard
+        return [:discard, nil] # do nothing, job goes poof
+      elsif rv == :kill
+        return [:kill, nil]
+      end
+
+      [:default, delay]
+    end
+
+    def retries_exhausted(jobinst, msg, exception)
       begin
-        block = worker&.sidekiq_retries_exhausted_block
+        block = jobinst&.sidekiq_retries_exhausted_block
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -191,7 +223,7 @@ module Sidekiq
 
       send_to_morgue(msg) unless msg["dead"] == false
 
-      Sidekiq.death_handlers.each do |handler|
+      config.death_handlers.each do |handler|
        handler.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -201,7 +233,15 @@ module Sidekiq
     def send_to_morgue(msg)
       logger.info { "Adding dead #{msg["class"]} job #{msg["jid"]}" }
       payload = Sidekiq.dump_json(msg)
-      DeadSet.new.kill(payload, notify_failure: false)
+      now = Time.now.to_f
+
+      config.redis do |conn|
+        conn.multi do |xa|
+          xa.zadd("dead", now.to_s, payload)
+          xa.zremrangebyscore("dead", "-inf", now - config[:dead_timeout_in_seconds])
+          xa.zremrangebyrank("dead", 0, - config[:dead_max_jobs])
+        end
+      end
     end
 
     def retry_attempts_from(msg_retry, default)
@@ -212,26 +252,6 @@ module Sidekiq
       end
     end
 
-    def delay_for(worker, count, exception)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
-        return custom_retry_in if custom_retry_in > 0
-      end
-      seconds_to_delay(count)
-    end
-
-    # delayed_job uses the same basic formula
-    def seconds_to_delay(count)
-      (count**4) + 15 + (rand(30) * (count + 1))
-    end
-
-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
-    rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
-      nil
-    end
-
     def exception_caused_by_shutdown?(e, checked_causes = [])
       return false unless e.cause
 
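Two behavioral notes fall out of the hunks above: the default backoff is now (count ** 4) + 15 seconds plus a separate jitter of rand(10) * (count + 1), so the first retry lands roughly 15 to 24 seconds out while count 9 is already about 1.8 hours, and sidekiq_retry_in may return a Symbol that re-routes the job instead of an Integer delay. A sketch of the per-class hook (the job and exception classes below are illustrative):

    class FlakyApiJob
      include Sidekiq::Job

      # Illustrative exception types for the example.
      RateLimited  = Class.new(StandardError)
      InvalidInput = Class.new(StandardError)

      # Return an Integer for a custom delay in seconds, or a Symbol:
      #   :discard -> drop the job silently, :kill -> send it straight to the Dead set.
      sidekiq_retry_in do |count, exception|
        case exception
        when RateLimited then 60 * (count + 1)   # back off linearly on rate limits
        when InvalidInput then :discard          # retrying will never help
        else :kill                               # surface anything else in the Dead set
        end
      end

      def perform(payload)
        # call the flaky API...
      end
    end
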
data/lib/sidekiq/job_util.rb ADDED
@@ -0,0 +1,71 @@
+require "securerandom"
+require "time"
+
+module Sidekiq
+  module JobUtil
+    # These functions encapsulate various job utilities.
+
+    TRANSIENT_ATTRIBUTES = %w[]
+
+    def validate(item)
+      raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
+      raise(ArgumentError, "Job args must be an Array: `#{item}`") unless item["args"].is_a?(Array)
+      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
+      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
+      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end
+
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq[:on_complex_arguments] == :raise
+        msg = <<~EOM
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
+        EOM
+        raise(ArgumentError, msg) unless json_safe?(item)
+      elsif Sidekiq[:on_complex_arguments] == :warn
+        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
+          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
+          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
+        EOM
+      end
+    end
+
+    def normalize_item(item)
+      validate(item)
+
+      # merge in the default sidekiq_options for the item's class and/or wrapped element
+      # this allows ActiveJobs to control sidekiq_options too.
+      defaults = normalized_hash(item["class"])
+      defaults = defaults.merge(item["wrapped"].get_sidekiq_options) if item["wrapped"].respond_to?(:get_sidekiq_options)
+      item = defaults.merge(item)
+
+      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""
+
+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
+      item["class"] = item["class"].to_s
+      item["queue"] = item["queue"].to_s
+      item["created_at"] ||= Time.now.to_f
+      item
+    end
+
+    def normalized_hash(item_class)
+      if item_class.is_a?(Class)
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+        item_class.get_sidekiq_options
+      else
+        Sidekiq.default_job_options
+      end
+    end
+
+    private
+
+    def json_safe?(item)
+      JSON.parse(JSON.dump(item["args"])) == item["args"]
+    end
+  end
+end
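
The verify_json check is driven by Sidekiq[:on_complex_arguments], which Sidekiq.strict_args! (referenced in the warning text above) switches to raising. A sketch of the effect from the caller's side (the job class and arguments are illustrative):

    # In an initializer: reject non-JSON-native arguments instead of only warning.
    Sidekiq.strict_args!

    class ReportJob
      include Sidekiq::Job
      def perform(user_id, dates)
        # body irrelevant for the example
      end
    end

    ReportJob.perform_async(42, "2022-01-01".."2022-01-31")
    # => ArgumentError: a Range does not survive JSON.parse(JSON.dump(args)),
    #    so json_safe? is false and the push is rejected.

    ReportJob.perform_async(42, ["2022-01-01", "2022-01-31"])
    # => fine: Strings, Integers, Arrays and Hashes round-trip through JSON unchanged.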