sidekiq 6.4.0 → 6.5.1

This version of sidekiq has been flagged as a potentially problematic release.

Files changed (52)
  1. checksums.yaml +4 -4
  2. data/Changes.md +54 -1
  3. data/README.md +6 -1
  4. data/bin/sidekiq +3 -3
  5. data/bin/sidekiqload +70 -66
  6. data/bin/sidekiqmon +1 -1
  7. data/lib/sidekiq/.DS_Store +0 -0
  8. data/lib/sidekiq/api.rb +109 -78
  9. data/lib/sidekiq/cli.rb +47 -38
  10. data/lib/sidekiq/client.rb +42 -28
  11. data/lib/sidekiq/component.rb +64 -0
  12. data/lib/sidekiq/delay.rb +2 -2
  13. data/lib/sidekiq/extensions/action_mailer.rb +2 -2
  14. data/lib/sidekiq/extensions/active_record.rb +2 -2
  15. data/lib/sidekiq/extensions/class_methods.rb +2 -2
  16. data/lib/sidekiq/extensions/generic_proxy.rb +3 -3
  17. data/lib/sidekiq/fetch.rb +18 -16
  18. data/lib/sidekiq/job_logger.rb +15 -27
  19. data/lib/sidekiq/job_retry.rb +29 -28
  20. data/lib/sidekiq/job_util.rb +15 -9
  21. data/lib/sidekiq/launcher.rb +54 -52
  22. data/lib/sidekiq/logger.rb +8 -18
  23. data/lib/sidekiq/manager.rb +28 -25
  24. data/lib/sidekiq/middleware/chain.rb +22 -13
  25. data/lib/sidekiq/middleware/current_attributes.rb +4 -0
  26. data/lib/sidekiq/middleware/i18n.rb +6 -4
  27. data/lib/sidekiq/middleware/modules.rb +21 -0
  28. data/lib/sidekiq/monitor.rb +1 -1
  29. data/lib/sidekiq/paginator.rb +8 -8
  30. data/lib/sidekiq/processor.rb +38 -38
  31. data/lib/sidekiq/rails.rb +15 -8
  32. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  33. data/lib/sidekiq/redis_connection.rb +81 -48
  34. data/lib/sidekiq/ring_buffer.rb +29 -0
  35. data/lib/sidekiq/scheduled.rb +11 -10
  36. data/lib/sidekiq/testing/inline.rb +4 -4
  37. data/lib/sidekiq/testing.rb +37 -36
  38. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  39. data/lib/sidekiq/version.rb +1 -1
  40. data/lib/sidekiq/web/csrf_protection.rb +2 -2
  41. data/lib/sidekiq/web/helpers.rb +5 -5
  42. data/lib/sidekiq/web.rb +3 -3
  43. data/lib/sidekiq/worker.rb +20 -17
  44. data/lib/sidekiq.rb +98 -30
  45. data/web/assets/javascripts/application.js +58 -26
  46. data/web/assets/stylesheets/application.css +1 -0
  47. data/web/locales/pt-br.yml +27 -9
  48. data/web/views/_summary.erb +1 -1
  49. data/web/views/busy.erb +3 -3
  50. metadata +9 -5
  51. data/lib/sidekiq/exception_handler.rb +0 -27
  52. data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/client.rb CHANGED
@@ -15,7 +15,7 @@ module Sidekiq
     # client.middleware do |chain|
     # chain.use MyClientMiddleware
     # end
-    # client.push('class' => 'SomeWorker', 'args' => [1,2,3])
+    # client.push('class' => 'SomeJob', 'args' => [1,2,3])
     #
     # All client instances default to the globally-defined
     # Sidekiq.client_middleware but you can change as necessary.
@@ -49,16 +49,16 @@ module Sidekiq
     # The main method used to push a job to Redis. Accepts a number of options:
     #
     # queue - the named queue to use, default 'default'
-    # class - the worker class to call, required
+    # class - the job class to call, required
     # args - an array of simple arguments to the perform method, must be JSON-serializable
     # at - timestamp to schedule the job (optional), must be Numeric (e.g. Time.now.to_f)
     # retry - whether to retry this job if it fails, default true or an integer number of retries
     # backtrace - whether to save any error backtrace, default false
     #
     # If class is set to the class name, the jobs' options will be based on Sidekiq's default
-    # worker options. Otherwise, they will be based on the job class's options.
+    # job options. Otherwise, they will be based on the job class's options.
     #
-    # Any options valid for a worker class's sidekiq_options are also available here.
+    # Any options valid for a job class's sidekiq_options are also available here.
     #
     # All options must be strings, not symbols. NB: because we are serializing to JSON, all
     # symbols in 'args' will be converted to strings. Note that +backtrace: true+ can take quite a bit of
@@ -67,13 +67,15 @@ module Sidekiq
     # Returns a unique Job ID. If middleware stops the job, nil will be returned instead.
     #
     # Example:
-    # push('queue' => 'my_queue', 'class' => MyWorker, 'args' => ['foo', 1, :bat => 'bar'])
+    # push('queue' => 'my_queue', 'class' => MyJob, 'args' => ['foo', 1, :bat => 'bar'])
     #
     def push(item)
       normed = normalize_item(item)
-      payload = process_single(item["class"], normed)
-
+      payload = middleware.invoke(item["class"], normed, normed["queue"], @redis_pool) do
+        normed
+      end
       if payload
+        verify_json(payload)
         raw_push([payload])
         payload["jid"]
       end
@@ -101,12 +103,17 @@ module Sidekiq
       raise ArgumentError, "Job 'at' must be a Numeric or an Array of Numeric timestamps" if at && (Array(at).empty? || !Array(at).all? { |entry| entry.is_a?(Numeric) })
       raise ArgumentError, "Job 'at' Array must have same size as 'args' Array" if at.is_a?(Array) && at.size != args.size

+      jid = items.delete("jid")
+      raise ArgumentError, "Explicitly passing 'jid' when pushing more than one job is not supported" if jid && args.size > 1
+
       normed = normalize_item(items)
       payloads = args.map.with_index { |job_args, index|
-        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12), "enqueued_at" => Time.now.to_f)
+        copy = normed.merge("args" => job_args, "jid" => SecureRandom.hex(12))
         copy["at"] = (at.is_a?(Array) ? at[index] : at) if at
-
-        result = process_single(items["class"], copy)
+        result = middleware.invoke(items["class"], copy, copy["queue"], @redis_pool) do
+          verify_json(copy)
+          copy
+        end
         result || nil
       }.compact

@@ -119,8 +126,8 @@ module Sidekiq
     #
     # pool = ConnectionPool.new { Redis.new }
     # Sidekiq::Client.via(pool) do
-    # SomeWorker.perform_async(1,2,3)
-    # SomeOtherWorker.perform_async(1,2,3)
+    # SomeJob.perform_async(1,2,3)
+    # SomeOtherJob.perform_async(1,2,3)
     # end
     #
     # Generally this is only needed for very large Sidekiq installs processing
@@ -145,10 +152,10 @@ module Sidekiq
     end

     # Resque compatibility helpers. Note all helpers
-    # should go through Worker#client_push.
+    # should go through Sidekiq::Job#client_push.
     #
     # Example usage:
-    # Sidekiq::Client.enqueue(MyWorker, 'foo', 1, :bat => 'bar')
+    # Sidekiq::Client.enqueue(MyJob, 'foo', 1, :bat => 'bar')
     #
     # Messages are enqueued to the 'default' queue.
     #
@@ -157,14 +164,14 @@ module Sidekiq
     end

     # Example usage:
-    # Sidekiq::Client.enqueue_to(:queue_name, MyWorker, 'foo', 1, :bat => 'bar')
+    # Sidekiq::Client.enqueue_to(:queue_name, MyJob, 'foo', 1, :bat => 'bar')
     #
     def enqueue_to(queue, klass, *args)
       klass.client_push("queue" => queue, "class" => klass, "args" => args)
     end

     # Example usage:
-    # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+    # Sidekiq::Client.enqueue_to_in(:queue_name, 3.minutes, MyJob, 'foo', 1, :bat => 'bar')
     #
     def enqueue_to_in(queue, interval, klass, *args)
       int = interval.to_f
@@ -178,7 +185,7 @@ module Sidekiq
     end

     # Example usage:
-    # Sidekiq::Client.enqueue_in(3.minutes, MyWorker, 'foo', 1, :bat => 'bar')
+    # Sidekiq::Client.enqueue_in(3.minutes, MyJob, 'foo', 1, :bat => 'bar')
     #
     def enqueue_in(interval, klass, *args)
       klass.perform_in(interval, *args)
@@ -189,8 +196,23 @@ module Sidekiq

     def raw_push(payloads)
       @redis_pool.with do |conn|
-        conn.pipelined do
-          atomic_push(conn, payloads)
+        retryable = true
+        begin
+          conn.pipelined do |pipeline|
+            atomic_push(pipeline, payloads)
+          end
+        rescue RedisConnection.adapter::BaseError => ex
+          # 2550 Failover can cause the server to become a replica, need
+          # to disconnect and reopen the socket to get back to the primary.
+          # 4495 Use the same logic if we have a "Not enough replicas" error from the primary
+          # 4985 Use the same logic when a blocking command is force-unblocked
+          # The retry logic is copied from sidekiq.rb
+          if retryable && ex.message =~ /READONLY|NOREPLICAS|UNBLOCKED/
+            conn.disconnect!
+            retryable = false
+            retry
+          end
+          raise
         end
       end
       true
@@ -198,7 +220,7 @@ module Sidekiq

     def atomic_push(conn, payloads)
       if payloads.first.key?("at")
-        conn.zadd("schedule", payloads.map { |hash|
+        conn.zadd("schedule", payloads.flat_map { |hash|
           at = hash.delete("at").to_s
           [at, Sidekiq.dump_json(hash)]
         })
@@ -213,13 +235,5 @@ module Sidekiq
         conn.lpush("queue:#{queue}", to_push)
       end
     end
-
-    def process_single(worker_class, item)
-      queue = item["queue"]
-
-      middleware.invoke(worker_class, item, queue, @redis_pool) do
-        item
-      end
-    end
   end
 end
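
The push path above now feeds the normalized payload through client middleware and then through the new verify_json check before raw_push writes it to Redis. A minimal sketch of calling code that exercises this path; SomeJob and its arguments are placeholders, not part of this diff:

    require "sidekiq"

    # Hypothetical job class used only for illustration.
    class SomeJob
      include Sidekiq::Job
      def perform(order_id, note); end
    end

    # Runs client middleware, then verify_json, then LPUSHes to queue:default
    # and returns the generated jid (or nil if middleware stopped the job).
    jid = Sidekiq::Client.push("class" => SomeJob, "args" => [123, "resend"])

    # Arguments that aren't native JSON types (a Symbol value here) are caught
    # by verify_json: a warning by default, an ArgumentError under Sidekiq.strict_args!.
    Sidekiq::Client.push("class" => SomeJob, "args" => [123, {note: :resend}])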
data/lib/sidekiq/component.rb ADDED
@@ -0,0 +1,64 @@
+module Sidekiq
+  ##
+  # Sidekiq::Component assumes a config instance is available at @config
+  module Component # :nodoc:
+    attr_reader :config
+
+    def watchdog(last_words)
+      yield
+    rescue Exception => ex
+      handle_exception(ex, {context: last_words})
+      raise ex
+    end
+
+    def safe_thread(name, &block)
+      Thread.new do
+        Thread.current.name = name
+        watchdog(name, &block)
+      end
+    end
+
+    def logger
+      config.logger
+    end
+
+    def redis(&block)
+      config.redis(&block)
+    end
+
+    def tid
+      Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+    end
+
+    def hostname
+      ENV["DYNO"] || Socket.gethostname
+    end
+
+    def process_nonce
+      @@process_nonce ||= SecureRandom.hex(6)
+    end
+
+    def identity
+      @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+    end
+
+    def handle_exception(ex, ctx = {})
+      config.handle_exception(ex, ctx)
+    end
+
+    def fire_event(event, options = {})
+      reverse = options[:reverse]
+      reraise = options[:reraise]
+
+      arr = config[:lifecycle_events][event]
+      arr.reverse! if reverse
+      arr.each do |block|
+        block.call
+      rescue => ex
+        handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+        raise ex if reraise
+      end
+      arr.clear # once we've fired an event, we never fire it again
+    end
+  end
+end
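
Sidekiq::Component is the replacement for the removed Sidekiq::Util and Sidekiq::ExceptionHandler mixins: any class that sets @config gains logger, redis, safe_thread and handle_exception delegating to that config. A rough sketch of an internal-style class using it; TrivialPoller is illustrative, and the idea that the global Sidekiq module can act as the config (it responds to [], logger and redis) is my assumption, not stated in this diff:

    require "sidekiq"
    require "sidekiq/component"

    class TrivialPoller
      include Sidekiq::Component

      # The mixin only assumes @config responds to #logger, #redis,
      # #handle_exception and #[] for option lookup.
      def initialize(config)
        @config = config
      end

      def start
        safe_thread("trivial-poller") do
          loop do
            due = redis { |conn| conn.zrangebyscore("schedule", "-inf", Time.now.to_f.to_s) }
            logger.info { "#{due.size} scheduled jobs due" }
            sleep 5
          end
        end
      end
    end

    TrivialPoller.new(Sidekiq).start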
data/lib/sidekiq/delay.rb CHANGED
@@ -1,9 +1,9 @@
 # frozen_string_literal: true

-module Sidekiq
+module Sidekiq # :nodoc:
   module Extensions
     def self.enable_delay!
-      Sidekiq.logger.error "Sidekiq's Delayed Extensions will be removed in Sidekiq 7.0. #{caller(1..1).first}"
+      warn "Sidekiq's Delayed Extensions will be removed in Sidekiq 7.0", uplevel: 1

       if defined?(::ActiveSupport)
         require "sidekiq/extensions/active_record"
data/lib/sidekiq/extensions/action_mailer.rb CHANGED
@@ -16,8 +16,8 @@ module Sidekiq
       include Sidekiq::Worker

       def perform(yml)
-        (target, method_name, args, kwargs) = YAML.load(yml)
-        msg = kwargs.empty? ? target.public_send(method_name, *args) : target.public_send(method_name, *args, **kwargs)
+        (target, method_name, args) = YAML.load(yml)
+        msg = target.public_send(method_name, *args)
         # The email method can return nil, which causes ActionMailer to return
         # an undeliverable empty message.
         if msg
data/lib/sidekiq/extensions/active_record.rb CHANGED
@@ -18,8 +18,8 @@ module Sidekiq
       include Sidekiq::Worker

       def perform(yml)
-        (target, method_name, args, kwargs) = YAML.load(yml)
-        kwargs.empty? ? target.__send__(method_name, *args) : target.__send__(method_name, *args, **kwargs)
+        (target, method_name, args) = YAML.load(yml)
+        target.__send__(method_name, *args)
       end
     end

data/lib/sidekiq/extensions/class_methods.rb CHANGED
@@ -16,8 +16,8 @@ module Sidekiq
       include Sidekiq::Worker

       def perform(yml)
-        (target, method_name, args, kwargs) = YAML.load(yml)
-        kwargs.empty? ? target.__send__(method_name, *args) : target.__send__(method_name, *args, **kwargs)
+        (target, method_name, args) = YAML.load(yml)
+        target.__send__(method_name, *args)
       end
     end

data/lib/sidekiq/extensions/generic_proxy.rb CHANGED
@@ -10,16 +10,16 @@ module Sidekiq
       def initialize(performable, target, options = {})
         @performable = performable
         @target = target
-        @opts = options
+        @opts = options.transform_keys(&:to_s)
       end

-      def method_missing(name, *args, **kwargs)
+      def method_missing(name, *args)
         # Sidekiq has a limitation in that its message must be JSON.
         # JSON can't round trip real Ruby objects so we use YAML to
         # serialize the objects to a String. The YAML will be converted
         # to JSON and then deserialized on the other side back into a
         # Ruby object.
-        obj = [@target, name, args, kwargs]
+        obj = [@target, name, args]
         marshalled = ::YAML.dump(obj)
         if marshalled.size > SIZE_LIMIT
           ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
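
Taken together, the four extension diffs above drop keyword-argument capture from the delayed proxies: the YAML payload is back to a [target, method_name, args] triple and the proxy's options keys are stringified. A hedged sketch of what still round-trips; UserMailer and its method are placeholders:

    Sidekiq::Extensions.enable_delay!

    # Serialized by Proxy#method_missing as YAML for [UserMailer, :welcome_email, [42]],
    # then wrapped in a JSON job payload.
    UserMailer.delay.welcome_email(42)

    # Keyword-style options now need to travel as a plain trailing Hash argument,
    # since **kwargs are no longer captured separately.
    UserMailer.delay_for(300).welcome_email(42, {"locale" => "de"})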
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,14 +1,16 @@
 # frozen_string_literal: true

 require "sidekiq"
+require "sidekiq/component"

-module Sidekiq
+module Sidekiq # :nodoc:
   class BasicFetch
+    include Sidekiq::Component
     # We want the fetch operation to timeout every few seconds so the thread
     # can check if the process is shutting down.
     TIMEOUT = 2

-    UnitOfWork = Struct.new(:queue, :job) {
+    UnitOfWork = Struct.new(:queue, :job, :config) {
       def acknowledge
         # nothing to do
       end
@@ -18,17 +20,17 @@ module Sidekiq
       end

       def requeue
-        Sidekiq.redis do |conn|
+        config.redis do |conn|
           conn.rpush(queue, job)
         end
       end
     }

-    def initialize(options)
-      raise ArgumentError, "missing queue list" unless options[:queues]
-      @options = options
-      @strictly_ordered_queues = !!@options[:strict]
-      @queues = @options[:queues].map { |q| "queue:#{q}" }
+    def initialize(config)
+      raise ArgumentError, "missing queue list" unless config[:queues]
+      @config = config
+      @strictly_ordered_queues = !!@config[:strict]
+      @queues = @config[:queues].map { |q| "queue:#{q}" }
       if @strictly_ordered_queues
         @queues.uniq!
         @queues << TIMEOUT
@@ -44,30 +46,30 @@ module Sidekiq
         return nil
       end

-      work = Sidekiq.redis { |conn| conn.brpop(*qs) }
-      UnitOfWork.new(*work) if work
+      queue, job = redis { |conn| conn.brpop(*qs) }
+      UnitOfWork.new(queue, job, config) if queue
     end

     def bulk_requeue(inprogress, options)
       return if inprogress.empty?

-      Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+      logger.debug { "Re-queueing terminated jobs" }
       jobs_to_requeue = {}
       inprogress.each do |unit_of_work|
         jobs_to_requeue[unit_of_work.queue] ||= []
         jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
       end

-      Sidekiq.redis do |conn|
-        conn.pipelined do
+      redis do |conn|
+        conn.pipelined do |pipeline|
           jobs_to_requeue.each do |queue, jobs|
-            conn.rpush(queue, jobs)
+            pipeline.rpush(queue, jobs)
           end
         end
       end
-      Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+      logger.info("Pushed #{inprogress.size} jobs back to Redis")
     rescue => ex
-      Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+      logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")

     end
     # Creating the Redis#brpop command takes into account any
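
BasicFetch now receives the config object directly and threads it into every UnitOfWork, so requeue and bulk_requeue go through config.redis and the component logger instead of the Sidekiq globals. A rough construction sketch; the Sidekiq server wires this up internally, and using the Sidekiq module as the config object (it responds to [], redis and logger in 6.5) is my assumption:

    require "sidekiq"
    require "sidekiq/fetch"

    Sidekiq.options[:queues] = %w[critical default default] # "default" weighted 2x
    Sidekiq.options[:strict] = false

    fetcher = Sidekiq::BasicFetch.new(Sidekiq)
    work = fetcher.retrieve_work   # BRPOP across queue:critical / queue:default
    work&.requeue                  # pushes the raw payload back via config.redis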
data/lib/sidekiq/job_logger.rb CHANGED
@@ -12,46 +12,34 @@ module Sidekiq

       yield

-      with_elapsed_time_context(start) do
-        @logger.info("done")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("done")
     rescue Exception
-      with_elapsed_time_context(start) do
-        @logger.info("fail")
-      end
+      Sidekiq::Context.add(:elapsed, elapsed(start))
+      @logger.info("fail")

       raise
     end

     def prepare(job_hash, &block)
-      level = job_hash["log_level"]
-      if level
-        @logger.log_at(level) do
-          Sidekiq::Context.with(job_hash_context(job_hash), &block)
-        end
-      else
-        Sidekiq::Context.with(job_hash_context(job_hash), &block)
-      end
-    end
-
-    def job_hash_context(job_hash)
       # If we're using a wrapper class, like ActiveJob, use the "wrapped"
       # attribute to expose the underlying thing.
       h = {
         class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
         jid: job_hash["jid"]
       }
-      h[:bid] = job_hash["bid"] if job_hash["bid"]
-      h[:tags] = job_hash["tags"] if job_hash["tags"]
-      h
-    end
-
-    def with_elapsed_time_context(start, &block)
-      Sidekiq::Context.with(elapsed_time_context(start), &block)
-    end
+      h[:bid] = job_hash["bid"] if job_hash.has_key?("bid")
+      h[:tags] = job_hash["tags"] if job_hash.has_key?("tags")

-    def elapsed_time_context(start)
-      {elapsed: elapsed(start).to_s}
+      Thread.current[:sidekiq_context] = h
+      level = job_hash["log_level"]
+      if level
+        @logger.log_at(level, &block)
+      else
+        yield
+      end
+    ensure
+      Thread.current[:sidekiq_context] = nil
     end

     private
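
prepare now stores the job metadata hash in Thread.current[:sidekiq_context] for the duration of the block and honors a per-job "log_level" via @logger.log_at. From the job side that option is typically set with sidekiq_options; a hedged sketch (NoisyJob is a placeholder):

    class NoisyJob
      include Sidekiq::Job
      # Stored in the payload as "log_level" and picked up by
      # JobLogger#prepare, which wraps the job in logger.log_at(:debug).
      sidekiq_options log_level: :debug

      def perform(*)
        logger.debug "visible because this job runs with the logger at :debug"
      end
    end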
data/lib/sidekiq/job_retry.rb CHANGED
@@ -25,11 +25,11 @@ module Sidekiq
   #
   # A job looks like:
   #
-  # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+  # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
   #
   # The 'retry' option also accepts a number (in place of 'true'):
   #
-  # { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+  # { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
   #
   # The job will be retried this number of times before giving up. (If simply
   # 'true', Sidekiq retries 25 times)
@@ -53,11 +53,11 @@ module Sidekiq
   #
   # Sidekiq.options[:max_retries] = 7
   #
-  # or limit the number of retries for a particular worker and send retries to
+  # or limit the number of retries for a particular job and send retries to
   # a low priority queue with:
   #
-  # class MyWorker
-  #   include Sidekiq::Worker
+  # class MyJob
+  #   include Sidekiq::Job
   #   sidekiq_options retry: 10, retry_queue: 'low'
   # end
   #
@@ -66,17 +66,18 @@ module Sidekiq

     class Skip < Handled; end

-    include Sidekiq::Util
+    include Sidekiq::Component

     DEFAULT_MAX_RETRY_ATTEMPTS = 25

-    def initialize(options = {})
-      @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+    def initialize(options)
+      @config = options
+      @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
     end

     # The global retry handler requires only the barest of data.
     # We want to be able to retry as much as possible so we don't
-    # require the worker to be instantiated.
+    # require the job to be instantiated.
     def global(jobstr, queue)
       yield
     rescue Handled => ex
@@ -103,14 +104,14 @@ module Sidekiq
     end

     # The local retry support means that any errors that occur within
-    # this block can be associated with the given worker instance.
+    # this block can be associated with the given job instance.
     # This is required to support the `sidekiq_retries_exhausted` block.
     #
     # Note that any exception from the block is wrapped in the Skip
     # exception so the global block does not reprocess the error. The
     # Skip exception is unwrapped within Sidekiq::Processor#process before
     # calling the handle_exception handlers.
-    def local(worker, jobstr, queue)
+    def local(jobinst, jobstr, queue)
       yield
     rescue Handled => ex
       raise ex
@@ -123,11 +124,11 @@ module Sidekiq

       msg = Sidekiq.load_json(jobstr)
       if msg["retry"].nil?
-        msg["retry"] = worker.class.get_sidekiq_options["retry"]
+        msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
       end

       raise e unless msg["retry"]
-      attempt_retry(worker, msg, queue, e)
+      attempt_retry(jobinst, msg, queue, e)
       # We've handled this error associated with this job, don't
       # need to handle it at the global level
       raise Skip
@@ -135,10 +136,10 @@

     private

-    # Note that +worker+ can be nil here if an error is raised before we can
-    # instantiate the worker instance. All access must be guarded and
+    # Note that +jobinst+ can be nil here if an error is raised before we can
+    # instantiate the job instance. All access must be guarded and
     # best effort.
-    def attempt_retry(worker, msg, queue, exception)
+    def attempt_retry(jobinst, msg, queue, exception)
       max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

       msg["queue"] = (msg["retry_queue"] || queue)
@@ -170,23 +171,23 @@
       end

       if count < max_retry_attempts
-        delay = delay_for(worker, count, exception)
+        delay = delay_for(jobinst, count, exception)
         # Logging here can break retries if the logging device raises ENOSPC #3979
         # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
         retry_at = Time.now.to_f + delay
         payload = Sidekiq.dump_json(msg)
-        Sidekiq.redis do |conn|
+        redis do |conn|
           conn.zadd("retry", retry_at.to_s, payload)
         end
       else
         # Goodbye dear message, you (re)tried your best I'm sure.
-        retries_exhausted(worker, msg, exception)
+        retries_exhausted(jobinst, msg, exception)
       end
     end

-    def retries_exhausted(worker, msg, exception)
+    def retries_exhausted(jobinst, msg, exception)
       begin
-        block = worker&.sidekiq_retries_exhausted_block
+        block = jobinst&.sidekiq_retries_exhausted_block
         block&.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -194,7 +195,7 @@
       end

       send_to_morgue(msg) unless msg["dead"] == false
-      Sidekiq.death_handlers.each do |handler|
+      config.death_handlers.each do |handler|
         handler.call(msg, exception)
       rescue => e
         handle_exception(e, {context: "Error calling death handler", job: msg})
@@ -215,19 +216,19 @@
         end
       end

-    def delay_for(worker, count, exception)
+    def delay_for(jobinst, count, exception)
       jitter = rand(10) * (count + 1)
-      if worker&.sidekiq_retry_in_block
-        custom_retry_in = retry_in(worker, count, exception).to_i
+      if jobinst&.sidekiq_retry_in_block
+        custom_retry_in = retry_in(jobinst, count, exception).to_i
         return custom_retry_in + jitter if custom_retry_in > 0
       end
       (count**4) + 15 + jitter
     end

-    def retry_in(worker, count, exception)
-      worker.sidekiq_retry_in_block.call(count, exception)
+    def retry_in(jobinst, count, exception)
+      jobinst.sidekiq_retry_in_block.call(count, exception)
     rescue Exception => e
-      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+      handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
       nil
     end

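The rename from worker to jobinst is internal; the hooks these methods call are unchanged. delay_for still consults a job's sidekiq_retry_in block and retries_exhausted still calls its sidekiq_retries_exhausted block, as sketched below (class name and durations are illustrative):

    class FlakyApiJob
      include Sidekiq::Job
      sidekiq_options retry: 10, retry_queue: "low"

      # Consulted by JobRetry#delay_for; a nil or zero return falls back to the
      # default (count**4) + 15 + jitter backoff.
      sidekiq_retry_in do |count, exception|
        60 * (count + 1) if exception.message =~ /rate limit/i
      end

      # Called by JobRetry#retries_exhausted just before the job goes to the morgue.
      sidekiq_retries_exhausted do |msg, exception|
        Sidekiq.logger.warn "Giving up on #{msg["class"]} #{msg["jid"]}: #{exception.message}"
      end

      def perform(id)
        # talk to the flaky API here
      end
    end
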
data/lib/sidekiq/job_util.rb CHANGED
@@ -4,7 +4,8 @@ require "time"
 module Sidekiq
   module JobUtil
     # These functions encapsulate various job utilities.
-    # They must be simple and free from side effects.
+
+    TRANSIENT_ATTRIBUTES = %w[]

     def validate(item)
       raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -12,16 +13,19 @@ module Sidekiq
       raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
       raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
       raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+    end

-      if Sidekiq.options[:on_complex_arguments] == :raise
+    def verify_json(item)
+      job_class = item["wrapped"] || item["class"]
+      if Sidekiq[:on_complex_arguments] == :raise
         msg = <<~EOM
-          Job arguments to #{item["class"]} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+          Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
           To disable this error, remove `Sidekiq.strict_args!` from your initializer.
         EOM
         raise(ArgumentError, msg) unless json_safe?(item)
-      elsif Sidekiq.options[:on_complex_arguments] == :warn
+      elsif Sidekiq[:on_complex_arguments] == :warn
         Sidekiq.logger.warn <<~EOM unless json_safe?(item)
-          Job arguments to #{item["class"]} do not serialize to JSON safely. This will raise an error in
+          Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
           Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
           by calling `Sidekiq.strict_args!` during Sidekiq initialization.
         EOM
@@ -39,20 +43,22 @@ module Sidekiq

       raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""

+      # remove job attributes which aren't necessary to persist into Redis
+      TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+      item["jid"] ||= SecureRandom.hex(12)
       item["class"] = item["class"].to_s
       item["queue"] = item["queue"].to_s
-      item["jid"] ||= SecureRandom.hex(12)
       item["created_at"] ||= Time.now.to_f
-
       item
     end

     def normalized_hash(item_class)
       if item_class.is_a?(Class)
-        raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+        raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
         item_class.get_sidekiq_options
       else
-        Sidekiq.default_worker_options
+        Sidekiq.default_job_options
       end
     end
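
verify_json is now a separate step, invoked from Client#push after middleware runs, and it reads Sidekiq[:on_complex_arguments], which Sidekiq.strict_args! flips to :raise (otherwise 6.x only logs a warning). A short sketch; SomeJob is a placeholder:

    # config/initializers/sidekiq.rb
    Sidekiq.strict_args!   # Sidekiq[:on_complex_arguments] is now :raise

    SomeJob.perform_async(3, "ok")   # fine: native JSON types
    SomeJob.perform_async(:done)
    # => ArgumentError: Job arguments to SomeJob must be native JSON types, ...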