sidekiq 6.4.1 → 6.5.0


Note: this release of sidekiq has been flagged as potentially problematic.

Files changed (43)
  1. checksums.yaml +4 -4
  2. data/Changes.md +43 -1
  3. data/README.md +1 -1
  4. data/bin/sidekiqload +16 -10
  5. data/lib/sidekiq/api.rb +42 -33
  6. data/lib/sidekiq/cli.rb +37 -36
  7. data/lib/sidekiq/client.rb +25 -26
  8. data/lib/sidekiq/component.rb +64 -0
  9. data/lib/sidekiq/extensions/generic_proxy.rb +1 -1
  10. data/lib/sidekiq/fetch.rb +15 -13
  11. data/lib/sidekiq/job_retry.rb +27 -26
  12. data/lib/sidekiq/job_util.rb +15 -9
  13. data/lib/sidekiq/launcher.rb +31 -29
  14. data/lib/sidekiq/logger.rb +5 -19
  15. data/lib/sidekiq/manager.rb +28 -25
  16. data/lib/sidekiq/middleware/chain.rb +22 -13
  17. data/lib/sidekiq/middleware/current_attributes.rb +4 -0
  18. data/lib/sidekiq/middleware/i18n.rb +6 -4
  19. data/lib/sidekiq/middleware/modules.rb +19 -0
  20. data/lib/sidekiq/monitor.rb +1 -1
  21. data/lib/sidekiq/paginator.rb +2 -2
  22. data/lib/sidekiq/processor.rb +38 -38
  23. data/lib/sidekiq/rails.rb +15 -8
  24. data/lib/sidekiq/redis_client_adapter.rb +154 -0
  25. data/lib/sidekiq/redis_connection.rb +81 -48
  26. data/lib/sidekiq/ring_buffer.rb +29 -0
  27. data/lib/sidekiq/scheduled.rb +11 -10
  28. data/lib/sidekiq/testing/inline.rb +4 -4
  29. data/lib/sidekiq/testing.rb +37 -36
  30. data/lib/sidekiq/transaction_aware_client.rb +45 -0
  31. data/lib/sidekiq/version.rb +1 -1
  32. data/lib/sidekiq/web/csrf_protection.rb +2 -2
  33. data/lib/sidekiq/web/helpers.rb +4 -4
  34. data/lib/sidekiq/worker.rb +18 -13
  35. data/lib/sidekiq.rb +97 -30
  36. data/web/assets/javascripts/application.js +58 -26
  37. data/web/assets/stylesheets/application.css +1 -2
  38. data/web/locales/pt-br.yml +27 -9
  39. data/web/views/_summary.erb +1 -1
  40. data/web/views/busy.erb +3 -3
  41. metadata +7 -4
  42. data/lib/sidekiq/exception_handler.rb +0 -27
  43. data/lib/sidekiq/util.rb +0 -108
data/lib/sidekiq/component.rb ADDED
@@ -0,0 +1,64 @@
+ module Sidekiq
+   ##
+   # Sidekiq::Component assumes a config instance is available at @config
+   module Component
+     attr_reader :config
+
+     def watchdog(last_words)
+       yield
+     rescue Exception => ex
+       handle_exception(ex, {context: last_words})
+       raise ex
+     end
+
+     def safe_thread(name, &block)
+       Thread.new do
+         Thread.current.name = name
+         watchdog(name, &block)
+       end
+     end
+
+     def logger
+       config.logger
+     end
+
+     def redis(&block)
+       config.redis(&block)
+     end
+
+     def tid
+       Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
+     end
+
+     def hostname
+       ENV["DYNO"] || Socket.gethostname
+     end
+
+     def process_nonce
+       @@process_nonce ||= SecureRandom.hex(6)
+     end
+
+     def identity
+       @@identity ||= "#{hostname}:#{::Process.pid}:#{process_nonce}"
+     end
+
+     def handle_exception(ex, ctx = {})
+       config.handle_exception(ex, ctx)
+     end
+
+     def fire_event(event, options = {})
+       reverse = options[:reverse]
+       reraise = options[:reraise]
+
+       arr = config[:lifecycle_events][event]
+       arr.reverse! if reverse
+       arr.each do |block|
+         block.call
+       rescue => ex
+         handle_exception(ex, {context: "Exception during Sidekiq lifecycle event.", event: event})
+         raise ex if reraise
+       end
+       arr.clear # once we've fired an event, we never fire it again
+     end
+   end
+ end
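Sidekiq::Component is the 6.5 replacement for the old Sidekiq::Util mixin (util.rb is removed in this release, see the file list above): instead of reaching for the Sidekiq.logger / Sidekiq.redis globals, a collaborator carries its own @config and gets logging, Redis access, thread naming and exception handling from it. A minimal sketch of how a class might consume the mixin; HeartbeatPrinter is hypothetical, and the config object is assumed to respond to logger, redis, handle_exception and [], as the module's own comment requires:

require "sidekiq"
require "sidekiq/component"

# Hypothetical collaborator: anything that sets @config picks up the helpers.
class HeartbeatPrinter
  include Sidekiq::Component

  def initialize(config)
    @config = config # assumed to respond to logger, redis, handle_exception, []
  end

  def start
    # safe_thread names the thread and wraps the block in watchdog, so any
    # exception is reported through config.handle_exception before re-raising.
    safe_thread("heartbeat-printer") do
      redis { |conn| logger.info("#{identity} sees #{conn.scard("processes")} process(es)") }
    end
  end
end

Inside the gem, Launcher, BasicFetch and JobRetry (all further down in this diff) switch from include Sidekiq::Util to this module.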
data/lib/sidekiq/extensions/generic_proxy.rb CHANGED
@@ -10,7 +10,7 @@ module Sidekiq
    def initialize(performable, target, options = {})
      @performable = performable
      @target = target
-     @opts = options
+     @opts = options.transform_keys(&:to_s)
    end

    def method_missing(name, *args)
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,14 +1,16 @@
  # frozen_string_literal: true

  require "sidekiq"
+ require "sidekiq/component"

  module Sidekiq
    class BasicFetch
+     include Sidekiq::Component
      # We want the fetch operation to timeout every few seconds so the thread
      # can check if the process is shutting down.
      TIMEOUT = 2

-     UnitOfWork = Struct.new(:queue, :job) {
+     UnitOfWork = Struct.new(:queue, :job, :config) {
        def acknowledge
          # nothing to do
        end
@@ -18,17 +20,17 @@ module Sidekiq
        end

        def requeue
-         Sidekiq.redis do |conn|
+         config.redis do |conn|
            conn.rpush(queue, job)
          end
        end
      }

-     def initialize(options)
-       raise ArgumentError, "missing queue list" unless options[:queues]
-       @options = options
-       @strictly_ordered_queues = !!@options[:strict]
-       @queues = @options[:queues].map { |q| "queue:#{q}" }
+     def initialize(config)
+       raise ArgumentError, "missing queue list" unless config[:queues]
+       @config = config
+       @strictly_ordered_queues = !!@config[:strict]
+       @queues = @config[:queues].map { |q| "queue:#{q}" }
        if @strictly_ordered_queues
          @queues.uniq!
          @queues << TIMEOUT
@@ -44,30 +46,30 @@ module Sidekiq
        return nil
      end

-     work = Sidekiq.redis { |conn| conn.brpop(*qs) }
-     UnitOfWork.new(*work) if work
+     queue, job = redis { |conn| conn.brpop(*qs) }
+     UnitOfWork.new(queue, job, config) if queue
    end

    def bulk_requeue(inprogress, options)
      return if inprogress.empty?

-     Sidekiq.logger.debug { "Re-queueing terminated jobs" }
+     logger.debug { "Re-queueing terminated jobs" }
      jobs_to_requeue = {}
      inprogress.each do |unit_of_work|
        jobs_to_requeue[unit_of_work.queue] ||= []
        jobs_to_requeue[unit_of_work.queue] << unit_of_work.job
      end

-     Sidekiq.redis do |conn|
+     redis do |conn|
        conn.pipelined do |pipeline|
          jobs_to_requeue.each do |queue, jobs|
            pipeline.rpush(queue, jobs)
          end
        end
      end
-     Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
+     logger.info("Pushed #{inprogress.size} jobs back to Redis")
    rescue => ex
-     Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
+     logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
    end

    # Creating the Redis#brpop command takes into account any
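With this change the fetcher no longer touches the Sidekiq global: it receives a config at construction time and threads it into every UnitOfWork, so requeue also goes through config.redis. A rough usage sketch, assuming (as the Launcher change below suggests) that the same config object handed to the launcher is also handed to BasicFetch; passing the Sidekiq module itself as that config is an illustrative assumption, not an official recipe:

require "sidekiq"
require "sidekiq/fetch"

# Assumption: Sidekiq itself acts as the config; it responds to [] (for
# :queues/:strict) and to redis/logger, which is all BasicFetch needs.
fetch = Sidekiq::BasicFetch.new(Sidekiq)

# Blocks for up to TIMEOUT (2s) on BRPOP, then returns nil or a UnitOfWork.
if (work = fetch.retrieve_work)
  puts "fetched #{work.job} from #{work.queue}"
  work.requeue # pushes the raw payload back onto its queue via config.redis
end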
data/lib/sidekiq/job_retry.rb CHANGED
@@ -25,11 +25,11 @@ module Sidekiq
    #
    # A job looks like:
    #
-   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => true }
+   #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => true }
    #
    # The 'retry' option also accepts a number (in place of 'true'):
    #
-   #     { 'class' => 'HardWorker', 'args' => [1, 2, 'foo'], 'retry' => 5 }
+   #     { 'class' => 'HardJob', 'args' => [1, 2, 'foo'], 'retry' => 5 }
    #
    # The job will be retried this number of times before giving up. (If simply
    # 'true', Sidekiq retries 25 times)
@@ -53,11 +53,11 @@ module Sidekiq
    #
    #     Sidekiq.options[:max_retries] = 7
    #
-   # or limit the number of retries for a particular worker and send retries to
+   # or limit the number of retries for a particular job and send retries to
    # a low priority queue with:
    #
-   #     class MyWorker
-   #       include Sidekiq::Worker
+   #     class MyJob
+   #       include Sidekiq::Job
    #       sidekiq_options retry: 10, retry_queue: 'low'
    #     end
    #
@@ -66,17 +66,18 @@ module Sidekiq

    class Skip < Handled; end

-   include Sidekiq::Util
+   include Sidekiq::Component

    DEFAULT_MAX_RETRY_ATTEMPTS = 25

-   def initialize(options = {})
-     @max_retries = Sidekiq.options.merge(options).fetch(:max_retries, DEFAULT_MAX_RETRY_ATTEMPTS)
+   def initialize(options)
+     @config = options
+     @max_retries = @config[:max_retries] || DEFAULT_MAX_RETRY_ATTEMPTS
    end

    # The global retry handler requires only the barest of data.
    # We want to be able to retry as much as possible so we don't
-   # require the worker to be instantiated.
+   # require the job to be instantiated.
    def global(jobstr, queue)
      yield
    rescue Handled => ex
@@ -103,14 +104,14 @@
    end

    # The local retry support means that any errors that occur within
-   # this block can be associated with the given worker instance.
+   # this block can be associated with the given job instance.
    # This is required to support the `sidekiq_retries_exhausted` block.
    #
    # Note that any exception from the block is wrapped in the Skip
    # exception so the global block does not reprocess the error. The
    # Skip exception is unwrapped within Sidekiq::Processor#process before
    # calling the handle_exception handlers.
-   def local(worker, jobstr, queue)
+   def local(jobinst, jobstr, queue)
      yield
    rescue Handled => ex
      raise ex
@@ -123,11 +124,11 @@

      msg = Sidekiq.load_json(jobstr)
      if msg["retry"].nil?
-       msg["retry"] = worker.class.get_sidekiq_options["retry"]
+       msg["retry"] = jobinst.class.get_sidekiq_options["retry"]
      end

      raise e unless msg["retry"]
-     attempt_retry(worker, msg, queue, e)
+     attempt_retry(jobinst, msg, queue, e)
      # We've handled this error associated with this job, don't
      # need to handle it at the global level
      raise Skip
@@ -135,10 +136,10 @@

    private

-   # Note that +worker+ can be nil here if an error is raised before we can
-   # instantiate the worker instance. All access must be guarded and
+   # Note that +jobinst+ can be nil here if an error is raised before we can
+   # instantiate the job instance. All access must be guarded and
    # best effort.
-   def attempt_retry(worker, msg, queue, exception)
+   def attempt_retry(jobinst, msg, queue, exception)
      max_retry_attempts = retry_attempts_from(msg["retry"], @max_retries)

      msg["queue"] = (msg["retry_queue"] || queue)
@@ -170,7 +171,7 @@
      end

      if count < max_retry_attempts
-       delay = delay_for(worker, count, exception)
+       delay = delay_for(jobinst, count, exception)
        # Logging here can break retries if the logging device raises ENOSPC #3979
        # logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
@@ -180,13 +181,13 @@
        end
      else
        # Goodbye dear message, you (re)tried your best I'm sure.
-       retries_exhausted(worker, msg, exception)
+       retries_exhausted(jobinst, msg, exception)
      end
    end

-   def retries_exhausted(worker, msg, exception)
+   def retries_exhausted(jobinst, msg, exception)
      begin
-       block = worker&.sidekiq_retries_exhausted_block
+       block = jobinst&.sidekiq_retries_exhausted_block
        block&.call(msg, exception)
      rescue => e
        handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
@@ -215,19 +216,19 @@
      end
    end

-   def delay_for(worker, count, exception)
+   def delay_for(jobinst, count, exception)
      jitter = rand(10) * (count + 1)
-     if worker&.sidekiq_retry_in_block
-       custom_retry_in = retry_in(worker, count, exception).to_i
+     if jobinst&.sidekiq_retry_in_block
+       custom_retry_in = retry_in(jobinst, count, exception).to_i
        return custom_retry_in + jitter if custom_retry_in > 0
      end
      (count**4) + 15 + jitter
    end

-   def retry_in(worker, count, exception)
-     worker.sidekiq_retry_in_block.call(count, exception)
+   def retry_in(jobinst, count, exception)
+     jobinst.sidekiq_retry_in_block.call(count, exception)
    rescue Exception => e
-     handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default"})
+     handle_exception(e, {context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{jobinst.class.name}, falling back to default"})
      nil
    end
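Only the parameter name changes here (worker becomes jobinst); the backoff math is untouched. For reference, the delay computed by delay_for works out as count**4 + 15 seconds plus a small jitter, and a job class can still short-circuit it with sidekiq_retry_in (FlakyApiJob below is a made-up example):

# Mirrors delay_for above: count**4 + 15 seconds, plus jitter that grows with count.
def approximate_retry_delay(count)
  jitter = rand(10) * (count + 1)
  (count**4) + 15 + jitter
end

(0..3).each { |n| puts "retry ##{n + 1} in ~#{approximate_retry_delay(n)}s" }
# => roughly 15-24s, 16-34s, 31-58s, 96-132s depending on jitter

# Per-class override, consulted by delay_for before the default formula:
class FlakyApiJob
  include Sidekiq::Job
  sidekiq_retry_in { |count, _exception| 30 * (count + 1) } # simple linear backoff
end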
 
data/lib/sidekiq/job_util.rb CHANGED
@@ -4,7 +4,8 @@ require "time"
  module Sidekiq
    module JobUtil
      # These functions encapsulate various job utilities.
-     # They must be simple and free from side effects.
+
+     TRANSIENT_ATTRIBUTES = %w[]

      def validate(item)
        raise(ArgumentError, "Job must be a Hash with 'class' and 'args' keys: `#{item}`") unless item.is_a?(Hash) && item.key?("class") && item.key?("args")
@@ -12,16 +13,19 @@
      raise(ArgumentError, "Job class must be either a Class or String representation of the class name: `#{item}`") unless item["class"].is_a?(Class) || item["class"].is_a?(String)
      raise(ArgumentError, "Job 'at' must be a Numeric timestamp: `#{item}`") if item.key?("at") && !item["at"].is_a?(Numeric)
      raise(ArgumentError, "Job tags must be an Array: `#{item}`") if item["tags"] && !item["tags"].is_a?(Array)
+   end

-   if Sidekiq.options[:on_complex_arguments] == :raise
+   def verify_json(item)
+     job_class = item["wrapped"] || item["class"]
+     if Sidekiq[:on_complex_arguments] == :raise
        msg = <<~EOM
-         Job arguments to #{item["class"]} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
+         Job arguments to #{job_class} must be native JSON types, see https://github.com/mperham/sidekiq/wiki/Best-Practices.
          To disable this error, remove `Sidekiq.strict_args!` from your initializer.
        EOM
        raise(ArgumentError, msg) unless json_safe?(item)
-     elsif Sidekiq.options[:on_complex_arguments] == :warn
+     elsif Sidekiq[:on_complex_arguments] == :warn
        Sidekiq.logger.warn <<~EOM unless json_safe?(item)
-         Job arguments to #{item["class"]} do not serialize to JSON safely. This will raise an error in
+         Job arguments to #{job_class} do not serialize to JSON safely. This will raise an error in
          Sidekiq 7.0. See https://github.com/mperham/sidekiq/wiki/Best-Practices or raise an error today
          by calling `Sidekiq.strict_args!` during Sidekiq initialization.
        EOM
@@ -39,20 +43,22 @@

      raise(ArgumentError, "Job must include a valid queue name") if item["queue"].nil? || item["queue"] == ""

+     # remove job attributes which aren't necessary to persist into Redis
+     TRANSIENT_ATTRIBUTES.each { |key| item.delete(key) }
+
+     item["jid"] ||= SecureRandom.hex(12)
      item["class"] = item["class"].to_s
      item["queue"] = item["queue"].to_s
-     item["jid"] ||= SecureRandom.hex(12)
      item["created_at"] ||= Time.now.to_f
-
      item
    end

    def normalized_hash(item_class)
      if item_class.is_a?(Class)
-       raise(ArgumentError, "Message must include a Sidekiq::Worker class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
+       raise(ArgumentError, "Message must include a Sidekiq::Job class, not class name: #{item_class.ancestors.inspect}") unless item_class.respond_to?(:get_sidekiq_options)
        item_class.get_sidekiq_options
      else
-       Sidekiq.default_worker_options
+       Sidekiq.default_job_options
      end
    end
data/lib/sidekiq/launcher.rb CHANGED
@@ -3,11 +3,12 @@
  require "sidekiq/manager"
  require "sidekiq/fetch"
  require "sidekiq/scheduled"
+ require "sidekiq/ring_buffer"

  module Sidekiq
    # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
    class Launcher
-     include Util
+     include Sidekiq::Component

      STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

@@ -15,18 +16,18 @@ module Sidekiq
      proc { "sidekiq" },
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
-     proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
+     proc { |me, data| "[#{Processor::WORK_STATE.size} of #{data["concurrency"]} busy]" },
      proc { |me, data| "stopping" if me.stopping? }
    ]

    attr_accessor :manager, :poller, :fetcher

    def initialize(options)
+     @config = options
      options[:fetch] ||= BasicFetch.new(options)
      @manager = Sidekiq::Manager.new(options)
-     @poller = Sidekiq::Scheduled::Poller.new
+     @poller = Sidekiq::Scheduled::Poller.new(options)
      @done = false
-     @options = options
    end

    def run
@@ -43,11 +44,9 @@
      @poller.terminate
    end

-   # Shuts down the process. This method does not
-   # return until all work is complete and cleaned up.
-   # It can take up to the timeout to complete.
+   # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
    def stop
-     deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
+     deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

      @done = true
      @manager.quiet
@@ -55,10 +54,10 @@

      @manager.stop(deadline)

-     # Requeue everything in case there was a worker who grabbed work while stopped
+     # Requeue everything in case there was a thread which fetched a job while the process was stopped.
      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-     strategy = @options[:fetch]
-     strategy.bulk_requeue([], @options)
+     strategy = @config[:fetch]
+     strategy.bulk_requeue([], @config)

      clear_heartbeat
    end
@@ -76,17 +75,17 @@
        heartbeat
        sleep BEAT_PAUSE
      end
-     Sidekiq.logger.info("Heartbeat stopping...")
+     logger.info("Heartbeat stopping...")
    end

    def clear_heartbeat
      # Remove record from Redis since we are shutting down.
      # Note we don't stop the heartbeat thread; if the process
      # doesn't actually exit, it'll reappear in the Web UI.
-     Sidekiq.redis do |conn|
+     redis do |conn|
        conn.pipelined do |pipeline|
          pipeline.srem("processes", identity)
-         pipeline.unlink("#{identity}:workers")
+         pipeline.unlink("#{identity}:work")
        end
      end
    rescue
@@ -132,12 +131,11 @@
      begin
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
-       curstate = Processor::WORKER_STATE.dup
+       curstate = Processor::WORK_STATE.dup

-       workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")

-       Sidekiq.redis do |conn|
+       redis do |conn|
          conn.multi do |transaction|
            transaction.incrby("stat:processed", procd)
            transaction.incrby("stat:processed:#{nowdate}", procd)
@@ -146,12 +144,16 @@
            transaction.incrby("stat:failed", fails)
            transaction.incrby("stat:failed:#{nowdate}", fails)
            transaction.expire("stat:failed:#{nowdate}", STATS_TTL)
+         end

-         transaction.unlink(workers_key)
+         # work is the current set of executing jobs
+         work_key = "#{key}:work"
+         conn.pipelined do |transaction|
+           transaction.unlink(work_key)
            curstate.each_pair do |tid, hash|
-             transaction.hset(workers_key, tid, Sidekiq.dump_json(hash))
+             transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
            end
-           transaction.expire(workers_key, 60)
+           transaction.expire(work_key, 60)
          end
        end
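The heartbeat also renames the per-process hash of in-progress jobs from <identity>:workers to <identity>:work, rewritten on every beat with a 60-second TTL. A quick, illustrative way to peek at it directly; the identity string below is a placeholder and the exact payload shape is whatever the running process wrote:

require "sidekiq"

# identity is "<hostname>:<pid>:<nonce>", as built by Sidekiq::Component#identity.
identity = "worker-1:4012:0a1b2c3d4e5f" # placeholder for a real process identity

Sidekiq.redis do |conn|
  conn.hgetall("#{identity}:work").each do |tid, json|
    # each value is the JSON dump of that thread's Processor::WORK_STATE entry
    puts "#{tid}: #{Sidekiq.load_json(json).inspect}"
  end
end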
 
@@ -160,7 +162,7 @@
        fails = procd = 0
        kb = memory_usage(::Process.pid)

-       _, exists, _, _, msg = Sidekiq.redis { |conn|
+       _, exists, _, _, msg = redis { |conn|
          conn.multi { |transaction|
            transaction.sadd("processes", key)
            transaction.exists?(key)
@@ -168,7 +170,7 @@
            "busy", curstate.size,
            "beat", Time.now.to_f,
            "rtt_us", rtt,
-           "quiet", @done,
+           "quiet", @done.to_s,
            "rss", kb)
          transaction.expire(key, 60)
          transaction.rpop("#{key}-signals")
@@ -198,7 +200,7 @@

    def check_rtt
      a = b = 0
-     Sidekiq.redis do |x|
+     redis do |x|
        a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
        x.ping
        b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -209,12 +211,12 @@
      # Workable is < 10,000µs
      # Log a warning if it's a disaster.
      if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-       Sidekiq.logger.warn <<~EOM
+       logger.warn <<~EOM
          Your Redis network connection is performing extremely poorly.
          Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
          Ensure Redis is running in the same AZ or datacenter as Sidekiq.
          If these values are close to 100,000, that means your Sidekiq process may be
-         CPU overloaded; see https://github.com/mperham/sidekiq/discussions/5039
+         CPU-saturated; reduce your concurrency and/or see https://github.com/mperham/sidekiq/discussions/5039
        EOM
        RTT_READINGS.reset
      end
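The RTT probe is easy to reproduce outside the heartbeat if you want to see the numbers the warning refers to. A standalone sketch using the same monotonic-clock measurement; the thresholds in the final comment are the ones quoted in the hunk (< 1,000µs ideal, < 10,000µs workable):

require "sidekiq"

# Same measurement as check_rtt: time a PING in microseconds on the
# monotonic clock so wall-clock adjustments cannot skew the reading.
def redis_rtt_us
  Sidekiq.redis do |conn|
    a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
    conn.ping
    b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
    b - a
  end
end

puts "Redis RTT: #{redis_rtt_us}µs (ideally < 1,000; workable < 10,000)"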
@@ -246,10 +248,10 @@
        "hostname" => hostname,
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
-       "tag" => @options[:tag] || "",
-       "concurrency" => @options[:concurrency],
-       "queues" => @options[:queues].uniq,
-       "labels" => @options[:labels],
+       "tag" => @config[:tag] || "",
+       "concurrency" => @config[:concurrency],
+       "queues" => @config[:queues].uniq,
+       "labels" => @config[:labels],
        "identity" => identity
      }
    end
data/lib/sidekiq/logger.rb CHANGED
@@ -18,7 +18,7 @@ module Sidekiq
    end

    def self.add(k, v)
-     Thread.current[:sidekiq_context][k] = v
+     current[k] = v
    end
  end

@@ -35,24 +35,10 @@
      nil
    end

-   def debug?
-     level <= 0
-   end
-
-   def info?
-     level <= 1
-   end
-
-   def warn?
-     level <= 2
-   end
-
-   def error?
-     level <= 3
-   end
-
-   def fatal?
-     level <= 4
+   LEVELS.each do |level, numeric_level|
+     define_method("#{level}?") do
+       local_level.nil? ? super() : local_level <= numeric_level
+     end
    end

    def local_level