sidekiq 6.0.1 → 6.2.2

Sign up to get free protection for your applications and access to all of the features.

Potentially problematic release.


This version of sidekiq might be problematic. Click here for more details.

Files changed (87)
  1. checksums.yaml +4 -4
  2. data/Changes.md +147 -2
  3. data/LICENSE +1 -1
  4. data/README.md +4 -7
  5. data/bin/sidekiq +26 -2
  6. data/lib/generators/sidekiq/worker_generator.rb +1 -1
  7. data/lib/sidekiq/api.rb +151 -111
  8. data/lib/sidekiq/cli.rb +39 -10
  9. data/lib/sidekiq/client.rb +26 -15
  10. data/lib/sidekiq/extensions/action_mailer.rb +3 -2
  11. data/lib/sidekiq/extensions/active_record.rb +4 -3
  12. data/lib/sidekiq/extensions/class_methods.rb +5 -4
  13. data/lib/sidekiq/extensions/generic_proxy.rb +3 -1
  14. data/lib/sidekiq/fetch.rb +29 -21
  15. data/lib/sidekiq/job.rb +8 -0
  16. data/lib/sidekiq/job_logger.rb +2 -2
  17. data/lib/sidekiq/job_retry.rb +11 -12
  18. data/lib/sidekiq/launcher.rb +104 -24
  19. data/lib/sidekiq/logger.rb +12 -11
  20. data/lib/sidekiq/manager.rb +4 -4
  21. data/lib/sidekiq/middleware/chain.rb +6 -4
  22. data/lib/sidekiq/monitor.rb +2 -17
  23. data/lib/sidekiq/processor.rb +17 -39
  24. data/lib/sidekiq/rails.rb +16 -18
  25. data/lib/sidekiq/redis_connection.rb +21 -13
  26. data/lib/sidekiq/scheduled.rb +7 -1
  27. data/lib/sidekiq/sd_notify.rb +149 -0
  28. data/lib/sidekiq/systemd.rb +24 -0
  29. data/lib/sidekiq/testing.rb +2 -4
  30. data/lib/sidekiq/util.rb +28 -2
  31. data/lib/sidekiq/version.rb +1 -1
  32. data/lib/sidekiq/web/action.rb +2 -2
  33. data/lib/sidekiq/web/application.rb +30 -19
  34. data/lib/sidekiq/web/csrf_protection.rb +180 -0
  35. data/lib/sidekiq/web/helpers.rb +35 -24
  36. data/lib/sidekiq/web/router.rb +6 -5
  37. data/lib/sidekiq/web.rb +37 -73
  38. data/lib/sidekiq/worker.rb +4 -7
  39. data/lib/sidekiq.rb +14 -8
  40. data/sidekiq.gemspec +12 -5
  41. data/web/assets/images/apple-touch-icon.png +0 -0
  42. data/web/assets/javascripts/application.js +25 -27
  43. data/web/assets/stylesheets/application-dark.css +146 -124
  44. data/web/assets/stylesheets/application.css +35 -135
  45. data/web/locales/ar.yml +8 -2
  46. data/web/locales/de.yml +14 -2
  47. data/web/locales/en.yml +5 -0
  48. data/web/locales/es.yml +18 -2
  49. data/web/locales/fr.yml +10 -3
  50. data/web/locales/ja.yml +5 -0
  51. data/web/locales/lt.yml +83 -0
  52. data/web/locales/pl.yml +4 -4
  53. data/web/locales/ru.yml +4 -0
  54. data/web/locales/vi.yml +83 -0
  55. data/web/views/_job_info.erb +1 -1
  56. data/web/views/busy.erb +50 -19
  57. data/web/views/dashboard.erb +14 -6
  58. data/web/views/dead.erb +1 -1
  59. data/web/views/layout.erb +2 -1
  60. data/web/views/morgue.erb +6 -6
  61. data/web/views/queue.erb +1 -1
  62. data/web/views/queues.erb +10 -2
  63. data/web/views/retries.erb +7 -7
  64. data/web/views/retry.erb +1 -1
  65. data/web/views/scheduled.erb +1 -1
  66. metadata +26 -50
  67. data/.circleci/config.yml +0 -82
  68. data/.github/contributing.md +0 -32
  69. data/.github/issue_template.md +0 -11
  70. data/.gitignore +0 -13
  71. data/.standard.yml +0 -20
  72. data/3.0-Upgrade.md +0 -70
  73. data/4.0-Upgrade.md +0 -53
  74. data/5.0-Upgrade.md +0 -56
  75. data/6.0-Upgrade.md +0 -72
  76. data/COMM-LICENSE +0 -97
  77. data/Ent-2.0-Upgrade.md +0 -37
  78. data/Ent-Changes.md +0 -256
  79. data/Gemfile +0 -24
  80. data/Gemfile.lock +0 -196
  81. data/Pro-2.0-Upgrade.md +0 -138
  82. data/Pro-3.0-Upgrade.md +0 -44
  83. data/Pro-4.0-Upgrade.md +0 -35
  84. data/Pro-5.0-Upgrade.md +0 -25
  85. data/Pro-Changes.md +0 -776
  86. data/Rakefile +0 -10
  87. data/code_of_conduct.md +0 -50
@@ -5,9 +5,10 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to ActionMailer to offload arbitrary email
9
- # delivery to Sidekiq. Example:
8
+ # Adds +delay+, +delay_for+ and +delay_until+ methods to ActionMailer to offload arbitrary email
9
+ # delivery to Sidekiq.
10
10
  #
11
+ # @example
11
12
  # UserMailer.delay.send_welcome_email(new_user)
12
13
  # UserMailer.delay_for(5.days).send_welcome_email(new_user)
13
14
  # UserMailer.delay_until(5.days.from_now).send_welcome_email(new_user)
@@ -5,10 +5,11 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to ActiveRecord to offload instance method
9
- # execution to Sidekiq. Examples:
8
+ # Adds +delay+, +delay_for+ and +delay_until+ methods to ActiveRecord to offload instance method
9
+ # execution to Sidekiq.
10
10
  #
11
- # User.recent_signups.each { |user| user.delay.mark_as_awesome }
11
+ # @example
12
+ # User.recent_signups.each { |user| user.delay.mark_as_awesome }
12
13
  #
13
14
  # Please note, this is not recommended as this will serialize the entire
14
15
  # object to Redis. Your Sidekiq jobs should pass IDs, not entire instances.
@@ -5,11 +5,12 @@ require "sidekiq/extensions/generic_proxy"
5
5
  module Sidekiq
6
6
  module Extensions
7
7
  ##
8
- # Adds 'delay', 'delay_for' and `delay_until` methods to all Classes to offload class method
9
- # execution to Sidekiq. Examples:
8
+ # Adds `delay`, `delay_for` and `delay_until` methods to all Classes to offload class method
9
+ # execution to Sidekiq.
10
10
  #
11
- # User.delay.delete_inactive
12
- # Wikipedia.delay.download_changes_for(Date.today)
11
+ # @example
12
+ # User.delay.delete_inactive
13
+ # Wikipedia.delay.download_changes_for(Date.today)
13
14
  #
14
15
  class DelayedClass
15
16
  include Sidekiq::Worker
@@ -24,7 +24,9 @@ module Sidekiq
24
24
  if marshalled.size > SIZE_LIMIT
25
25
  ::Sidekiq.logger.warn { "#{@target}.#{name} job argument is #{marshalled.bytesize} bytes, you should refactor it to reduce the size" }
26
26
  end
27
- @performable.client_push({"class" => @performable, "args" => [marshalled]}.merge(@opts))
27
+ @performable.client_push({"class" => @performable,
28
+ "args" => [marshalled],
29
+ "display_class" => "#{@target}.#{name}"}.merge(@opts))
28
30
  end
29
31
  end
30
32
  end
data/lib/sidekiq/fetch.rb CHANGED
@@ -25,8 +25,10 @@ module Sidekiq
25
25
  }
26
26
 
27
27
  def initialize(options)
28
- @strictly_ordered_queues = !!options[:strict]
29
- @queues = options[:queues].map { |q| "queue:#{q}" }
28
+ raise ArgumentError, "missing queue list" unless options[:queues]
29
+ @options = options
30
+ @strictly_ordered_queues = !!@options[:strict]
31
+ @queues = @options[:queues].map { |q| "queue:#{q}" }
30
32
  if @strictly_ordered_queues
31
33
  @queues.uniq!
32
34
  @queues << TIMEOUT
@@ -34,28 +36,19 @@ module Sidekiq
34
36
  end
35
37
 
36
38
  def retrieve_work
37
- work = Sidekiq.redis { |conn| conn.brpop(*queues_cmd) }
38
- UnitOfWork.new(*work) if work
39
- end
40
-
41
- # Creating the Redis#brpop command takes into account any
42
- # configured queue weights. By default Redis#brpop returns
43
- # data from the first queue that has pending elements. We
44
- # recreate the queue command each time we invoke Redis#brpop
45
- # to honor weights and avoid queue starvation.
46
- def queues_cmd
47
- if @strictly_ordered_queues
48
- @queues
49
- else
50
- queues = @queues.shuffle!.uniq
51
- queues << TIMEOUT
52
- queues
39
+ qs = queues_cmd
40
+ # 4825 Sidekiq Pro with all queues paused will return an
41
+ # empty set of queues with a trailing TIMEOUT value.
42
+ if qs.size <= 1
43
+ sleep(TIMEOUT)
44
+ return nil
53
45
  end
46
+
47
+ work = Sidekiq.redis { |conn| conn.brpop(*qs) }
48
+ UnitOfWork.new(*work) if work
54
49
  end
55
50
 
56
- # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
57
- # an instance method will make it async to the Fetcher actor
58
- def self.bulk_requeue(inprogress, options)
51
+ def bulk_requeue(inprogress, options)
59
52
  return if inprogress.empty?
60
53
 
61
54
  Sidekiq.logger.debug { "Re-queueing terminated jobs" }
@@ -76,5 +69,20 @@ module Sidekiq
76
69
  rescue => ex
77
70
  Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
78
71
  end
72
+
73
+ # Creating the Redis#brpop command takes into account any
74
+ # configured queue weights. By default Redis#brpop returns
75
+ # data from the first queue that has pending elements. We
76
+ # recreate the queue command each time we invoke Redis#brpop
77
+ # to honor weights and avoid queue starvation.
78
+ def queues_cmd
79
+ if @strictly_ordered_queues
80
+ @queues
81
+ else
82
+ queues = @queues.shuffle!.uniq
83
+ queues << TIMEOUT
84
+ queues
85
+ end
86
+ end
79
87
  end
80
88
  end
@@ -0,0 +1,8 @@
1
+ require "sidekiq/worker"
2
+
3
+ module Sidekiq
4
+ # Sidekiq::Job is a new alias for Sidekiq::Worker, coming in 6.3.0.
5
+ # You can opt into this by requiring 'sidekiq/job' in your initializer
6
+ # and then using `include Sidekiq::Job` rather than `Sidekiq::Worker`.
7
+ Job = Worker
8
+ end
@@ -38,8 +38,8 @@ module Sidekiq
38
38
  # If we're using a wrapper class, like ActiveJob, use the "wrapped"
39
39
  # attribute to expose the underlying thing.
40
40
  h = {
41
- class: job_hash["wrapped"] || job_hash["class"],
42
- jid: job_hash["jid"],
41
+ class: job_hash["display_class"] || job_hash["wrapped"] || job_hash["class"],
42
+ jid: job_hash["jid"]
43
43
  }
44
44
  h[:bid] = job_hash["bid"] if job_hash["bid"]
45
45
  h[:tags] = job_hash["tags"] if job_hash["tags"]
@@ -61,6 +61,7 @@ module Sidekiq
61
61
  #
62
62
  class JobRetry
63
63
  class Handled < ::RuntimeError; end
64
+
64
65
  class Skip < Handled; end
65
66
 
66
67
  include Sidekiq::Util
@@ -74,7 +75,7 @@ module Sidekiq
74
75
  # The global retry handler requires only the barest of data.
75
76
  # We want to be able to retry as much as possible so we don't
76
77
  # require the worker to be instantiated.
77
- def global(msg, queue)
78
+ def global(jobstr, queue)
78
79
  yield
79
80
  rescue Handled => ex
80
81
  raise ex
@@ -85,6 +86,7 @@ module Sidekiq
85
86
  # ignore, will be pushed back onto queue during hard_shutdown
86
87
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
87
88
 
89
+ msg = Sidekiq.load_json(jobstr)
88
90
  if msg["retry"]
89
91
  attempt_retry(nil, msg, queue, e)
90
92
  else
@@ -106,7 +108,7 @@ module Sidekiq
106
108
  # exception so the global block does not reprocess the error. The
107
109
  # Skip exception is unwrapped within Sidekiq::Processor#process before
108
110
  # calling the handle_exception handlers.
109
- def local(worker, msg, queue)
111
+ def local(worker, jobstr, queue)
110
112
  yield
111
113
  rescue Handled => ex
112
114
  raise ex
@@ -117,6 +119,7 @@ module Sidekiq
117
119
  # ignore, will be pushed back onto queue during hard_shutdown
118
120
  raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
119
121
 
122
+ msg = Sidekiq.load_json(jobstr)
120
123
  if msg["retry"].nil?
121
124
  msg["retry"] = worker.class.get_sidekiq_options["retry"]
122
125
  end
@@ -187,13 +190,13 @@ module Sidekiq
187
190
  handle_exception(e, {context: "Error calling retries_exhausted", job: msg})
188
191
  end
189
192
 
193
+ send_to_morgue(msg) unless msg["dead"] == false
194
+
190
195
  Sidekiq.death_handlers.each do |handler|
191
196
  handler.call(msg, exception)
192
197
  rescue => e
193
198
  handle_exception(e, {context: "Error calling death handler", job: msg})
194
199
  end
195
-
196
- send_to_morgue(msg) unless msg["dead"] == false
197
200
  end
198
201
 
199
202
  def send_to_morgue(msg)
@@ -211,16 +214,12 @@ module Sidekiq
211
214
  end
212
215
 
213
216
  def delay_for(worker, count, exception)
217
+ jitter = rand(10) * (count + 1)
214
218
  if worker&.sidekiq_retry_in_block
215
219
  custom_retry_in = retry_in(worker, count, exception).to_i
216
- return custom_retry_in if custom_retry_in > 0
220
+ return custom_retry_in + jitter if custom_retry_in > 0
217
221
  end
218
- seconds_to_delay(count)
219
- end
220
-
221
- # delayed_job uses the same basic formula
222
- def seconds_to_delay(count)
223
- (count**4) + 15 + (rand(30) * (count + 1))
222
+ (count**4) + 15 + jitter
224
223
  end
225
224
 
226
225
  def retry_in(worker, count, exception)
@@ -252,7 +251,7 @@ module Sidekiq
252
251
  end
253
252
 
254
253
  def compress_backtrace(backtrace)
255
- serialized = Marshal.dump(backtrace)
254
+ serialized = Sidekiq.dump_json(backtrace)
256
255
  compressed = Zlib::Deflate.deflate(serialized)
257
256
  Base64.encode64(compressed)
258
257
  end
@@ -16,12 +16,13 @@ module Sidekiq
16
16
  proc { Sidekiq::VERSION },
17
17
  proc { |me, data| data["tag"] },
18
18
  proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
19
- proc { |me, data| "stopping" if me.stopping? },
19
+ proc { |me, data| "stopping" if me.stopping? }
20
20
  ]
21
21
 
22
22
  attr_accessor :manager, :poller, :fetcher
23
23
 
24
24
  def initialize(options)
25
+ options[:fetch] ||= BasicFetch.new(options)
25
26
  @manager = Sidekiq::Manager.new(options)
26
27
  @poller = Sidekiq::Scheduled::Poller.new
27
28
  @done = false
@@ -56,7 +57,7 @@ module Sidekiq
56
57
 
57
58
  # Requeue everything in case there was a worker who grabbed work while stopped
58
59
  # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
59
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
60
+ strategy = @options[:fetch]
60
61
  strategy.bulk_requeue([], @options)
61
62
 
62
63
  clear_heartbeat
@@ -83,7 +84,7 @@ module Sidekiq
83
84
  Sidekiq.redis do |conn|
84
85
  conn.pipelined do
85
86
  conn.srem("processes", identity)
86
- conn.del("#{identity}:workers")
87
+ conn.unlink("#{identity}:workers")
87
88
  end
88
89
  end
89
90
  rescue
@@ -96,6 +97,32 @@ module Sidekiq
96
97
 
97
98
  end
98
99
 
100
+ def self.flush_stats
101
+ fails = Processor::FAILURE.reset
102
+ procd = Processor::PROCESSED.reset
103
+ return if fails + procd == 0
104
+
105
+ nowdate = Time.now.utc.strftime("%Y-%m-%d")
106
+ begin
107
+ Sidekiq.redis do |conn|
108
+ conn.pipelined do
109
+ conn.incrby("stat:processed", procd)
110
+ conn.incrby("stat:processed:#{nowdate}", procd)
111
+ conn.expire("stat:processed:#{nowdate}", STATS_TTL)
112
+
113
+ conn.incrby("stat:failed", fails)
114
+ conn.incrby("stat:failed:#{nowdate}", fails)
115
+ conn.expire("stat:failed:#{nowdate}", STATS_TTL)
116
+ end
117
+ end
118
+ rescue => ex
119
+ # we're exiting the process, things might be shut down so don't
120
+ # try to handle the exception
121
+ Sidekiq.logger.warn("Unable to flush stats: #{ex}")
122
+ end
123
+ end
124
+ at_exit(&method(:flush_stats))
125
+
99
126
  def ❤
100
127
  key = identity
101
128
  fails = procd = 0
@@ -118,7 +145,7 @@ module Sidekiq
118
145
  conn.incrby("stat:failed:#{nowdate}", fails)
119
146
  conn.expire("stat:failed:#{nowdate}", STATS_TTL)
120
147
 
121
- conn.del(workers_key)
148
+ conn.unlink(workers_key)
122
149
  curstate.each_pair do |tid, hash|
123
150
  conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
124
151
  end
@@ -126,13 +153,21 @@ module Sidekiq
126
153
  end
127
154
  end
128
155
 
156
+ rtt = check_rtt
157
+
129
158
  fails = procd = 0
159
+ kb = memory_usage(::Process.pid)
130
160
 
131
161
  _, exists, _, _, msg = Sidekiq.redis { |conn|
132
162
  conn.multi {
133
163
  conn.sadd("processes", key)
134
- conn.exists(key)
135
- conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
164
+ conn.exists?(key)
165
+ conn.hmset(key, "info", to_json,
166
+ "busy", curstate.size,
167
+ "beat", Time.now.to_f,
168
+ "rtt_us", rtt,
169
+ "quiet", @done,
170
+ "rss", kb)
136
171
  conn.expire(key, 60)
137
172
  conn.rpop("#{key}-signals")
138
173
  }
@@ -146,34 +181,79 @@ module Sidekiq
146
181
  ::Process.kill(msg, ::Process.pid)
147
182
  rescue => e
148
183
  # ignore all redis/network issues
149
- logger.error("heartbeat: #{e.message}")
184
+ logger.error("heartbeat: #{e}")
150
185
  # don't lose the counts if there was a network issue
151
186
  Processor::PROCESSED.incr(procd)
152
187
  Processor::FAILURE.incr(fails)
153
188
  end
154
189
  end
155
190
 
156
- def to_data
157
- @data ||= begin
158
- {
159
- "hostname" => hostname,
160
- "started_at" => Time.now.to_f,
161
- "pid" => ::Process.pid,
162
- "tag" => @options[:tag] || "",
163
- "concurrency" => @options[:concurrency],
164
- "queues" => @options[:queues].uniq,
165
- "labels" => @options[:labels],
166
- "identity" => identity,
167
- }
191
+ # We run the heartbeat every five seconds.
192
+ # Capture five samples of RTT, log a warning if each sample
193
+ # is above our warning threshold.
194
+ RTT_READINGS = RingBuffer.new(5)
195
+ RTT_WARNING_LEVEL = 50_000
196
+
197
+ def check_rtt
198
+ a = b = 0
199
+ Sidekiq.redis do |x|
200
+ a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
201
+ x.ping
202
+ b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
168
203
  end
204
+ rtt = b - a
205
+ RTT_READINGS << rtt
206
+ # Ideal RTT for Redis is < 1000µs
207
+ # Workable is < 10,000µs
208
+ # Log a warning if it's a disaster.
209
+ if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
210
+ Sidekiq.logger.warn <<~EOM
211
+ Your Redis network connection is performing extremely poorly.
212
+ Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
213
+ Ensure Redis is running in the same AZ or datacenter as Sidekiq.
214
+ EOM
215
+ RTT_READINGS.reset
216
+ end
217
+ rtt
218
+ end
219
+
220
+ MEMORY_GRABBER = case RUBY_PLATFORM
221
+ when /linux/
222
+ ->(pid) {
223
+ IO.readlines("/proc/#{$$}/status").each do |line|
224
+ next unless line.start_with?("VmRSS:")
225
+ break line.split[1].to_i
226
+ end
227
+ }
228
+ when /darwin|bsd/
229
+ ->(pid) {
230
+ `ps -o pid,rss -p #{pid}`.lines.last.split.last.to_i
231
+ }
232
+ else
233
+ ->(pid) { 0 }
234
+ end
235
+
236
+ def memory_usage(pid)
237
+ MEMORY_GRABBER.call(pid)
238
+ end
239
+
240
+ def to_data
241
+ @data ||= {
242
+ "hostname" => hostname,
243
+ "started_at" => Time.now.to_f,
244
+ "pid" => ::Process.pid,
245
+ "tag" => @options[:tag] || "",
246
+ "concurrency" => @options[:concurrency],
247
+ "queues" => @options[:queues].uniq,
248
+ "labels" => @options[:labels],
249
+ "identity" => identity
250
+ }
169
251
  end
170
252
 
171
253
  def to_json
172
- @json ||= begin
173
- # this data changes infrequently so dump it to a string
174
- # now so we don't need to dump it every heartbeat.
175
- Sidekiq.dump_json(to_data)
176
- end
254
+ # this data changes infrequently so dump it to a string
255
+ # now so we don't need to dump it every heartbeat.
256
+ @json ||= Sidekiq.dump_json(to_data)
177
257
  end
178
258
  end
179
259
  end
@@ -6,10 +6,11 @@ require "time"
6
6
  module Sidekiq
7
7
  module Context
8
8
  def self.with(hash)
9
+ orig_context = current.dup
9
10
  current.merge!(hash)
10
11
  yield
11
12
  ensure
12
- hash.each_key { |key| current.delete(key) }
13
+ Thread.current[:sidekiq_context] = orig_context
13
14
  end
14
15
 
15
16
  def self.current
@@ -23,7 +24,7 @@ module Sidekiq
23
24
  "info" => 1,
24
25
  "warn" => 2,
25
26
  "error" => 3,
26
- "fatal" => 4,
27
+ "fatal" => 4
27
28
  }
28
29
  LEVELS.default_proc = proc do |_, level|
29
30
  Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
@@ -31,23 +32,23 @@ module Sidekiq
31
32
  end
32
33
 
33
34
  def debug?
34
- level >= 0
35
+ level <= 0
35
36
  end
36
37
 
37
38
  def info?
38
- level >= 1
39
+ level <= 1
39
40
  end
40
41
 
41
42
  def warn?
42
- level >= 2
43
+ level <= 2
43
44
  end
44
45
 
45
46
  def error?
46
- level >= 3
47
+ level <= 3
47
48
  end
48
49
 
49
50
  def fatal?
50
- level >= 4
51
+ level <= 4
51
52
  end
52
53
 
53
54
  def local_level
@@ -83,13 +84,13 @@ module Sidekiq
83
84
  # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
84
85
  # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
85
86
  def add(severity, message = nil, progname = nil, &block)
86
- severity ||= UNKNOWN
87
+ severity ||= ::Logger::UNKNOWN
87
88
  progname ||= @progname
88
89
 
89
90
  return true if @logdev.nil? || severity < level
90
91
 
91
92
  if message.nil?
92
- if block_given?
93
+ if block
93
94
  message = yield
94
95
  else
95
96
  message = progname
@@ -104,7 +105,7 @@ module Sidekiq
104
105
  class Logger < ::Logger
105
106
  include LoggingUtils
106
107
 
107
- def initialize(*args)
108
+ def initialize(*args, **kwargs)
108
109
  super
109
110
  self.formatter = Sidekiq.log_formatter
110
111
  end
@@ -152,7 +153,7 @@ module Sidekiq
152
153
  pid: ::Process.pid,
153
154
  tid: tid,
154
155
  lvl: severity,
155
- msg: message,
156
+ msg: message
156
157
  }
157
158
  c = ctx
158
159
  hash["ctx"] = c unless c.empty?
@@ -35,7 +35,7 @@ module Sidekiq
35
35
  @done = false
36
36
  @workers = Set.new
37
37
  @count.times do
38
- @workers << Processor.new(self)
38
+ @workers << Processor.new(self, options)
39
39
  end
40
40
  @plock = Mutex.new
41
41
  end
@@ -56,7 +56,7 @@ module Sidekiq
56
56
  end
57
57
 
58
58
  # hack for quicker development / testing environment #2774
59
- PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
59
+ PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
60
60
 
61
61
  def stop(deadline)
62
62
  quiet
@@ -90,7 +90,7 @@ module Sidekiq
90
90
  @plock.synchronize do
91
91
  @workers.delete(processor)
92
92
  unless @done
93
- p = Processor.new(self)
93
+ p = Processor.new(self, options)
94
94
  @workers << p
95
95
  p.start
96
96
  end
@@ -123,7 +123,7 @@ module Sidekiq
123
123
  # contract says that jobs are run AT LEAST once. Process termination
124
124
  # is delayed until we're certain the jobs are back in Redis because
125
125
  # it is worse to lose a job than to run it twice.
126
- strategy = (@options[:fetch] || Sidekiq::BasicFetch)
126
+ strategy = @options[:fetch]
127
127
  strategy.bulk_requeue(jobs, @options)
128
128
  end
129
129
 
@@ -90,12 +90,12 @@ module Sidekiq
90
90
  end
91
91
 
92
92
  def add(klass, *args)
93
- remove(klass) if exists?(klass)
93
+ remove(klass)
94
94
  entries << Entry.new(klass, *args)
95
95
  end
96
96
 
97
97
  def prepend(klass, *args)
98
- remove(klass) if exists?(klass)
98
+ remove(klass)
99
99
  entries.insert(0, Entry.new(klass, *args))
100
100
  end
101
101
 
@@ -132,8 +132,8 @@ module Sidekiq
132
132
  def invoke(*args)
133
133
  return yield if empty?
134
134
 
135
- chain = retrieve.dup
136
- traverse_chain = lambda do
135
+ chain = retrieve
136
+ traverse_chain = proc do
137
137
  if chain.empty?
138
138
  yield
139
139
  else
@@ -144,6 +144,8 @@ module Sidekiq
144
144
  end
145
145
  end
146
146
 
147
+ private
148
+
147
149
  class Entry
148
150
  attr_reader :klass
149
151
 
@@ -4,21 +4,6 @@ require "fileutils"
4
4
  require "sidekiq/api"
5
5
 
6
6
  class Sidekiq::Monitor
7
- CMD = File.basename($PROGRAM_NAME)
8
-
9
- attr_reader :stage
10
-
11
- def self.print_usage
12
- puts "#{CMD} - monitor Sidekiq from the command line."
13
- puts
14
- puts "Usage: #{CMD} <section>"
15
- puts
16
- puts " <section> (optional) view a specific section of the status output"
17
- puts " Valid sections are: #{Sidekiq::Monitor::Status::VALID_SECTIONS.join(", ")}"
18
- puts
19
- puts "Set REDIS_URL to the location of your Redis server if not monitoring localhost."
20
- end
21
-
22
7
  class Status
23
8
  VALID_SECTIONS = %w[all version overview processes queues]
24
9
  COL_PAD = 2
@@ -77,7 +62,7 @@ class Sidekiq::Monitor
77
62
  columns = {
78
63
  name: [:ljust, (["name"] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
79
64
  size: [:rjust, (["size"] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
80
- latency: [:rjust, (["latency"] + queue_data.map(&:latency)).map(&:length).max + COL_PAD],
65
+ latency: [:rjust, (["latency"] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
81
66
  }
82
67
  columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
83
68
  puts
@@ -116,7 +101,7 @@ class Sidekiq::Monitor
116
101
  tags = [
117
102
  process["tag"],
118
103
  process["labels"],
119
- (process["quiet"] == "true" ? "quiet" : nil),
104
+ (process["quiet"] == "true" ? "quiet" : nil)
120
105
  ].flatten.compact
121
106
  tags.any? ? "[#{tags.join("] [")}]" : nil
122
107
  end