sidekiq 5.1.3 → 5.2.8

Files changed (46)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.gitignore +2 -0
  4. data/.travis.yml +2 -5
  5. data/COMM-LICENSE +11 -9
  6. data/Changes.md +67 -0
  7. data/Ent-Changes.md +22 -0
  8. data/Gemfile +20 -5
  9. data/Pro-Changes.md +30 -0
  10. data/README.md +1 -1
  11. data/Rakefile +2 -1
  12. data/bin/sidekiqctl +13 -92
  13. data/bin/sidekiqload +1 -1
  14. data/lib/sidekiq/api.rb +47 -14
  15. data/lib/sidekiq/cli.rb +64 -58
  16. data/lib/sidekiq/client.rb +4 -3
  17. data/lib/sidekiq/ctl.rb +221 -0
  18. data/lib/sidekiq/job_logger.rb +2 -2
  19. data/lib/sidekiq/job_retry.rb +33 -12
  20. data/lib/sidekiq/launcher.rb +14 -7
  21. data/lib/sidekiq/manager.rb +3 -3
  22. data/lib/sidekiq/middleware/server/active_record.rb +1 -1
  23. data/lib/sidekiq/processor.rb +76 -25
  24. data/lib/sidekiq/rails.rb +2 -1
  25. data/lib/sidekiq/redis_connection.rb +20 -1
  26. data/lib/sidekiq/scheduled.rb +32 -3
  27. data/lib/sidekiq/testing.rb +4 -4
  28. data/lib/sidekiq/version.rb +1 -1
  29. data/lib/sidekiq/web/application.rb +22 -0
  30. data/lib/sidekiq/web/helpers.rb +13 -6
  31. data/lib/sidekiq/worker.rb +24 -8
  32. data/lib/sidekiq.rb +5 -3
  33. data/sidekiq.gemspec +5 -12
  34. data/web/assets/javascripts/application.js +0 -0
  35. data/web/assets/javascripts/dashboard.js +15 -5
  36. data/web/assets/stylesheets/application.css +35 -2
  37. data/web/assets/stylesheets/bootstrap.css +2 -2
  38. data/web/locales/ar.yml +1 -0
  39. data/web/locales/en.yml +1 -0
  40. data/web/locales/es.yml +3 -3
  41. data/web/views/_nav.erb +3 -17
  42. data/web/views/layout.erb +1 -1
  43. data/web/views/queue.erb +1 -0
  44. data/web/views/queues.erb +1 -1
  45. data/web/views/retries.erb +4 -0
  46. metadata +12 -79
data/lib/sidekiq/cli.rb CHANGED
@@ -9,6 +9,7 @@ require 'fileutils'
 
 require 'sidekiq'
 require 'sidekiq/util'
+require 'sidekiq/launcher'
 
 module Sidekiq
   class CLI
@@ -23,23 +24,13 @@ module Sidekiq
       proc { |me, data| "stopping" if me.stopping? },
     ]
 
-    # Used for CLI testing
-    attr_accessor :code
     attr_accessor :launcher
     attr_accessor :environment
 
-    def initialize
-      @code = nil
-    end
-
-    def parse(args=ARGV)
-      @code = nil
-
+    def parse(args = ARGV)
      setup_options(args)
      initialize_logger
      validate!
-      daemonize
-      write_pid
    end
 
    def jruby?
@@ -50,8 +41,10 @@ module Sidekiq
    # global process state irreversibly. PRs which improve the
    # test coverage of Sidekiq::CLI are welcomed.
    def run
+      daemonize if options[:daemon]
+      write_pid
      boot_system
-      print_banner
+      print_banner if environment == 'development' && $stdout.tty?
 
      self_read, self_write = IO.pipe
      sigs = %w(INT TERM TTIN TSTP)
@@ -79,6 +72,13 @@ module Sidekiq
      # fire startup and start multithreading.
      ver = Sidekiq.redis_info['redis_version']
      raise "You are using Redis v#{ver}, Sidekiq requires Redis v2.8.0 or greater" if ver < '2.8'
+      logger.warn "Sidekiq 6.0 will require Redis 4.0+, you are using Redis v#{ver}" if ver < '4'
+
+      # Since the user can pass us a connection pool explicitly in the initializer, we
+      # need to verify the size is large enough or else Sidekiq's performance is dramatically slowed.
+      cursize = Sidekiq.redis_pool.size
+      needed = Sidekiq.options[:concurrency] + 2
+      raise "Your pool of #{cursize} Redis connections is too small, please increase the size to at least #{needed}" if cursize < needed
 
      # cache process identity
      Sidekiq.options[:identity] = identity
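
The new pool-size check above matters for apps that hand Sidekiq an explicit Redis configuration. A minimal sketch of an initializer that satisfies it, assuming `-c 25` and placeholder URL and sizes (illustrative only, not part of this diff):

require 'sidekiq'

# config/initializers/sidekiq.rb (hypothetical example)
# A server concurrency of 25 needs at least 25 + 2 = 27 connections in the pool.
Sidekiq.configure_server do |config|
  config.redis = { url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0"), size: 27 }
end

Sidekiq.configure_client do |config|
  # Client-side processes (web servers, rake tasks) need far fewer connections.
  config.redis = { url: ENV.fetch("REDIS_URL", "redis://localhost:6379/0"), size: 5 }
end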
@@ -93,11 +93,14 @@ module Sidekiq
      logger.debug { "Client Middleware: #{Sidekiq.client_middleware.map(&:klass).join(', ')}" }
      logger.debug { "Server Middleware: #{Sidekiq.server_middleware.map(&:klass).join(', ')}" }
 
+      launch(self_read)
+    end
+
+    def launch(self_read)
      if !options[:daemon]
        logger.info 'Starting processing, hit Ctrl-C to stop'
      end
 
-      require 'sidekiq/launcher'
      @launcher = Sidekiq::Launcher.new(options)
 
      begin
@@ -179,23 +182,15 @@ module Sidekiq
    private
 
    def print_banner
-      # Print logo and banner for development
-      if environment == 'development' && $stdout.tty?
-        puts "\e[#{31}m"
-        puts Sidekiq::CLI.banner
-        puts "\e[0m"
-      end
+      puts "\e[#{31}m"
+      puts Sidekiq::CLI.banner
+      puts "\e[0m"
    end
 
    def daemonize
-      return unless options[:daemon]
-
      raise ArgumentError, "You really should set a logfile if you're going to daemonize" unless options[:logfile]
-      files_to_reopen = []
-      ObjectSpace.each_object(File) do |file|
-        files_to_reopen << file unless file.closed?
-      end
 
+      files_to_reopen = ObjectSpace.each_object(File).reject { |f| f.closed? }
      ::Process.daemon(true, true)
 
      files_to_reopen.each do |file|
@@ -233,15 +228,38 @@ module Sidekiq
    alias_method :☠, :exit
 
    def setup_options(args)
+      # parse CLI options
      opts = parse_options(args)
+
      set_environment opts[:environment]
 
-      cfile = opts[:config_file]
-      opts = parse_config(cfile).merge(opts) if cfile
+      # check config file presence
+      if opts[:config_file]
+        if opts[:config_file] && !File.exist?(opts[:config_file])
+          raise ArgumentError, "No such file #{opts[:config_file]}"
+        end
+      else
+        config_dir = if File.directory?(opts[:require].to_s)
+                       File.join(opts[:require], 'config')
+                     else
+                       File.join(options[:require], 'config')
+                     end
 
+        %w[sidekiq.yml sidekiq.yml.erb].each do |config_file|
+          path = File.join(config_dir, config_file)
+          opts[:config_file] ||= path if File.exist?(path)
+        end
+      end
+
+      # parse config file options
+      opts = parse_config(opts[:config_file]).merge(opts) if opts[:config_file]
+
+      # set defaults
+      opts[:queues] = Array(opts[:queues]) << 'default' if opts[:queues].nil? || opts[:queues].empty?
      opts[:strict] = true if opts[:strict].nil?
-      opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if !opts[:concurrency] && ENV["RAILS_MAX_THREADS"]
+      opts[:concurrency] = Integer(ENV["RAILS_MAX_THREADS"]) if opts[:concurrency].nil? && ENV["RAILS_MAX_THREADS"]
 
+      # merge with defaults
      options.merge!(opts)
    end
 
@@ -252,8 +270,6 @@ module Sidekiq
    def boot_system
      ENV['RACK_ENV'] = ENV['RAILS_ENV'] = environment
 
-      raise ArgumentError, "#{options[:require]} does not exist" unless File.exist?(options[:require])
-
      if File.directory?(options[:require])
        require 'rails'
        if ::Rails::VERSION::MAJOR < 4
@@ -273,10 +289,7 @@ module Sidekiq
        end
        options[:tag] ||= default_tag
      else
-        not_required_message = "#{options[:require]} was not required, you should use an explicit path: " +
-          "./#{options[:require]} or /path/to/#{options[:require]}"
-
-        require(options[:require]) || raise(ArgumentError, not_required_message)
+        require options[:require]
      end
    end
 
@@ -292,8 +305,6 @@ module Sidekiq
    end
 
    def validate!
-      options[:queues] << 'default' if options[:queues].empty?
-
      if !File.exist?(options[:require]) ||
         (File.directory?(options[:require]) && !File.exist?("#{options[:require]}/config/application.rb"))
        logger.info "=================================================================="
@@ -319,6 +330,7 @@ module Sidekiq
 
        o.on '-d', '--daemon', "Daemonize process" do |arg|
          opts[:daemon] = arg
+          puts "WARNING: Daemonization mode will be removed in Sidekiq 6.0, see #4045. Please use a proper process supervisor to start and manage your services"
        end
 
        o.on '-e', '--environment ENV', "Application environment" do |arg|
@@ -358,10 +370,12 @@ module Sidekiq
 
        o.on '-L', '--logfile PATH', "path to writable logfile" do |arg|
          opts[:logfile] = arg
+          puts "WARNING: Logfile redirection will be removed in Sidekiq 6.0, see #4045. Sidekiq will only log to STDOUT"
        end
 
        o.on '-P', '--pidfile PATH', "path to pidfile" do |arg|
          opts[:pidfile] = arg
+          puts "WARNING: PID file creation will be removed in Sidekiq 6.0, see #4045. Please use a proper process supervisor to start and manage your services"
        end
 
        o.on '-V', '--version', "Print version and exit" do |arg|
@@ -375,11 +389,8 @@ module Sidekiq
        logger.info @parser
        die 1
      end
-      @parser.parse!(argv)
 
-      %w[config/sidekiq.yml config/sidekiq.yml.erb].each do |filename|
-        opts[:config_file] ||= filename if File.exist?(filename)
-      end
+      @parser.parse!(argv)
 
      opts
    end
@@ -399,23 +410,18 @@ module Sidekiq
      end
    end
 
-    def parse_config(cfile)
-      opts = {}
-      if File.exist?(cfile)
-        opts = YAML.load(ERB.new(IO.read(cfile)).result) || opts
-
-        if opts.respond_to? :deep_symbolize_keys!
-          opts.deep_symbolize_keys!
-        else
-          symbolize_keys_deep!(opts)
-        end
+    def parse_config(path)
+      opts = YAML.load(ERB.new(File.read(path)).result) || {}
 
-        opts = opts.merge(opts.delete(environment.to_sym) || {})
-        parse_queues(opts, opts.delete(:queues) || [])
+      if opts.respond_to? :deep_symbolize_keys!
+        opts.deep_symbolize_keys!
      else
-        # allow a non-existent config file so Sidekiq
-        # can be deployed by cap with just the defaults.
+        symbolize_keys_deep!(opts)
      end
+
+      opts = opts.merge(opts.delete(environment.to_sym) || {})
+      parse_queues(opts, opts.delete(:queues) || [])
+
      ns = opts.delete(:namespace)
      if ns
        # logger hasn't been initialized yet, puts is all we have.
@@ -429,10 +435,10 @@ module Sidekiq
      queues_and_weights.each { |queue_and_weight| parse_queue(opts, *queue_and_weight) }
    end
 
-    def parse_queue(opts, q, weight=nil)
-      [weight.to_i, 1].max.times do
-        (opts[:queues] ||= []) << q
-      end
+    def parse_queue(opts, queue, weight = nil)
+      opts[:queues] ||= []
+      raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
+      [weight.to_i, 1].max.times { opts[:queues] << queue }
      opts[:strict] = false if weight.to_i > 0
    end
  end
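
To make the reworked `parse_queue` weighting concrete, here is a small standalone sketch (the same logic re-implemented for illustration, not Sidekiq's API) showing what `-q critical,3 -q default` expands to:

# Standalone re-implementation of the weighting logic above, for illustration only.
def expand_queues(pairs)
  opts = {}
  pairs.each do |queue, weight|
    opts[:queues] ||= []
    raise ArgumentError, "queues: #{queue} cannot be defined twice" if opts[:queues].include?(queue)
    [weight.to_i, 1].max.times { opts[:queues] << queue }
    opts[:strict] = false if weight.to_i > 0
  end
  opts
end

p expand_queues([["critical", "3"], ["default", nil]])
# => {:queues=>["critical", "critical", "critical", "default"], :strict=>false}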
data/lib/sidekiq/client.rb CHANGED
@@ -77,9 +77,10 @@ module Sidekiq
    end
 
    ##
-    # Push a large number of jobs to Redis. In practice this method is only
-    # useful if you are pushing thousands of jobs or more. This method
-    # cuts out the redis network round trip latency.
+    # Push a large number of jobs to Redis. This method cuts out the redis
+    # network round trip latency. I wouldn't recommend pushing more than
+    # 1000 per call but YMMV based on network quality, size of job args, etc.
+    # A large number of jobs can cause a bit of Redis command processing latency.
    #
    # Takes the same arguments as #push except that args is expected to be
    # an Array of Arrays. All other keys are duplicated for each job. Each job
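
For reference, a minimal sketch of the bulk push that comment describes (`HardWorker` is a placeholder worker class); each inner array is one job's arguments, and all jobs are enqueued in a single round trip:

require 'sidekiq'

class HardWorker
  include Sidekiq::Worker
  def perform(user_id); end
end

# Enqueue three jobs at once; keep batches around 1,000 jobs or fewer per call.
Sidekiq::Client.push_bulk('class' => HardWorker, 'args' => [[1], [2], [3]])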
data/lib/sidekiq/ctl.rb CHANGED
@@ -0,0 +1,221 @@
+#!/usr/bin/env ruby
+
+require 'fileutils'
+require 'sidekiq/api'
+
+class Sidekiq::Ctl
+  DEFAULT_KILL_TIMEOUT = 10
+  CMD = File.basename($0)
+
+  attr_reader :stage, :pidfile, :kill_timeout
+
+  def self.print_usage
+    puts "#{CMD} - control Sidekiq from the command line."
+    puts
+    puts "Usage: #{CMD} quiet <pidfile> <kill_timeout>"
+    puts "       #{CMD} stop <pidfile> <kill_timeout>"
+    puts "       #{CMD} status <section>"
+    puts
+    puts " <pidfile> is path to a pidfile"
+    puts " <kill_timeout> is number of seconds to wait until Sidekiq exits"
+    puts " (default: #{Sidekiq::Ctl::DEFAULT_KILL_TIMEOUT}), after which Sidekiq will be KILL'd"
+    puts
+    puts " <section> (optional) view a specific section of the status output"
+    puts " Valid sections are: #{Sidekiq::Ctl::Status::VALID_SECTIONS.join(', ')}"
+    puts
+    puts "Be sure to set the kill_timeout LONGER than Sidekiq's -t timeout. If you want"
+    puts "to wait 60 seconds for jobs to finish, use `sidekiq -t 60` and `sidekiqctl stop"
+    puts " path_to_pidfile 61`"
+    puts
+  end
+
+  def initialize(stage, pidfile, timeout)
+    @stage = stage
+    @pidfile = pidfile
+    @kill_timeout = timeout
+
+    done('No pidfile given', :error) if !pidfile
+    done("Pidfile #{pidfile} does not exist", :warn) if !File.exist?(pidfile)
+    done('Invalid pidfile content', :error) if pid == 0
+
+    fetch_process
+
+    begin
+      send(stage)
+    rescue NoMethodError
+      done "Invalid command: #{stage}", :error
+    end
+  end
+
+  def fetch_process
+    Process.kill(0, pid)
+  rescue Errno::ESRCH
+    done "Process doesn't exist", :error
+  # We were not allowed to send a signal, but the process must have existed
+  # when Process.kill() was called.
+  rescue Errno::EPERM
+    return pid
+  end
+
+  def done(msg, error = nil)
+    puts msg
+    exit(exit_signal(error))
+  end
+
+  def exit_signal(error)
+    (error == :error) ? 1 : 0
+  end
+
+  def pid
+    @pid ||= File.read(pidfile).to_i
+  end
+
+  def quiet
+    `kill -TSTP #{pid}`
+  end
+
+  def stop
+    `kill -TERM #{pid}`
+    kill_timeout.times do
+      begin
+        Process.kill(0, pid)
+      rescue Errno::ESRCH
+        FileUtils.rm_f pidfile
+        done 'Sidekiq shut down gracefully.'
+      rescue Errno::EPERM
+        done 'Not permitted to shut down Sidekiq.'
+      end
+      sleep 1
+    end
+    `kill -9 #{pid}`
+    FileUtils.rm_f pidfile
+    done 'Sidekiq shut down forcefully.'
+  end
+  alias_method :shutdown, :stop
+
+  class Status
+    VALID_SECTIONS = %w[all version overview processes queues]
+    def display(section = nil)
+      section ||= 'all'
+      unless VALID_SECTIONS.include? section
+        puts "I don't know how to check the status of '#{section}'!"
+        puts "Try one of these: #{VALID_SECTIONS.join(', ')}"
+        return
+      end
+      send(section)
+    rescue StandardError => e
+      puts "Couldn't get status: #{e}"
+    end
+
+    def all
+      version
+      puts
+      overview
+      puts
+      processes
+      puts
+      queues
+    end
+
+    def version
+      puts "Sidekiq #{Sidekiq::VERSION}"
+      puts Time.now
+    end
+
+    def overview
+      puts '---- Overview ----'
+      puts "  Processed: #{delimit stats.processed}"
+      puts "     Failed: #{delimit stats.failed}"
+      puts "       Busy: #{delimit stats.workers_size}"
+      puts "   Enqueued: #{delimit stats.enqueued}"
+      puts "    Retries: #{delimit stats.retry_size}"
+      puts "  Scheduled: #{delimit stats.scheduled_size}"
+      puts "       Dead: #{delimit stats.dead_size}"
+    end
+
+    def processes
+      puts "---- Processes (#{process_set.size}) ----"
+      process_set.each_with_index do |process, index|
+        puts "#{process['identity']} #{tags_for(process)}"
+        puts "  Started: #{Time.at(process['started_at'])} (#{time_ago(process['started_at'])})"
+        puts "  Threads: #{process['concurrency']} (#{process['busy']} busy)"
+        puts "   Queues: #{split_multiline(process['queues'].sort, pad: 11)}"
+        puts '' unless (index+1) == process_set.size
+      end
+    end
+
+    COL_PAD = 2
+    def queues
+      puts "---- Queues (#{queue_data.size}) ----"
+      columns = {
+        name: [:ljust, (['name'] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
+        size: [:rjust, (['size'] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
+        latency: [:rjust, (['latency'] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
+      }
+      columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
+      puts
+      queue_data.each do |q|
+        columns.each do |col, (dir, width)|
+          print q.send(col).public_send(dir, width)
+        end
+        puts
+      end
+    end
+
+    private
+
+    def delimit(number)
+      number.to_s.reverse.scan(/.{1,3}/).join(',').reverse
+    end
+
+    def split_multiline(values, opts = {})
+      return 'none' unless values
+      pad = opts[:pad] || 0
+      max_length = opts[:max_length] || (80 - pad)
+      out = []
+      line = ''
+      values.each do |value|
+        if (line.length + value.length) > max_length
+          out << line
+          line = ' ' * pad
+        end
+        line << value + ', '
+      end
+      out << line[0..-3]
+      out.join("\n")
+    end
+
+    def tags_for(process)
+      tags = [
+        process['tag'],
+        process['labels'],
+        (process['quiet'] == 'true' ? 'quiet' : nil)
+      ].flatten.compact
+      tags.any? ? "[#{tags.join('] [')}]" : nil
+    end
+
+    def time_ago(timestamp)
+      seconds = Time.now - Time.at(timestamp)
+      return 'just now' if seconds < 60
+      return 'a minute ago' if seconds < 120
+      return "#{seconds.floor / 60} minutes ago" if seconds < 3600
+      return 'an hour ago' if seconds < 7200
+      "#{seconds.floor / 60 / 60} hours ago"
+    end
+
+    QUEUE_STRUCT = Struct.new(:name, :size, :latency)
+    def queue_data
+      @queue_data ||= Sidekiq::Queue.all.map do |q|
+        QUEUE_STRUCT.new(q.name, q.size.to_s, sprintf('%#.2f', q.latency))
+      end
+    end
+
+    def process_set
+      @process_set ||= Sidekiq::ProcessSet.new
+    end
+
+    def stats
+      @stats ||= Sidekiq::Stats.new
+    end
+  end
+end
data/lib/sidekiq/job_logger.rb CHANGED
@@ -3,7 +3,7 @@ module Sidekiq
  class JobLogger
 
    def call(item, queue)
-      start = Time.now
+      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      logger.info("start")
      yield
      logger.info("done: #{elapsed(start)} sec")
@@ -15,7 +15,7 @@ module Sidekiq
    private
 
    def elapsed(start)
-      (Time.now - start).round(3)
+      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
    end
 
    def logger
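
The switch from `Time.now` to the monotonic clock keeps job timing immune to NTP adjustments and other wall-clock changes. A minimal standalone sketch of the same pattern:

# Elapsed time measured on the monotonic clock is unaffected by wall-clock changes.
start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
sleep 0.25  # stand-in for the yielded job execution
elapsed = (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
puts "done: #{elapsed} sec"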
data/lib/sidekiq/job_retry.rb CHANGED
@@ -56,7 +56,8 @@ module Sidekiq
  # end
  #
  class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
    include Sidekiq::Util
 
@@ -71,7 +72,7 @@ module Sidekiq
    # require the worker to be instantiated.
    def global(msg, queue)
      yield
-    rescue Skip => ex
+    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,9 +81,19 @@ module Sidekiq
      # ignore, will be pushed back onto queue during hard_shutdown
      raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      raise e unless msg['retry']
-      attempt_retry(nil, msg, queue, e)
-      raise e
+      if msg['retry']
+        attempt_retry(nil, msg, queue, e)
+      else
+        Sidekiq.death_handlers.each do |handler|
+          begin
+            handler.call(msg, e)
+          rescue => handler_ex
+            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
+          end
+        end
+      end
+
+      raise Handled
    end
 
 
@@ -96,7 +107,7 @@ module Sidekiq
    # calling the handle_exception handlers.
    def local(worker, msg, queue)
      yield
-    rescue Skip => ex
+    rescue Handled => ex
      raise ex
    rescue Sidekiq::Shutdown => ey
      # ignore, will be pushed back onto queue during hard_shutdown
@@ -130,9 +141,7 @@ module Sidekiq
        queue
      end
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
      if m.respond_to?(:scrub!)
        m.force_encoding("utf-8")
        m.scrub!
@@ -158,7 +167,8 @@ module Sidekiq
 
      if count < max_retry_attempts
        delay = delay_for(worker, count, exception)
-        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # Logging here can break retries if the logging device raises ENOSPC #3979
+        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
        payload = Sidekiq.dump_json(msg)
        Sidekiq.redis do |conn|
@@ -171,7 +181,6 @@ module Sidekiq
    end
 
    def retries_exhausted(worker, msg, exception)
-      logger.debug { "Retries exhausted for job" }
      begin
        block = worker && worker.sidekiq_retries_exhausted_block
        block.call(msg, exception) if block
@@ -191,7 +200,7 @@ module Sidekiq
    end
 
    def send_to_morgue(msg)
-      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
      payload = Sidekiq.dump_json(msg)
      DeadSet.new.kill(payload, notify_failure: false)
    end
@@ -237,5 +246,17 @@ module Sidekiq
      exception_caused_by_shutdown?(e.cause, checked_causes)
    end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      begin
+        # App code can stuff all sorts of crazy binary data into the error message
+        # that won't convert to JSON.
+        exception.message.to_s[0, 10_000]
+      rescue
+        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
+      end
+    end
+
  end
 end
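
The `Sidekiq.death_handlers` hook invoked above runs when a job is dead rather than retried; handlers are registered in the server configuration. A minimal sketch, with the notification body left as a placeholder:

require 'sidekiq'

Sidekiq.configure_server do |config|
  config.death_handlers << ->(job, exception) do
    # Replace with your error tracker of choice.
    puts "#{job['class']} #{job['jid']} just died: #{exception.message}"
  end
end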
data/lib/sidekiq/launcher.rb CHANGED
@@ -13,6 +13,8 @@ module Sidekiq
 
    attr_accessor :manager, :poller, :fetcher
 
+    STATS_TTL = 5*365*24*60*60
+
    def initialize(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
@@ -38,7 +40,7 @@ module Sidekiq
    # return until all work is complete and cleaned up.
    # It can take up to the timeout to complete.
    def stop
-      deadline = Time.now + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
      @done = true
      @manager.quiet
@@ -72,8 +74,9 @@ module Sidekiq
      key = identity
      fails = procd = 0
      begin
-        Processor::FAILURE.update {|curr| fails = curr; 0 }
-        Processor::PROCESSED.update {|curr| procd = curr; 0 }
+        fails = Processor::FAILURE.reset
+        procd = Processor::PROCESSED.reset
+        curstate = Processor::WORKER_STATE.dup
 
        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")
@@ -81,10 +84,14 @@ module Sidekiq
          conn.multi do
            conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
            conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
            conn.del(workers_key)
-            Processor::WORKER_STATE.each_pair do |tid, hash|
+            curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
@@ -96,7 +103,7 @@ module Sidekiq
          conn.multi do
            conn.sadd('processes', key)
            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          end
@@ -112,8 +119,8 @@ module Sidekiq
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
        # don't lose the counts if there was a network issue
-        Processor::PROCESSED.increment(procd)
-        Processor::FAILURE.increment(fails)
+        Processor::PROCESSED.incr(procd)
+        Processor::FAILURE.incr(fails)
      end
    end
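
The heartbeat above now expires the per-day `stat:processed:<date>` and `stat:failed:<date>` counters after roughly five years. Those counters back the stats API; a minimal sketch of reading them, assuming a configured Redis connection:

require 'sidekiq/api'

stats = Sidekiq::Stats.new
puts "processed: #{stats.processed}, failed: #{stats.failed}"

# Per-day counts come from the dated keys written in the heartbeat above.
Sidekiq::Stats::History.new(7).processed.each do |date, count|
  puts "#{date}: #{count}"
end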