sidekiq 5.1.1 → 5.2.10

Potentially problematic release.

Files changed (56)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.gitignore +2 -0
  4. data/.travis.yml +5 -8
  5. data/COMM-LICENSE +11 -9
  6. data/Changes.md +96 -0
  7. data/Ent-Changes.md +28 -0
  8. data/Gemfile +16 -5
  9. data/LICENSE +1 -1
  10. data/Pro-Changes.md +43 -0
  11. data/README.md +1 -1
  12. data/Rakefile +2 -1
  13. data/bin/sidekiqctl +13 -92
  14. data/bin/sidekiqload +1 -1
  15. data/lib/sidekiq/api.rb +62 -28
  16. data/lib/sidekiq/cli.rb +68 -61
  17. data/lib/sidekiq/client.rb +31 -30
  18. data/lib/sidekiq/ctl.rb +221 -0
  19. data/lib/sidekiq/delay.rb +1 -0
  20. data/lib/sidekiq/fetch.rb +1 -1
  21. data/lib/sidekiq/job_logger.rb +4 -3
  22. data/lib/sidekiq/job_retry.rb +40 -14
  23. data/lib/sidekiq/launcher.rb +19 -13
  24. data/lib/sidekiq/logging.rb +3 -3
  25. data/lib/sidekiq/manager.rb +3 -4
  26. data/lib/sidekiq/middleware/server/active_record.rb +2 -1
  27. data/lib/sidekiq/processor.rb +79 -28
  28. data/lib/sidekiq/rails.rb +4 -8
  29. data/lib/sidekiq/redis_connection.rb +29 -2
  30. data/lib/sidekiq/scheduled.rb +33 -4
  31. data/lib/sidekiq/testing.rb +4 -4
  32. data/lib/sidekiq/util.rb +1 -1
  33. data/lib/sidekiq/version.rb +1 -1
  34. data/lib/sidekiq/web/action.rb +2 -2
  35. data/lib/sidekiq/web/application.rb +28 -3
  36. data/lib/sidekiq/web/helpers.rb +14 -7
  37. data/lib/sidekiq/web/router.rb +10 -10
  38. data/lib/sidekiq/web.rb +4 -4
  39. data/lib/sidekiq/worker.rb +31 -15
  40. data/lib/sidekiq.rb +8 -7
  41. data/sidekiq.gemspec +5 -12
  42. data/web/assets/javascripts/application.js +0 -0
  43. data/web/assets/javascripts/dashboard.js +15 -5
  44. data/web/assets/stylesheets/application.css +35 -2
  45. data/web/assets/stylesheets/bootstrap.css +2 -2
  46. data/web/locales/ar.yml +1 -0
  47. data/web/locales/en.yml +1 -0
  48. data/web/locales/es.yml +3 -3
  49. data/web/views/_footer.erb +3 -0
  50. data/web/views/_nav.erb +3 -17
  51. data/web/views/layout.erb +1 -1
  52. data/web/views/queue.erb +1 -0
  53. data/web/views/queues.erb +1 -1
  54. data/web/views/retries.erb +4 -0
  55. metadata +19 -87
  56. data/lib/sidekiq/middleware/server/active_record_cache.rb +0 -11
data/lib/sidekiq/ctl.rb ADDED
@@ -0,0 +1,221 @@
+ #!/usr/bin/env ruby
+
+ require 'fileutils'
+ require 'sidekiq/api'
+
+ class Sidekiq::Ctl
+   DEFAULT_KILL_TIMEOUT = 10
+   CMD = File.basename($0)
+
+   attr_reader :stage, :pidfile, :kill_timeout
+
+   def self.print_usage
+     puts "#{CMD} - control Sidekiq from the command line."
+     puts
+     puts "Usage: #{CMD} quiet <pidfile> <kill_timeout>"
+     puts " #{CMD} stop <pidfile> <kill_timeout>"
+     puts " #{CMD} status <section>"
+     puts
+     puts " <pidfile> is path to a pidfile"
+     puts " <kill_timeout> is number of seconds to wait until Sidekiq exits"
+     puts " (default: #{Sidekiq::Ctl::DEFAULT_KILL_TIMEOUT}), after which Sidekiq will be KILL'd"
+     puts
+     puts " <section> (optional) view a specific section of the status output"
+     puts " Valid sections are: #{Sidekiq::Ctl::Status::VALID_SECTIONS.join(', ')}"
+     puts
+     puts "Be sure to set the kill_timeout LONGER than Sidekiq's -t timeout. If you want"
+     puts "to wait 60 seconds for jobs to finish, use `sidekiq -t 60` and `sidekiqctl stop"
+     puts " path_to_pidfile 61`"
+     puts
+   end
+
+   def initialize(stage, pidfile, timeout)
+     @stage = stage
+     @pidfile = pidfile
+     @kill_timeout = timeout
+
+     done('No pidfile given', :error) if !pidfile
+     done("Pidfile #{pidfile} does not exist", :warn) if !File.exist?(pidfile)
+     done('Invalid pidfile content', :error) if pid == 0
+
+     fetch_process
+
+     begin
+       send(stage)
+     rescue NoMethodError
+       done "Invalid command: #{stage}", :error
+     end
+   end
+
+   def fetch_process
+     Process.kill(0, pid)
+   rescue Errno::ESRCH
+     done "Process doesn't exist", :error
+   # We were not allowed to send a signal, but the process must have existed
+   # when Process.kill() was called.
+   rescue Errno::EPERM
+     return pid
+   end
+
+   def done(msg, error = nil)
+     puts msg
+     exit(exit_signal(error))
+   end
+
+   def exit_signal(error)
+     (error == :error) ? 1 : 0
+   end
+
+   def pid
+     @pid ||= File.read(pidfile).to_i
+   end
+
+   def quiet
+     `kill -TSTP #{pid}`
+   end
+
+   def stop
+     `kill -TERM #{pid}`
+     kill_timeout.times do
+       begin
+         Process.kill(0, pid)
+       rescue Errno::ESRCH
+         FileUtils.rm_f pidfile
+         done 'Sidekiq shut down gracefully.'
+       rescue Errno::EPERM
+         done 'Not permitted to shut down Sidekiq.'
+       end
+       sleep 1
+     end
+     `kill -9 #{pid}`
+     FileUtils.rm_f pidfile
+     done 'Sidekiq shut down forcefully.'
+   end
+   alias_method :shutdown, :stop
+
+   class Status
+     VALID_SECTIONS = %w[all version overview processes queues]
+     def display(section = nil)
+       section ||= 'all'
+       unless VALID_SECTIONS.include? section
+         puts "I don't know how to check the status of '#{section}'!"
+         puts "Try one of these: #{VALID_SECTIONS.join(', ')}"
+         return
+       end
+       send(section)
+     rescue StandardError => e
+       puts "Couldn't get status: #{e}"
+     end
+
+     def all
+       version
+       puts
+       overview
+       puts
+       processes
+       puts
+       queues
+     end
+
+     def version
+       puts "Sidekiq #{Sidekiq::VERSION}"
+       puts Time.now
+     end
+
+     def overview
+       puts '---- Overview ----'
+       puts " Processed: #{delimit stats.processed}"
+       puts " Failed: #{delimit stats.failed}"
+       puts " Busy: #{delimit stats.workers_size}"
+       puts " Enqueued: #{delimit stats.enqueued}"
+       puts " Retries: #{delimit stats.retry_size}"
+       puts " Scheduled: #{delimit stats.scheduled_size}"
+       puts " Dead: #{delimit stats.dead_size}"
+     end
+
+     def processes
+       puts "---- Processes (#{process_set.size}) ----"
+       process_set.each_with_index do |process, index|
+         puts "#{process['identity']} #{tags_for(process)}"
+         puts " Started: #{Time.at(process['started_at'])} (#{time_ago(process['started_at'])})"
+         puts " Threads: #{process['concurrency']} (#{process['busy']} busy)"
+         puts " Queues: #{split_multiline(process['queues'].sort, pad: 11)}"
+         puts '' unless (index+1) == process_set.size
+       end
+     end
+
+     COL_PAD = 2
+     def queues
+       puts "---- Queues (#{queue_data.size}) ----"
+       columns = {
+         name: [:ljust, (['name'] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
+         size: [:rjust, (['size'] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
+         latency: [:rjust, (['latency'] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
+       }
+       columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
+       puts
+       queue_data.each do |q|
+         columns.each do |col, (dir, width)|
+           print q.send(col).public_send(dir, width)
+         end
+         puts
+       end
+     end
+
+     private
+
+     def delimit(number)
+       number.to_s.reverse.scan(/.{1,3}/).join(',').reverse
+     end
+
+     def split_multiline(values, opts = {})
+       return 'none' unless values
+       pad = opts[:pad] || 0
+       max_length = opts[:max_length] || (80 - pad)
+       out = []
+       line = ''
+       values.each do |value|
+         if (line.length + value.length) > max_length
+           out << line
+           line = ' ' * pad
+         end
+         line << value + ', '
+       end
+       out << line[0..-3]
+       out.join("\n")
+     end
+
+     def tags_for(process)
+       tags = [
+         process['tag'],
+         process['labels'],
+         (process['quiet'] == 'true' ? 'quiet' : nil)
+       ].flatten.compact
+       tags.any? ? "[#{tags.join('] [')}]" : nil
+     end
+
+     def time_ago(timestamp)
+       seconds = Time.now - Time.at(timestamp)
+       return 'just now' if seconds < 60
+       return 'a minute ago' if seconds < 120
+       return "#{seconds.floor / 60} minutes ago" if seconds < 3600
+       return 'an hour ago' if seconds < 7200
+       "#{seconds.floor / 60 / 60} hours ago"
+     end
+
+     QUEUE_STRUCT = Struct.new(:name, :size, :latency)
+     def queue_data
+       @queue_data ||= Sidekiq::Queue.all.map do |q|
+         QUEUE_STRUCT.new(q.name, q.size.to_s, sprintf('%#.2f', q.latency))
+       end
+     end
+
+     def process_set
+       @process_set ||= Sidekiq::ProcessSet.new
+     end
+
+     def stats
+       @stats ||= Sidekiq::Stats.new
+     end
+   end
+ end
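For reference, the class above is what the slimmed-down bin/sidekiqctl (see entry 13 in the file list) now delegates to. A minimal usage sketch, assuming a reachable Redis and an existing pidfile; the pidfile path and timeout are illustrative:

    require 'sidekiq/ctl'

    # Print one section of the status report (VALID_SECTIONS above lists the options).
    Sidekiq::Ctl::Status.new.display('overview')

    # Quiet a running process, then stop it, waiting up to 25 seconds before KILL.
    Sidekiq::Ctl.new('quiet', '/var/run/sidekiq.pid', 25)
    Sidekiq::Ctl.new('stop',  '/var/run/sidekiq.pid', 25)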
data/lib/sidekiq/delay.rb CHANGED
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  module Sidekiq
    module Extensions
 
data/lib/sidekiq/fetch.rb CHANGED
@@ -13,7 +13,7 @@ module Sidekiq
      end
 
      def queue_name
-       queue.sub(/.*queue:/, ''.freeze)
+       queue.sub(/.*queue:/, '')
      end
 
      def requeue
data/lib/sidekiq/job_logger.rb CHANGED
@@ -1,9 +1,10 @@
+ # frozen_string_literal: true
  module Sidekiq
    class JobLogger
 
      def call(item, queue)
-       start = Time.now
-       logger.info("start".freeze)
+       start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       logger.info("start")
        yield
        logger.info("done: #{elapsed(start)} sec")
      rescue Exception
@@ -14,7 +15,7 @@ module Sidekiq
      private
 
      def elapsed(start)
-       (Time.now - start).round(3)
+       (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
      end
 
      def logger
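Both timing changes above swap wall-clock arithmetic for the monotonic clock, so reported job durations are no longer skewed by NTP corrections or DST shifts. The same pattern in isolation (plain Ruby; the work method is a placeholder):

    start = Process.clock_gettime(Process::CLOCK_MONOTONIC)
    perform_some_work   # placeholder for the yielded job
    elapsed = (Process.clock_gettime(Process::CLOCK_MONOTONIC) - start).round(3)
    puts "done: #{elapsed} sec"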
data/lib/sidekiq/job_retry.rb CHANGED
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  require 'sidekiq/scheduled'
  require 'sidekiq/api'
 
@@ -55,7 +56,8 @@ module Sidekiq
    # end
    #
    class JobRetry
-     class Skip < ::RuntimeError; end
+     class Handled < ::RuntimeError; end
+     class Skip < Handled; end
 
      include Sidekiq::Util
 
@@ -70,7 +72,7 @@ module Sidekiq
      # require the worker to be instantiated.
      def global(msg, queue)
        yield
-     rescue Skip => ex
+     rescue Handled => ex
        raise ex
      rescue Sidekiq::Shutdown => ey
        # ignore, will be pushed back onto queue during hard_shutdown
@@ -79,9 +81,19 @@ module Sidekiq
        # ignore, will be pushed back onto queue during hard_shutdown
        raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-       raise e unless msg['retry']
-       attempt_retry(nil, msg, queue, e)
-       raise e
+       if msg['retry']
+         attempt_retry(nil, msg, queue, e)
+       else
+         Sidekiq.death_handlers.each do |handler|
+           begin
+             handler.call(msg, e)
+           rescue => handler_ex
+             handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
+           end
+         end
+       end
+
+       raise Handled
      end
 
 
@@ -95,7 +107,7 @@ module Sidekiq
      # calling the handle_exception handlers.
      def local(worker, msg, queue)
        yield
-     rescue Skip => ex
+     rescue Handled => ex
        raise ex
      rescue Sidekiq::Shutdown => ey
        # ignore, will be pushed back onto queue during hard_shutdown
@@ -129,9 +141,7 @@ module Sidekiq
          queue
        end
 
-       # App code can stuff all sorts of crazy binary data into the error message
-       # that won't convert to JSON.
-       m = exception.message.to_s[0, 10_000]
+       m = exception_message(exception)
        if m.respond_to?(:scrub!)
          m.force_encoding("utf-8")
          m.scrub!
@@ -157,7 +167,8 @@ module Sidekiq
 
        if count < max_retry_attempts
          delay = delay_for(worker, count, exception)
-         logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+         # Logging here can break retries if the logging device raises ENOSPC #3979
+         #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
          retry_at = Time.now.to_f + delay
          payload = Sidekiq.dump_json(msg)
          Sidekiq.redis do |conn|
@@ -170,7 +181,6 @@ module Sidekiq
      end
 
      def retries_exhausted(worker, msg, exception)
-       logger.debug { "Retries exhausted for job" }
        begin
          block = worker && worker.sidekiq_retries_exhausted_block
          block.call(msg, exception) if block
@@ -190,7 +200,7 @@ module Sidekiq
      end
 
      def send_to_morgue(msg)
-       Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+       logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
        payload = Sidekiq.dump_json(msg)
        DeadSet.new.kill(payload, notify_failure: false)
      end
@@ -204,7 +214,11 @@ module Sidekiq
      end
 
      def delay_for(worker, count, exception)
-       worker && worker.sidekiq_retry_in_block && retry_in(worker, count, exception) || seconds_to_delay(count)
+       if worker && worker.sidekiq_retry_in_block
+         custom_retry_in = retry_in(worker, count, exception).to_i
+         return custom_retry_in if custom_retry_in > 0
+       end
+       seconds_to_delay(count)
      end
 
      # delayed_job uses the same basic formula
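delay_for now coerces the result of a custom sidekiq_retry_in block with to_i and only honors it when positive, falling back to the default backoff otherwise. A worker-side sketch of the hook this guards (the worker and exception class are illustrative):

    class SyncWorker
      include Sidekiq::Worker

      # Returning nil (or anything non-positive) now means "use Sidekiq's default
      # formula" instead of scheduling an immediate retry.
      sidekiq_retry_in do |count, exception|
        case exception
        when Net::OpenTimeout then 60 * (count + 1)   # back off harder on timeouts
        else nil
        end
      end

      def perform(record_id)
        # ...
      end
    end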
@@ -214,7 +228,7 @@ module Sidekiq
 
      def retry_in(worker, count, exception)
        begin
-         worker.sidekiq_retry_in_block.call(count, exception).to_i
+         worker.sidekiq_retry_in_block.call(count, exception)
        rescue Exception => e
          handle_exception(e, { context: "Failure scheduling retry using the defined `sidekiq_retry_in` in #{worker.class.name}, falling back to default" })
          nil
@@ -232,5 +246,17 @@ module Sidekiq
        exception_caused_by_shutdown?(e.cause, checked_causes)
      end
 
+     # Extract message from exception.
+     # Set a default if the message raises an error
+     def exception_message(exception)
+       begin
+         # App code can stuff all sorts of crazy binary data into the error message
+         # that won't convert to JSON.
+         exception.message.to_s[0, 10_000]
+       rescue
+         "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
+       end
+     end
+
    end
  end
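The death handler loop added to `global` above means a job whose `retry` option is false now reports to Sidekiq.death_handlers instead of silently re-raising. Registering one follows the usual configure_server pattern (a sketch; handlers receive the job hash and the exception, exactly as called in the diff):

    Sidekiq.configure_server do |config|
      config.death_handlers << ->(job, ex) do
        # `job` is the raw job hash, `ex` the error that killed it.
        Sidekiq.logger.warn("#{job['class']} #{job['jid']} died: #{ex.message}")
      end
    end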
data/lib/sidekiq/launcher.rb CHANGED
@@ -1,4 +1,3 @@
- # encoding: utf-8
  # frozen_string_literal: true
  require 'sidekiq/manager'
  require 'sidekiq/fetch'
@@ -14,6 +13,8 @@ module Sidekiq
 
    attr_accessor :manager, :poller, :fetcher
 
+   STATS_TTL = 5*365*24*60*60
+
    def initialize(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
@@ -39,7 +40,7 @@ module Sidekiq
    # return until all work is complete and cleaned up.
    # It can take up to the timeout to complete.
    def stop
-     deadline = Time.now + @options[:timeout]
+     deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
      @done = true
      @manager.quiet
@@ -73,19 +74,24 @@ module Sidekiq
      key = identity
      fails = procd = 0
      begin
-       Processor::FAILURE.update {|curr| fails = curr; 0 }
-       Processor::PROCESSED.update {|curr| procd = curr; 0 }
+       fails = Processor::FAILURE.reset
+       procd = Processor::PROCESSED.reset
+       curstate = Processor::WORKER_STATE.dup
 
-       workers_key = "#{key}:workers".freeze
-       nowdate = Time.now.utc.strftime("%Y-%m-%d".freeze)
+       workers_key = "#{key}:workers"
+       nowdate = Time.now.utc.strftime("%Y-%m-%d")
        Sidekiq.redis do |conn|
          conn.multi do
-           conn.incrby("stat:processed".freeze, procd)
+           conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
-           conn.incrby("stat:failed".freeze, fails)
+           conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+           conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
+           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
            conn.del(workers_key)
-           Processor::WORKER_STATE.each_pair do |tid, hash|
+           curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
@@ -96,8 +102,8 @@ module Sidekiq
        _, exists, _, _, msg = Sidekiq.redis do |conn|
          conn.multi do
            conn.sadd('processes', key)
-           conn.exists(key)
-           conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+           conn.exists?(key)
+           conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          end
@@ -113,8 +119,8 @@ module Sidekiq
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
        # don't lose the counts if there was a network issue
-       Processor::PROCESSED.incr(procd)
-       Processor::FAILURE.incr(fails)
+       Processor::PROCESSED.incr(procd)
+       Processor::FAILURE.incr(fails)
      end
    end
 
data/lib/sidekiq/logging.rb CHANGED
@@ -33,9 +33,9 @@ module Sidekiq
    def self.job_hash_context(job_hash)
      # If we're using a wrapper class, like ActiveJob, use the "wrapped"
      # attribute to expose the underlying thing.
-     klass = job_hash['wrapped'.freeze] || job_hash["class".freeze]
-     bid = job_hash['bid'.freeze]
-     "#{klass} JID-#{job_hash['jid'.freeze]}#{" BID-#{bid}" if bid}"
+     klass = job_hash['wrapped'] || job_hash["class"]
+     bid = job_hash['bid']
+     "#{klass} JID-#{job_hash['jid']}#{" BID-#{bid}" if bid}"
    end
 
    def self.with_job_hash_context(job_hash, &block)
data/lib/sidekiq/manager.rb CHANGED
@@ -1,4 +1,3 @@
- # encoding: utf-8
  # frozen_string_literal: true
  require 'sidekiq/util'
  require 'sidekiq/processor'
@@ -31,7 +30,7 @@ module Sidekiq
    def initialize(options={})
      logger.debug { options.inspect }
      @options = options
-     @count = options[:concurrency] || 25
+     @count = options[:concurrency] || 10
      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
      @done = false
@@ -71,11 +70,11 @@ module Sidekiq
      return if @workers.empty?
 
      logger.info { "Pausing to allow workers to finish..." }
-     remaining = deadline - Time.now
+     remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      while remaining > PAUSE_TIME
        return if @workers.empty?
        sleep PAUSE_TIME
-       remaining = deadline - Time.now
+       remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      end
      return if @workers.empty?
 
data/lib/sidekiq/middleware/server/active_record.rb CHANGED
@@ -1,3 +1,4 @@
+ # frozen_string_literal: true
  module Sidekiq
    module Middleware
      module Server
@@ -6,7 +7,7 @@ module Sidekiq
        def initialize
          # With Rails 5+ we must use the Reloader **always**.
          # The reloader handles code loading and db connection management.
-         if ::Rails::VERSION::MAJOR >= 5
+         if defined?(::Rails) && defined?(::Rails::VERSION) && ::Rails::VERSION::MAJOR >= 5
            raise ArgumentError, "Rails 5 no longer needs or uses the ActiveRecord middleware."
          end
        end
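The extra defined? checks let this file be loaded in non-Rails processes without raising NameError, while Rails 5+ applications that still add the middleware explicitly keep failing fast. A sketch of the configuration that triggers the ArgumentError (illustrative initializer):

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        # On Rails 5+ this raises ArgumentError: the Rails Reloader already
        # manages ActiveRecord connections for each job.
        chain.add Sidekiq::Middleware::Server::ActiveRecord
      end
    end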
data/lib/sidekiq/processor.rb CHANGED
@@ -4,8 +4,6 @@ require 'sidekiq/fetch'
  require 'sidekiq/job_logger'
  require 'sidekiq/job_retry'
  require 'thread'
- require 'concurrent/map'
- require 'concurrent/atomic/atomic_fixnum'
 
  module Sidekiq
    ##
@@ -89,7 +87,7 @@ module Sidekiq
    def get_one
      begin
        work = @strategy.retrieve_work
-       (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
+       (logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }; @down = nil) if @down
        work
      rescue Sidekiq::Shutdown
      rescue => ex
@@ -109,7 +107,7 @@ module Sidekiq
 
    def handle_fetch_exception(ex)
      if !@down
-       @down = Time.now
+       @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
        logger.error("Error fetching job: #{ex}")
        handle_exception(ex)
      end
@@ -132,9 +130,9 @@ module Sidekiq
      # the Reloader. It handles code loading, db connection management, etc.
      # Effectively this block denotes a "unit of work" to Rails.
      @reloader.call do
-       klass = constantize(job_hash['class'.freeze])
+       klass = constantize(job_hash['class'])
        worker = klass.new
-       worker.jid = job_hash['jid'.freeze]
+       worker.jid = job_hash['jid']
        @retrier.local(worker, pristine, queue) do
          yield worker
        end
@@ -149,24 +147,22 @@ module Sidekiq
      jobstr = work.job
      queue = work.queue_name
 
-     ack = false
+     # Treat malformed JSON as a special case: job goes straight to the morgue.
+     job_hash = nil
      begin
-       # Treat malformed JSON as a special case: job goes straight to the morgue.
-       job_hash = nil
-       begin
-         job_hash = Sidekiq.load_json(jobstr)
-       rescue => ex
-         handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
-         # we can't notify because the job isn't a valid hash payload.
-         DeadSet.new.kill(jobstr, notify_failure: false)
-         ack = true
-         raise
-       end
+       job_hash = Sidekiq.load_json(jobstr)
+     rescue => ex
+       handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
+       # we can't notify because the job isn't a valid hash payload.
+       DeadSet.new.kill(jobstr, notify_failure: false)
+       return work.acknowledge
+     end
 
-       ack = true
+     ack = true
+     begin
        dispatch(job_hash, queue) do |worker|
          Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
-           execute_job(worker, cloned(job_hash['args'.freeze]))
+           execute_job(worker, cloned(job_hash['args']))
          end
        end
      rescue Sidekiq::Shutdown
@@ -174,10 +170,19 @@ module Sidekiq
        # within the timeout. Don't acknowledge the work since
        # we didn't properly finish it.
        ack = false
-     rescue Exception => ex
-       e = ex.is_a?(::Sidekiq::JobRetry::Skip) && ex.cause ? ex.cause : ex
+     rescue Sidekiq::JobRetry::Handled => h
+       # this is the common case: job raised error and Sidekiq::JobRetry::Handled
+       # signals that we created a retry successfully. We can acknowlege the job.
+       e = h.cause ? h.cause : h
        handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
        raise e
+     rescue Exception => ex
+       # Unexpected error! This is very bad and indicates an exception that got past
+       # the retry subsystem (e.g. network partition). We won't acknowledge the job
+       # so it can be rescued when using Sidekiq Pro.
+       ack = false
+       handle_exception(ex, { :context => "Internal exception!", :job => job_hash, :jobstr => jobstr })
+       raise e
      ensure
        work.acknowledge if ack
      end
@@ -187,22 +192,68 @@ module Sidekiq
      worker.perform(*cloned_args)
    end
 
-   WORKER_STATE = Concurrent::Map.new
-   PROCESSED = Concurrent::AtomicFixnum.new
-   FAILURE = Concurrent::AtomicFixnum.new
+   # Ruby doesn't provide atomic counters out of the box so we'll
+   # implement something simple ourselves.
+   # https://bugs.ruby-lang.org/issues/14706
+   class Counter
+     def initialize
+       @value = 0
+       @lock = Mutex.new
+     end
+
+     def incr(amount=1)
+       @lock.synchronize { @value = @value + amount }
+     end
+
+     def reset
+       @lock.synchronize { val = @value; @value = 0; val }
+     end
+   end
+
+   # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
+   class SharedWorkerState
+     def initialize
+       @worker_state = {}
+       @lock = Mutex.new
+     end
+
+     def set(tid, hash)
+       @lock.synchronize { @worker_state[tid] = hash }
+     end
+
+     def delete(tid)
+       @lock.synchronize { @worker_state.delete(tid) }
+     end
+
+     def dup
+       @lock.synchronize { @worker_state.dup }
+     end
+
+     def size
+       @lock.synchronize { @worker_state.size }
+     end
+
+     def clear
+       @lock.synchronize { @worker_state.clear }
+     end
+   end
+
+   PROCESSED = Counter.new
+   FAILURE = Counter.new
+   WORKER_STATE = SharedWorkerState.new
 
    def stats(job_hash, queue)
      tid = Sidekiq::Logging.tid
-     WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }
+     WORKER_STATE.set(tid, {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i })
 
      begin
        yield
      rescue Exception
-       FAILURE.increment
+       FAILURE.incr
        raise
      ensure
        WORKER_STATE.delete(tid)
-       PROCESSED.increment
+       PROCESSED.incr
      end
    end
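These replacements pair with the launcher changes earlier in this release: each job calls incr, and the heartbeat drains the tallies with reset, re-adding them if the Redis flush fails. A minimal sketch of that read-and-reset pattern using the Counter defined above (internal API, shown for illustration only):

    counter = Sidekiq::Processor::Counter.new
    3.times { counter.incr }
    counter.incr(2)

    batch = counter.reset      # => 5; the counter is back to 0
    # If pushing `batch` to Redis fails, the launcher restores it with: counter.incr(batch)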