sidekiq 5.1.3 → 5.2.9

Potentially problematic release: this version of sidekiq has been flagged as possibly problematic.

Files changed (46)
  1. checksums.yaml +5 -5
  2. data/.circleci/config.yml +61 -0
  3. data/.gitignore +2 -0
  4. data/.travis.yml +2 -5
  5. data/COMM-LICENSE +11 -9
  6. data/Changes.md +73 -0
  7. data/Ent-Changes.md +22 -0
  8. data/Gemfile +20 -5
  9. data/Pro-Changes.md +30 -0
  10. data/README.md +1 -1
  11. data/Rakefile +2 -1
  12. data/bin/sidekiqctl +13 -92
  13. data/bin/sidekiqload +1 -1
  14. data/lib/sidekiq.rb +5 -3
  15. data/lib/sidekiq/api.rb +47 -14
  16. data/lib/sidekiq/cli.rb +64 -58
  17. data/lib/sidekiq/client.rb +4 -3
  18. data/lib/sidekiq/ctl.rb +221 -0
  19. data/lib/sidekiq/job_logger.rb +2 -2
  20. data/lib/sidekiq/job_retry.rb +33 -12
  21. data/lib/sidekiq/launcher.rb +14 -7
  22. data/lib/sidekiq/manager.rb +3 -3
  23. data/lib/sidekiq/middleware/server/active_record.rb +1 -1
  24. data/lib/sidekiq/processor.rb +76 -25
  25. data/lib/sidekiq/rails.rb +2 -1
  26. data/lib/sidekiq/redis_connection.rb +20 -1
  27. data/lib/sidekiq/scheduled.rb +32 -3
  28. data/lib/sidekiq/testing.rb +4 -4
  29. data/lib/sidekiq/version.rb +1 -1
  30. data/lib/sidekiq/web/application.rb +22 -0
  31. data/lib/sidekiq/web/helpers.rb +13 -6
  32. data/lib/sidekiq/worker.rb +24 -8
  33. data/sidekiq.gemspec +5 -12
  34. data/web/assets/javascripts/application.js +0 -0
  35. data/web/assets/javascripts/dashboard.js +15 -5
  36. data/web/assets/stylesheets/application.css +35 -2
  37. data/web/assets/stylesheets/bootstrap.css +2 -2
  38. data/web/locales/ar.yml +1 -0
  39. data/web/locales/en.yml +1 -0
  40. data/web/locales/es.yml +3 -3
  41. data/web/views/_nav.erb +3 -17
  42. data/web/views/layout.erb +1 -1
  43. data/web/views/queue.erb +1 -0
  44. data/web/views/queues.erb +1 -1
  45. data/web/views/retries.erb +4 -0
  46. metadata +12 -79

data/lib/sidekiq/ctl.rb
@@ -0,0 +1,221 @@
+#!/usr/bin/env ruby
+
+require 'fileutils'
+require 'sidekiq/api'
+
+class Sidekiq::Ctl
+  DEFAULT_KILL_TIMEOUT = 10
+  CMD = File.basename($0)
+
+  attr_reader :stage, :pidfile, :kill_timeout
+
+  def self.print_usage
+    puts "#{CMD} - control Sidekiq from the command line."
+    puts
+    puts "Usage: #{CMD} quiet <pidfile> <kill_timeout>"
+    puts "       #{CMD} stop <pidfile> <kill_timeout>"
+    puts "       #{CMD} status <section>"
+    puts
+    puts " <pidfile> is path to a pidfile"
+    puts " <kill_timeout> is number of seconds to wait until Sidekiq exits"
+    puts " (default: #{Sidekiq::Ctl::DEFAULT_KILL_TIMEOUT}), after which Sidekiq will be KILL'd"
+    puts
+    puts " <section> (optional) view a specific section of the status output"
+    puts " Valid sections are: #{Sidekiq::Ctl::Status::VALID_SECTIONS.join(', ')}"
+    puts
+    puts "Be sure to set the kill_timeout LONGER than Sidekiq's -t timeout. If you want"
+    puts "to wait 60 seconds for jobs to finish, use `sidekiq -t 60` and `sidekiqctl stop"
+    puts " path_to_pidfile 61`"
+    puts
+  end
+
+  def initialize(stage, pidfile, timeout)
+    @stage = stage
+    @pidfile = pidfile
+    @kill_timeout = timeout
+
+    done('No pidfile given', :error) if !pidfile
+    done("Pidfile #{pidfile} does not exist", :warn) if !File.exist?(pidfile)
+    done('Invalid pidfile content', :error) if pid == 0
+
+    fetch_process
+
+    begin
+      send(stage)
+    rescue NoMethodError
+      done "Invalid command: #{stage}", :error
+    end
+  end
+
+  def fetch_process
+    Process.kill(0, pid)
+  rescue Errno::ESRCH
+    done "Process doesn't exist", :error
+  # We were not allowed to send a signal, but the process must have existed
+  # when Process.kill() was called.
+  rescue Errno::EPERM
+    return pid
+  end
+
+  def done(msg, error = nil)
+    puts msg
+    exit(exit_signal(error))
+  end
+
+  def exit_signal(error)
+    (error == :error) ? 1 : 0
+  end
+
+  def pid
+    @pid ||= File.read(pidfile).to_i
+  end
+
+  def quiet
+    `kill -TSTP #{pid}`
+  end
+
+  def stop
+    `kill -TERM #{pid}`
+    kill_timeout.times do
+      begin
+        Process.kill(0, pid)
+      rescue Errno::ESRCH
+        FileUtils.rm_f pidfile
+        done 'Sidekiq shut down gracefully.'
+      rescue Errno::EPERM
+        done 'Not permitted to shut down Sidekiq.'
+      end
+      sleep 1
+    end
+    `kill -9 #{pid}`
+    FileUtils.rm_f pidfile
+    done 'Sidekiq shut down forcefully.'
+  end
+  alias_method :shutdown, :stop
+
+  class Status
+    VALID_SECTIONS = %w[all version overview processes queues]
+    def display(section = nil)
+      section ||= 'all'
+      unless VALID_SECTIONS.include? section
+        puts "I don't know how to check the status of '#{section}'!"
+        puts "Try one of these: #{VALID_SECTIONS.join(', ')}"
+        return
+      end
+      send(section)
+    rescue StandardError => e
+      puts "Couldn't get status: #{e}"
+    end
+
+    def all
+      version
+      puts
+      overview
+      puts
+      processes
+      puts
+      queues
+    end
+
+    def version
+      puts "Sidekiq #{Sidekiq::VERSION}"
+      puts Time.now
+    end
+
+    def overview
+      puts '---- Overview ----'
+      puts " Processed: #{delimit stats.processed}"
+      puts " Failed: #{delimit stats.failed}"
+      puts " Busy: #{delimit stats.workers_size}"
+      puts " Enqueued: #{delimit stats.enqueued}"
+      puts " Retries: #{delimit stats.retry_size}"
+      puts " Scheduled: #{delimit stats.scheduled_size}"
+      puts " Dead: #{delimit stats.dead_size}"
+    end
+
+    def processes
+      puts "---- Processes (#{process_set.size}) ----"
+      process_set.each_with_index do |process, index|
+        puts "#{process['identity']} #{tags_for(process)}"
+        puts " Started: #{Time.at(process['started_at'])} (#{time_ago(process['started_at'])})"
+        puts " Threads: #{process['concurrency']} (#{process['busy']} busy)"
+        puts " Queues: #{split_multiline(process['queues'].sort, pad: 11)}"
+        puts '' unless (index+1) == process_set.size
+      end
+    end
+
+    COL_PAD = 2
+    def queues
+      puts "---- Queues (#{queue_data.size}) ----"
+      columns = {
+        name: [:ljust, (['name'] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
+        size: [:rjust, (['size'] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
+        latency: [:rjust, (['latency'] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
+      }
+      columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
+      puts
+      queue_data.each do |q|
+        columns.each do |col, (dir, width)|
+          print q.send(col).public_send(dir, width)
+        end
+        puts
+      end
+    end
+
+    private
+
+    def delimit(number)
+      number.to_s.reverse.scan(/.{1,3}/).join(',').reverse
+    end
+
+    def split_multiline(values, opts = {})
+      return 'none' unless values
+      pad = opts[:pad] || 0
+      max_length = opts[:max_length] || (80 - pad)
+      out = []
+      line = ''
+      values.each do |value|
+        if (line.length + value.length) > max_length
+          out << line
+          line = ' ' * pad
+        end
+        line << value + ', '
+      end
+      out << line[0..-3]
+      out.join("\n")
+    end
+
+    def tags_for(process)
+      tags = [
+        process['tag'],
+        process['labels'],
+        (process['quiet'] == 'true' ? 'quiet' : nil)
+      ].flatten.compact
+      tags.any? ? "[#{tags.join('] [')}]" : nil
+    end
+
+    def time_ago(timestamp)
+      seconds = Time.now - Time.at(timestamp)
+      return 'just now' if seconds < 60
+      return 'a minute ago' if seconds < 120
+      return "#{seconds.floor / 60} minutes ago" if seconds < 3600
+      return 'an hour ago' if seconds < 7200
+      "#{seconds.floor / 60 / 60} hours ago"
+    end
+
+    QUEUE_STRUCT = Struct.new(:name, :size, :latency)
+    def queue_data
+      @queue_data ||= Sidekiq::Queue.all.map do |q|
+        QUEUE_STRUCT.new(q.name, q.size.to_s, sprintf('%#.2f', q.latency))
+      end
+    end
+
+    def process_set
+      @process_set ||= Sidekiq::ProcessSet.new
+    end
+
+    def stats
+      @stats ||= Sidekiq::Stats.new
+    end
+  end
+end
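
Note: the Status class above reads everything through the public API pulled in by require 'sidekiq/api'. A minimal sketch of fetching the same numbers directly from Ruby, assuming a configured Redis connection (the output formatting is illustrative):

    require 'sidekiq/api'

    stats = Sidekiq::Stats.new
    puts "Processed: #{stats.processed}, Failed: #{stats.failed}, Enqueued: #{stats.enqueued}"

    Sidekiq::Queue.all.each do |q|
      puts format('%-20s size=%d latency=%.2f', q.name, q.size, q.latency)
    end

    Sidekiq::ProcessSet.new.each do |process|
      puts "#{process['identity']} busy=#{process['busy']}/#{process['concurrency']}"
    end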

data/lib/sidekiq/job_logger.rb
@@ -3,7 +3,7 @@ module Sidekiq
   class JobLogger
 
     def call(item, queue)
-      start = Time.now
+      start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       logger.info("start")
       yield
       logger.info("done: #{elapsed(start)} sec")
@@ -15,7 +15,7 @@ module Sidekiq
     private
 
     def elapsed(start)
-      (Time.now - start).round(3)
+      (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
     end
 
     def logger
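
Note: both hunks swap wall-clock arithmetic for the monotonic clock, so the elapsed time logged for a job can no longer be skewed by NTP adjustments or system clock changes. A minimal sketch of the pattern (the sleep stands in for a job's work):

    start = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
    sleep 0.25   # stand-in for the job's perform
    elapsed = (::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - start).round(3)
    puts "done: #{elapsed} sec"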

data/lib/sidekiq/job_retry.rb
@@ -56,7 +56,8 @@ module Sidekiq
   #   end
   #
   class JobRetry
-    class Skip < ::RuntimeError; end
+    class Handled < ::RuntimeError; end
+    class Skip < Handled; end
 
     include Sidekiq::Util
 
@@ -71,7 +72,7 @@ module Sidekiq
     # require the worker to be instantiated.
     def global(msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -80,9 +81,19 @@ module Sidekiq
       # ignore, will be pushed back onto queue during hard_shutdown
       raise Sidekiq::Shutdown if exception_caused_by_shutdown?(e)
 
-      raise e unless msg['retry']
-      attempt_retry(nil, msg, queue, e)
-      raise e
+      if msg['retry']
+        attempt_retry(nil, msg, queue, e)
+      else
+        Sidekiq.death_handlers.each do |handler|
+          begin
+            handler.call(msg, e)
+          rescue => handler_ex
+            handle_exception(handler_ex, { context: "Error calling death handler", job: msg })
+          end
+        end
+      end
+
+      raise Handled
     end
 
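Note: with this change a job configured with retry: false no longer just re-raises; the registered death handlers run before the wrapping Handled error is raised. A hedged sketch of registering one via the Sidekiq.death_handlers collection used above (the log message is illustrative):

    Sidekiq.death_handlers << ->(job, ex) do
      # job is the raw job hash, ex is the exception that killed it
      Sidekiq.logger.warn("#{job['class']} #{job['jid']} died: #{ex.message}")
    end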
 
@@ -96,7 +107,7 @@ module Sidekiq
     # calling the handle_exception handlers.
     def local(worker, msg, queue)
       yield
-    rescue Skip => ex
+    rescue Handled => ex
       raise ex
     rescue Sidekiq::Shutdown => ey
       # ignore, will be pushed back onto queue during hard_shutdown
@@ -130,9 +141,7 @@ module Sidekiq
        queue
      end
 
-      # App code can stuff all sorts of crazy binary data into the error message
-      # that won't convert to JSON.
-      m = exception.message.to_s[0, 10_000]
+      m = exception_message(exception)
      if m.respond_to?(:scrub!)
        m.force_encoding("utf-8")
        m.scrub!
@@ -158,7 +167,8 @@ module Sidekiq
 
      if count < max_retry_attempts
        delay = delay_for(worker, count, exception)
-        logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
+        # Logging here can break retries if the logging device raises ENOSPC #3979
+        #logger.debug { "Failure! Retry #{count} in #{delay} seconds" }
        retry_at = Time.now.to_f + delay
        payload = Sidekiq.dump_json(msg)
        Sidekiq.redis do |conn|
@@ -171,7 +181,6 @@ module Sidekiq
     end
 
     def retries_exhausted(worker, msg, exception)
-      logger.debug { "Retries exhausted for job" }
       begin
         block = worker && worker.sidekiq_retries_exhausted_block
         block.call(msg, exception) if block
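
Note: the block invoked here comes from the sidekiq_retries_exhausted class macro on the worker. A hedged sketch of defining one (the worker class and its logging are illustrative):

    class ImportWorker
      include Sidekiq::Worker
      sidekiq_options retry: 5

      sidekiq_retries_exhausted do |msg, ex|
        Sidekiq.logger.warn("Giving up on #{msg['class']} #{msg['jid']}: #{ex.message}")
      end

      def perform(account_id)
        # ...
      end
    end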
@@ -191,7 +200,7 @@ module Sidekiq
     end
 
     def send_to_morgue(msg)
-      Sidekiq.logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
+      logger.info { "Adding dead #{msg['class']} job #{msg['jid']}" }
       payload = Sidekiq.dump_json(msg)
       DeadSet.new.kill(payload, notify_failure: false)
     end
@@ -237,5 +246,17 @@ module Sidekiq
       exception_caused_by_shutdown?(e.cause, checked_causes)
     end
 
+    # Extract message from exception.
+    # Set a default if the message raises an error
+    def exception_message(exception)
+      begin
+        # App code can stuff all sorts of crazy binary data into the error message
+        # that won't convert to JSON.
+        exception.message.to_s[0, 10_000]
+      rescue
+        "!!! ERROR MESSAGE THREW AN ERROR !!!".dup
+      end
+    end
+
   end
 end

data/lib/sidekiq/launcher.rb
@@ -13,6 +13,8 @@ module Sidekiq
 
     attr_accessor :manager, :poller, :fetcher
 
+    STATS_TTL = 5*365*24*60*60
+
     def initialize(options)
       @manager = Sidekiq::Manager.new(options)
       @poller = Sidekiq::Scheduled::Poller.new
@@ -38,7 +40,7 @@ module Sidekiq
     # return until all work is complete and cleaned up.
     # It can take up to the timeout to complete.
     def stop
-      deadline = Time.now + @options[:timeout]
+      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]
 
       @done = true
       @manager.quiet
@@ -72,8 +74,9 @@ module Sidekiq
       key = identity
       fails = procd = 0
       begin
-        Processor::FAILURE.update {|curr| fails = curr; 0 }
-        Processor::PROCESSED.update {|curr| procd = curr; 0 }
+        fails = Processor::FAILURE.reset
+        procd = Processor::PROCESSED.reset
+        curstate = Processor::WORKER_STATE.dup
 
        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")
@@ -81,10 +84,14 @@ module Sidekiq
          conn.multi do
            conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.expire("stat:processed:#{nowdate}", STATS_TTL)
+
            conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
+
            conn.del(workers_key)
-            Processor::WORKER_STATE.each_pair do |tid, hash|
+            curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
@@ -96,7 +103,7 @@ module Sidekiq
          conn.multi do
            conn.sadd('processes', key)
            conn.exists(key)
-            conn.hmset(key, 'info', to_json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f, 'quiet', @done)
+            conn.hmset(key, 'info', to_json, 'busy', curstate.size, 'beat', Time.now.to_f, 'quiet', @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          end
@@ -112,8 +119,8 @@ module Sidekiq
        # ignore all redis/network issues
        logger.error("heartbeat: #{e.message}")
        # don't lose the counts if there was a network issue
-        Processor::PROCESSED.increment(procd)
-        Processor::FAILURE.increment(fails)
+        Processor::PROCESSED.incr(procd)
+        Processor::FAILURE.incr(fails)
      end
    end
 

data/lib/sidekiq/manager.rb
@@ -30,7 +30,7 @@ module Sidekiq
     def initialize(options={})
       logger.debug { options.inspect }
       @options = options
-      @count = options[:concurrency] || 25
+      @count = options[:concurrency] || 10
       raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
       @done = false
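
Note: the default server concurrency drops from 25 to 10 threads here, so deployments that relied on the old default should set it explicitly, typically via the -c flag or a :concurrency: key in sidekiq.yml. The Ruby form below is a hedged sketch and assumes Sidekiq.options is consulted after your initializer runs:

    Sidekiq.configure_server do |config|
      config.options[:concurrency] = 25   # restore the previous default explicitly
    end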
@@ -70,11 +70,11 @@ module Sidekiq
       return if @workers.empty?
 
       logger.info { "Pausing to allow workers to finish..." }
-      remaining = deadline - Time.now
+      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       while remaining > PAUSE_TIME
         return if @workers.empty?
         sleep PAUSE_TIME
-        remaining = deadline - Time.now
+        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
       end
       return if @workers.empty?
 

data/lib/sidekiq/middleware/server/active_record.rb
@@ -7,7 +7,7 @@ module Sidekiq
      def initialize
        # With Rails 5+ we must use the Reloader **always**.
        # The reloader handles code loading and db connection management.
-        if defined?(::Rails) && ::Rails::VERSION::MAJOR >= 5
+        if defined?(::Rails) && defined?(::Rails::VERSION) && ::Rails::VERSION::MAJOR >= 5
          raise ArgumentError, "Rails 5 no longer needs or uses the ActiveRecord middleware."
        end
      end

data/lib/sidekiq/processor.rb
@@ -4,8 +4,6 @@ require 'sidekiq/fetch'
 require 'sidekiq/job_logger'
 require 'sidekiq/job_retry'
 require 'thread'
-require 'concurrent/map'
-require 'concurrent/atomic/atomic_fixnum'
 
 module Sidekiq
   ##
@@ -89,7 +87,7 @@ module Sidekiq
     def get_one
       begin
         work = @strategy.retrieve_work
-        (logger.info { "Redis is online, #{Time.now - @down} sec downtime" }; @down = nil) if @down
+        (logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }; @down = nil) if @down
         work
       rescue Sidekiq::Shutdown
       rescue => ex
@@ -109,7 +107,7 @@ module Sidekiq
 
     def handle_fetch_exception(ex)
       if !@down
-        @down = Time.now
+        @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
         logger.error("Error fetching job: #{ex}")
         handle_exception(ex)
       end
@@ -149,21 +147,19 @@ module Sidekiq
       jobstr = work.job
       queue = work.queue_name
 
-      ack = false
+      # Treat malformed JSON as a special case: job goes straight to the morgue.
+      job_hash = nil
       begin
-        # Treat malformed JSON as a special case: job goes straight to the morgue.
-        job_hash = nil
-        begin
-          job_hash = Sidekiq.load_json(jobstr)
-        rescue => ex
-          handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
-          # we can't notify because the job isn't a valid hash payload.
-          DeadSet.new.kill(jobstr, notify_failure: false)
-          ack = true
-          raise
-        end
+        job_hash = Sidekiq.load_json(jobstr)
+      rescue => ex
+        handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
+        # we can't notify because the job isn't a valid hash payload.
+        DeadSet.new.kill(jobstr, notify_failure: false)
+        return work.acknowledge
+      end
 
-        ack = true
+      ack = true
+      begin
         dispatch(job_hash, queue) do |worker|
           Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
             execute_job(worker, cloned(job_hash['args']))
@@ -174,10 +170,19 @@ module Sidekiq
         # within the timeout. Don't acknowledge the work since
         # we didn't properly finish it.
         ack = false
-      rescue Exception => ex
-        e = ex.is_a?(::Sidekiq::JobRetry::Skip) && ex.cause ? ex.cause : ex
+      rescue Sidekiq::JobRetry::Handled => h
+        # this is the common case: job raised error and Sidekiq::JobRetry::Handled
+        # signals that we created a retry successfully. We can acknowlege the job.
+        e = h.cause ? h.cause : h
         handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
         raise e
+      rescue Exception => ex
+        # Unexpected error! This is very bad and indicates an exception that got past
+        # the retry subsystem (e.g. network partition). We won't acknowledge the job
+        # so it can be rescued when using Sidekiq Pro.
+        ack = false
+        handle_exception(ex, { :context => "Internal exception!", :job => job_hash, :jobstr => jobstr })
+        raise e
       ensure
         work.acknowledge if ack
       end
@@ -187,22 +192,68 @@ module Sidekiq
       worker.perform(*cloned_args)
     end
 
-    WORKER_STATE = Concurrent::Map.new
-    PROCESSED = Concurrent::AtomicFixnum.new
-    FAILURE = Concurrent::AtomicFixnum.new
+    # Ruby doesn't provide atomic counters out of the box so we'll
+    # implement something simple ourselves.
+    # https://bugs.ruby-lang.org/issues/14706
+    class Counter
+      def initialize
+        @value = 0
+        @lock = Mutex.new
+      end
+
+      def incr(amount=1)
+        @lock.synchronize { @value = @value + amount }
+      end
+
+      def reset
+        @lock.synchronize { val = @value; @value = 0; val }
+      end
+    end
+
+    # jruby's Hash implementation is not threadsafe, so we wrap it in a mutex here
+    class SharedWorkerState
+      def initialize
+        @worker_state = {}
+        @lock = Mutex.new
+      end
+
+      def set(tid, hash)
+        @lock.synchronize { @worker_state[tid] = hash }
+      end
+
+      def delete(tid)
+        @lock.synchronize { @worker_state.delete(tid) }
+      end
+
+      def dup
+        @lock.synchronize { @worker_state.dup }
+      end
+
+      def size
+        @lock.synchronize { @worker_state.size }
+      end
+
+      def clear
+        @lock.synchronize { @worker_state.clear }
+      end
+    end
+
+    PROCESSED = Counter.new
+    FAILURE = Counter.new
+    WORKER_STATE = SharedWorkerState.new
 
     def stats(job_hash, queue)
       tid = Sidekiq::Logging.tid
-      WORKER_STATE[tid] = {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i }
+      WORKER_STATE.set(tid, {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i })
 
       begin
         yield
       rescue Exception
-        FAILURE.increment
+        FAILURE.incr
         raise
       ensure
         WORKER_STATE.delete(tid)
-        PROCESSED.increment
+        PROCESSED.incr
       end