sidekiq 5.2.8 → 6.1.3

Potentially problematic release: this version of sidekiq has been flagged as possibly problematic.
Files changed (90)
  1. checksums.yaml +4 -4
  2. data/.github/ISSUE_TEMPLATE/bug_report.md +20 -0
  3. data/.github/workflows/ci.yml +41 -0
  4. data/.gitignore +0 -2
  5. data/.standard.yml +20 -0
  6. data/5.0-Upgrade.md +1 -1
  7. data/6.0-Upgrade.md +72 -0
  8. data/Changes.md +196 -0
  9. data/Ent-2.0-Upgrade.md +37 -0
  10. data/Ent-Changes.md +72 -1
  11. data/Gemfile +12 -11
  12. data/Gemfile.lock +193 -0
  13. data/Pro-5.0-Upgrade.md +25 -0
  14. data/Pro-Changes.md +56 -2
  15. data/README.md +18 -34
  16. data/Rakefile +5 -4
  17. data/bin/sidekiq +26 -2
  18. data/bin/sidekiqload +32 -24
  19. data/bin/sidekiqmon +8 -0
  20. data/lib/generators/sidekiq/templates/worker_test.rb.erb +1 -1
  21. data/lib/generators/sidekiq/worker_generator.rb +21 -13
  22. data/lib/sidekiq/api.rb +245 -219
  23. data/lib/sidekiq/cli.rb +144 -180
  24. data/lib/sidekiq/client.rb +68 -48
  25. data/lib/sidekiq/delay.rb +5 -6
  26. data/lib/sidekiq/exception_handler.rb +10 -12
  27. data/lib/sidekiq/extensions/action_mailer.rb +13 -22
  28. data/lib/sidekiq/extensions/active_record.rb +13 -10
  29. data/lib/sidekiq/extensions/class_methods.rb +14 -11
  30. data/lib/sidekiq/extensions/generic_proxy.rb +4 -4
  31. data/lib/sidekiq/fetch.rb +29 -30
  32. data/lib/sidekiq/job_logger.rb +45 -7
  33. data/lib/sidekiq/job_retry.rb +62 -61
  34. data/lib/sidekiq/launcher.rb +112 -54
  35. data/lib/sidekiq/logger.rb +166 -0
  36. data/lib/sidekiq/manager.rb +11 -13
  37. data/lib/sidekiq/middleware/chain.rb +15 -5
  38. data/lib/sidekiq/middleware/i18n.rb +5 -7
  39. data/lib/sidekiq/monitor.rb +133 -0
  40. data/lib/sidekiq/paginator.rb +18 -14
  41. data/lib/sidekiq/processor.rb +71 -70
  42. data/lib/sidekiq/rails.rb +29 -37
  43. data/lib/sidekiq/redis_connection.rb +50 -48
  44. data/lib/sidekiq/scheduled.rb +28 -29
  45. data/lib/sidekiq/sd_notify.rb +149 -0
  46. data/lib/sidekiq/systemd.rb +24 -0
  47. data/lib/sidekiq/testing/inline.rb +2 -1
  48. data/lib/sidekiq/testing.rb +35 -24
  49. data/lib/sidekiq/util.rb +17 -16
  50. data/lib/sidekiq/version.rb +2 -1
  51. data/lib/sidekiq/web/action.rb +14 -10
  52. data/lib/sidekiq/web/application.rb +74 -72
  53. data/lib/sidekiq/web/csrf_protection.rb +156 -0
  54. data/lib/sidekiq/web/helpers.rb +97 -77
  55. data/lib/sidekiq/web/router.rb +18 -17
  56. data/lib/sidekiq/web.rb +53 -53
  57. data/lib/sidekiq/worker.rb +126 -102
  58. data/lib/sidekiq.rb +69 -44
  59. data/sidekiq.gemspec +15 -16
  60. data/web/assets/javascripts/application.js +25 -27
  61. data/web/assets/javascripts/dashboard.js +4 -23
  62. data/web/assets/stylesheets/application-dark.css +149 -0
  63. data/web/assets/stylesheets/application.css +28 -6
  64. data/web/locales/de.yml +14 -2
  65. data/web/locales/en.yml +2 -0
  66. data/web/locales/fr.yml +3 -3
  67. data/web/locales/ja.yml +4 -1
  68. data/web/locales/lt.yml +83 -0
  69. data/web/locales/pl.yml +4 -4
  70. data/web/locales/ru.yml +4 -0
  71. data/web/locales/vi.yml +83 -0
  72. data/web/views/_job_info.erb +2 -1
  73. data/web/views/busy.erb +8 -3
  74. data/web/views/dead.erb +2 -2
  75. data/web/views/layout.erb +1 -0
  76. data/web/views/morgue.erb +5 -2
  77. data/web/views/queue.erb +10 -1
  78. data/web/views/queues.erb +9 -1
  79. data/web/views/retries.erb +5 -2
  80. data/web/views/retry.erb +2 -2
  81. data/web/views/scheduled.erb +5 -2
  82. metadata +31 -49
  83. data/.circleci/config.yml +0 -61
  84. data/.github/issue_template.md +0 -11
  85. data/.travis.yml +0 -11
  86. data/bin/sidekiqctl +0 -20
  87. data/lib/sidekiq/core_ext.rb +0 -1
  88. data/lib/sidekiq/ctl.rb +0 -221
  89. data/lib/sidekiq/logging.rb +0 -122
  90. data/lib/sidekiq/middleware/server/active_record.rb +0 -23

data/lib/sidekiq/manager.rb

@@ -1,12 +1,11 @@
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/processor'
-require 'sidekiq/fetch'
-require 'thread'
-require 'set'
 
-module Sidekiq
+require "sidekiq/util"
+require "sidekiq/processor"
+require "sidekiq/fetch"
+require "set"
 
+module Sidekiq
   ##
   # The Manager is the central coordination point in Sidekiq, controlling
   # the lifecycle of the Processors.
@@ -27,7 +26,7 @@ module Sidekiq
     attr_reader :workers
     attr_reader :options
 
-    def initialize(options={})
+    def initialize(options = {})
       logger.debug { options.inspect }
       @options = options
       @count = options[:concurrency] || 10
@@ -36,7 +35,7 @@ module Sidekiq
       @done = false
       @workers = Set.new
       @count.times do
-        @workers << Processor.new(self)
+        @workers << Processor.new(self, options)
       end
       @plock = Mutex.new
     end
@@ -57,7 +56,7 @@ module Sidekiq
     end
 
     # hack for quicker development / testing environment #2774
-    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5
+    PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
 
     def stop(deadline)
       quiet
@@ -91,7 +90,7 @@ module Sidekiq
       @plock.synchronize do
         @workers.delete(processor)
         unless @done
-          p = Processor.new(self)
+          p = Processor.new(self, options)
           @workers << p
           p.start
         end
@@ -113,7 +112,7 @@ module Sidekiq
       end
 
       if cleanup.size > 0
-        jobs = cleanup.map {|p| p.job }.compact
+        jobs = cleanup.map { |p| p.job }.compact
 
         logger.warn { "Terminating #{cleanup.size} busy worker threads" }
        logger.warn { "Work still in progress #{jobs.inspect}" }
@@ -124,7 +123,7 @@ module Sidekiq
       # contract says that jobs are run AT LEAST once. Process termination
       # is delayed until we're certain the jobs are back in Redis because
       # it is worse to lose a job than to run it twice.
-      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy = @options[:fetch]
       strategy.bulk_requeue(jobs, @options)
     end
 
@@ -132,6 +131,5 @@ module Sidekiq
         processor.kill
       end
     end
-
   end
 end
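
Two changes thread through this file: each Processor now receives the full options hash, and @options[:fetch] is expected to already hold an instantiated fetch strategy rather than a class (Sidekiq 6 instantiates BasicFetch during boot). A hedged sketch of the resulting wiring, which Sidekiq::CLI normally does for you; the concurrency and queue values are illustrative:

    require "sidekiq"
    require "sidekiq/manager"
    require "sidekiq/fetch"

    options = Sidekiq.options.merge(concurrency: 5, queues: ["default"])
    options[:fetch] = Sidekiq::BasicFetch.new(options)  # strategy is pre-instantiated now
    manager = Sidekiq::Manager.new(options)             # each Processor.new(self, options) shares it
    manager.start                                       # spawns `concurrency` processor threads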

data/lib/sidekiq/middleware/chain.rb

@@ -1,4 +1,5 @@
 # frozen_string_literal: true
+
 module Sidekiq
   # Middleware is code configured to run before/after
   # a message is processed. It is patterned after Rack
@@ -66,7 +67,6 @@ module Sidekiq
   module Middleware
     class Chain
       include Enumerable
-      attr_reader :entries
 
       def initialize_copy(copy)
         copy.instance_variable_set(:@entries, entries.dup)
@@ -77,10 +77,14 @@ module Sidekiq
       end
 
       def initialize
-        @entries = []
+        @entries = nil
         yield self if block_given?
       end
 
+      def entries
+        @entries ||= []
+      end
+
       def remove(klass)
         entries.delete_if { |entry| entry.klass == klass }
       end
@@ -106,13 +110,17 @@ module Sidekiq
         i = entries.index { |entry| entry.klass == newklass }
         new_entry = i.nil? ? Entry.new(newklass, *args) : entries.delete_at(i)
         i = entries.index { |entry| entry.klass == oldklass } || entries.count - 1
-        entries.insert(i+1, new_entry)
+        entries.insert(i + 1, new_entry)
       end
 
       def exists?(klass)
         any? { |entry| entry.klass == klass }
       end
 
+      def empty?
+        @entries.nil? || @entries.empty?
+      end
+
       def retrieve
         map(&:make_new)
       end
@@ -122,8 +130,10 @@ module Sidekiq
       end
 
       def invoke(*args)
+        return yield if empty?
+
         chain = retrieve.dup
-        traverse_chain = lambda do
+        traverse_chain = proc do
           if chain.empty?
             yield
           else
@@ -139,7 +149,7 @@ module Sidekiq
 
       def initialize(klass, *args)
         @klass = klass
-        @args  = args
+        @args = args
       end
 
       def make_new
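
The Chain now allocates its entries array lazily and invoke returns early when the chain is empty, so installations with no custom middleware skip the per-job proc construction. Registering middleware is unchanged; a brief example (MyMetrics is an illustrative class, not part of Sidekiq):

    class MyMetrics
      # Server middleware contract: #call receives the worker instance,
      # the job hash and the queue name, and must yield to continue the chain.
      def call(worker, job, queue)
        started = Process.clock_gettime(Process::CLOCK_MONOTONIC)
        yield
      ensure
        elapsed = Process.clock_gettime(Process::CLOCK_MONOTONIC) - started
        Sidekiq.logger.info("#{job["class"]} took #{elapsed.round(3)}s")
      end
    end

    Sidekiq.configure_server do |config|
      config.server_middleware do |chain|
        chain.add MyMetrics
      end
    end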

data/lib/sidekiq/middleware/i18n.rb

@@ -1,4 +1,5 @@
 # frozen_string_literal: true
+
 #
 # Simple middleware to save the current locale and restore it when the job executes.
 # Use it by requiring it in your initializer:
@@ -9,19 +10,16 @@ module Sidekiq::Middleware::I18n
   # Get the current locale and store it in the message
   # to be sent to Sidekiq.
   class Client
-    def call(worker_class, msg, queue, redis_pool)
-      msg['locale'] ||= I18n.locale
+    def call(_worker, msg, _queue, _redis)
+      msg["locale"] ||= I18n.locale
       yield
     end
   end
 
   # Pull the msg locale out and set the current thread to use it.
   class Server
-    def call(worker, msg, queue)
-      I18n.locale = msg['locale'] || I18n.default_locale
-      yield
-    ensure
-      I18n.locale = I18n.default_locale
+    def call(_worker, msg, _queue, &block)
+      I18n.with_locale(msg.fetch("locale", I18n.default_locale), &block)
     end
   end
 end
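
As the file's own header comment notes, this middleware is opt-in via a require, and the Server half now restores the locale with I18n.with_locale instead of resetting it by hand. Typical usage, assuming the conventional Rails initializer location:

    # config/initializers/sidekiq.rb
    require "sidekiq/middleware/i18n"

Jobs enqueued while I18n.locale is, say, :fr will then execute under :fr on the server, falling back to I18n.default_locale when no locale was captured at enqueue time.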

data/lib/sidekiq/monitor.rb (new file)

@@ -0,0 +1,133 @@
+#!/usr/bin/env ruby
+
+require "fileutils"
+require "sidekiq/api"
+
+class Sidekiq::Monitor
+  class Status
+    VALID_SECTIONS = %w[all version overview processes queues]
+    COL_PAD = 2
+
+    def display(section = nil)
+      section ||= "all"
+      unless VALID_SECTIONS.include? section
+        puts "I don't know how to check the status of '#{section}'!"
+        puts "Try one of these: #{VALID_SECTIONS.join(", ")}"
+        return
+      end
+      send(section)
+    rescue => e
+      puts "Couldn't get status: #{e}"
+    end
+
+    def all
+      version
+      puts
+      overview
+      puts
+      processes
+      puts
+      queues
+    end
+
+    def version
+      puts "Sidekiq #{Sidekiq::VERSION}"
+      puts Time.now.utc
+    end
+
+    def overview
+      puts "---- Overview ----"
+      puts "  Processed: #{delimit stats.processed}"
+      puts "     Failed: #{delimit stats.failed}"
+      puts "       Busy: #{delimit stats.workers_size}"
+      puts "   Enqueued: #{delimit stats.enqueued}"
+      puts "    Retries: #{delimit stats.retry_size}"
+      puts "  Scheduled: #{delimit stats.scheduled_size}"
+      puts "       Dead: #{delimit stats.dead_size}"
+    end
+
+    def processes
+      puts "---- Processes (#{process_set.size}) ----"
+      process_set.each_with_index do |process, index|
+        puts "#{process["identity"]} #{tags_for(process)}"
+        puts "  Started: #{Time.at(process["started_at"])} (#{time_ago(process["started_at"])})"
+        puts "  Threads: #{process["concurrency"]} (#{process["busy"]} busy)"
+        puts "   Queues: #{split_multiline(process["queues"].sort, pad: 11)}"
+        puts "" unless (index + 1) == process_set.size
+      end
+    end
+
+    def queues
+      puts "---- Queues (#{queue_data.size}) ----"
+      columns = {
+        name: [:ljust, (["name"] + queue_data.map(&:name)).map(&:length).max + COL_PAD],
+        size: [:rjust, (["size"] + queue_data.map(&:size)).map(&:length).max + COL_PAD],
+        latency: [:rjust, (["latency"] + queue_data.map(&:latency)).map(&:length).max + COL_PAD]
+      }
+      columns.each { |col, (dir, width)| print col.to_s.upcase.public_send(dir, width) }
+      puts
+      queue_data.each do |q|
+        columns.each do |col, (dir, width)|
+          print q.send(col).public_send(dir, width)
+        end
+        puts
+      end
+    end
+
+    private
+
+    def delimit(number)
+      number.to_s.reverse.scan(/.{1,3}/).join(",").reverse
+    end
+
+    def split_multiline(values, opts = {})
+      return "none" unless values
+      pad = opts[:pad] || 0
+      max_length = opts[:max_length] || (80 - pad)
+      out = []
+      line = ""
+      values.each do |value|
+        if (line.length + value.length) > max_length
+          out << line
+          line = " " * pad
+        end
+        line << value + ", "
+      end
+      out << line[0..-3]
+      out.join("\n")
+    end
+
+    def tags_for(process)
+      tags = [
+        process["tag"],
+        process["labels"],
+        (process["quiet"] == "true" ? "quiet" : nil)
+      ].flatten.compact
+      tags.any? ? "[#{tags.join("] [")}]" : nil
+    end
+
+    def time_ago(timestamp)
+      seconds = Time.now - Time.at(timestamp)
+      return "just now" if seconds < 60
+      return "a minute ago" if seconds < 120
+      return "#{seconds.floor / 60} minutes ago" if seconds < 3600
+      return "an hour ago" if seconds < 7200
+      "#{seconds.floor / 60 / 60} hours ago"
+    end
+
+    QUEUE_STRUCT = Struct.new(:name, :size, :latency)
+    def queue_data
+      @queue_data ||= Sidekiq::Queue.all.map { |q|
+        QUEUE_STRUCT.new(q.name, q.size.to_s, sprintf("%#.2f", q.latency))
+      }
+    end
+
+    def process_set
+      @process_set ||= Sidekiq::ProcessSet.new
+    end
+
+    def stats
+      @stats ||= Sidekiq::Stats.new
+    end
+  end
+end
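
Sidekiq::Monitor backs the new bin/sidekiqmon executable listed above, which replaces the status reporting of the removed sidekiqctl. It can also be driven directly from Ruby; a minimal sketch, assuming REDIS_URL points at the same Redis your workers use:

    require "sidekiq/monitor"

    # Prints the same tables the sidekiqmon binary shows on the command line.
    status = Sidekiq::Monitor::Status.new
    status.display("overview")   # any of: all, version, overview, processes, queues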

data/lib/sidekiq/paginator.rb

@@ -1,8 +1,8 @@
 # frozen_string_literal: true
+
 module Sidekiq
   module Paginator
-
-    def page(key, pageidx=1, page_size=25, opts=nil)
+    def page(key, pageidx = 1, page_size = 25, opts = nil)
       current_page = pageidx.to_i < 1 ? 1 : pageidx.to_i
       pageidx = current_page - 1
       total_size = 0
@@ -12,32 +12,36 @@ module Sidekiq
 
       Sidekiq.redis do |conn|
         type = conn.type(key)
+        rev = opts && opts[:reverse]
 
         case type
-        when 'zset'
-          rev = opts && opts[:reverse]
-          total_size, items = conn.multi do
+        when "zset"
+          total_size, items = conn.multi {
             conn.zcard(key)
             if rev
-              conn.zrevrange(key, starting, ending, :with_scores => true)
+              conn.zrevrange(key, starting, ending, with_scores: true)
             else
-              conn.zrange(key, starting, ending, :with_scores => true)
+              conn.zrange(key, starting, ending, with_scores: true)
             end
-          end
+          }
           [current_page, total_size, items]
-        when 'list'
-          total_size, items = conn.multi do
+        when "list"
+          total_size, items = conn.multi {
             conn.llen(key)
-            conn.lrange(key, starting, ending)
-          end
+            if rev
+              conn.lrange(key, -ending - 1, -starting - 1)
+            else
+              conn.lrange(key, starting, ending)
+            end
+          }
+          items.reverse! if rev
           [current_page, total_size, items]
-        when 'none'
+        when "none"
           [1, 0, []]
         else
           raise "can't page a #{type}"
         end
       end
     end
-
   end
 end
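
page can now walk a plain Redis list backwards via opts[:reverse], which the queue view uses to show the newest jobs first. The module is internal to the Web UI, but here is a hedged sketch of calling it directly; the QueuePeek class and the queue:default key are assumptions about your setup:

    require "sidekiq"
    require "sidekiq/paginator"

    class QueuePeek
      include Sidekiq::Paginator
    end

    # Page 1 (25 entries) of the "default" queue's Redis list, newest first.
    current_page, total_size, items = QueuePeek.new.page("queue:default", 1, 25, {reverse: true})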

data/lib/sidekiq/processor.rb

@@ -1,9 +1,9 @@
 # frozen_string_literal: true
-require 'sidekiq/util'
-require 'sidekiq/fetch'
-require 'sidekiq/job_logger'
-require 'sidekiq/job_retry'
-require 'thread'
+
+require "sidekiq/util"
+require "sidekiq/fetch"
+require "sidekiq/job_logger"
+require "sidekiq/job_retry"
 
 module Sidekiq
   ##
@@ -23,33 +23,32 @@ module Sidekiq
   # to replace itself and exits.
   #
   class Processor
-
     include Util
 
     attr_reader :thread
     attr_reader :job
 
-    def initialize(mgr)
+    def initialize(mgr, options)
       @mgr = mgr
       @down = false
       @done = false
       @job = nil
       @thread = nil
-      @strategy = (mgr.options[:fetch] || Sidekiq::BasicFetch).new(mgr.options)
-      @reloader = Sidekiq.options[:reloader]
-      @logging = (mgr.options[:job_logger] || Sidekiq::JobLogger).new
+      @strategy = options[:fetch]
+      @reloader = options[:reloader] || proc { |&block| block.call }
+      @job_logger = (options[:job_logger] || Sidekiq::JobLogger).new
      @retrier = Sidekiq::JobRetry.new
    end
 
-    def terminate(wait=false)
+    def terminate(wait = false)
       @done = true
-      return if !@thread
+      return unless @thread
       @thread.value if wait
     end
 
-    def kill(wait=false)
+    def kill(wait = false)
       @done = true
-      return if !@thread
+      return unless @thread
       # unlike the other actors, terminate does not wait
       # for the thread to finish because we don't know how
       # long the job will take to finish. Instead we
@@ -66,16 +65,12 @@ module Sidekiq
     private unless $TESTING
 
     def run
-      begin
-        while !@done
-          process_one
-        end
-        @mgr.processor_stopped(self)
-      rescue Sidekiq::Shutdown
-        @mgr.processor_stopped(self)
-      rescue Exception => ex
-        @mgr.processor_died(self, ex)
-      end
+      process_one until @done
+      @mgr.processor_stopped(self)
+    rescue Sidekiq::Shutdown
+      @mgr.processor_stopped(self)
+    rescue Exception => ex
+      @mgr.processor_died(self, ex)
     end
 
     def process_one
@@ -85,14 +80,15 @@ module Sidekiq
     end
 
     def get_one
-      begin
-        work = @strategy.retrieve_work
-        (logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }; @down = nil) if @down
-        work
-      rescue Sidekiq::Shutdown
-      rescue => ex
-        handle_fetch_exception(ex)
+      work = @strategy.retrieve_work
+      if @down
+        logger.info { "Redis is online, #{::Process.clock_gettime(::Process::CLOCK_MONOTONIC) - @down} sec downtime" }
+        @down = nil
       end
+      work
+    rescue Sidekiq::Shutdown
+    rescue => ex
+      handle_fetch_exception(ex)
     end
 
     def fetch
@@ -106,7 +102,7 @@ module Sidekiq
     end
 
     def handle_fetch_exception(ex)
-      if !@down
+      unless @down
        @down = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
        logger.error("Error fetching job: #{ex}")
        handle_exception(ex)
@@ -115,25 +111,28 @@ module Sidekiq
       nil
     end
 
-    def dispatch(job_hash, queue)
+    def dispatch(job_hash, queue, jobstr)
       # since middleware can mutate the job hash
-      # we clone here so we report the original
+      # we need to clone it to report the original
       # job structure to the Web UI
-      pristine = cloned(job_hash)
-
-      Sidekiq::Logging.with_job_hash_context(job_hash) do
-        @retrier.global(pristine, queue) do
-          @logging.call(job_hash, queue) do
-            stats(pristine, queue) do
+      # or to push back to redis when retrying.
+      # To avoid costly and, most of the time, useless cloning here,
+      # we pass original String of JSON to respected methods
+      # to re-parse it there if we need access to the original, untouched job
+
+      @job_logger.prepare(job_hash) do
+        @retrier.global(jobstr, queue) do
+          @job_logger.call(job_hash, queue) do
+            stats(jobstr, queue) do
               # Rails 5 requires a Reloader to wrap code execution. In order to
               # constantize the worker and instantiate an instance, we have to call
              # the Reloader. It handles code loading, db connection management, etc.
              # Effectively this block denotes a "unit of work" to Rails.
              @reloader.call do
-                klass = constantize(job_hash['class'])
+                klass = constantize(job_hash["class"])
                 worker = klass.new
-                worker.jid = job_hash['jid']
-                @retrier.local(worker, pristine, queue) do
+                worker.jid = job_hash["jid"]
+                @retrier.local(worker, jobstr, queue) do
                   yield worker
                 end
               end
@@ -152,39 +151,44 @@ module Sidekiq
       begin
         job_hash = Sidekiq.load_json(jobstr)
       rescue => ex
-        handle_exception(ex, { :context => "Invalid JSON for job", :jobstr => jobstr })
+        handle_exception(ex, {context: "Invalid JSON for job", jobstr: jobstr})
         # we can't notify because the job isn't a valid hash payload.
         DeadSet.new.kill(jobstr, notify_failure: false)
         return work.acknowledge
       end
 
-      ack = true
+      ack = false
       begin
-        dispatch(job_hash, queue) do |worker|
+        dispatch(job_hash, queue, jobstr) do |worker|
           Sidekiq.server_middleware.invoke(worker, job_hash, queue) do
-            execute_job(worker, cloned(job_hash['args']))
+            execute_job(worker, job_hash["args"])
           end
         end
+        ack = true
       rescue Sidekiq::Shutdown
         # Had to force kill this job because it didn't finish
         # within the timeout. Don't acknowledge the work since
         # we didn't properly finish it.
-        ack = false
       rescue Sidekiq::JobRetry::Handled => h
         # this is the common case: job raised error and Sidekiq::JobRetry::Handled
         # signals that we created a retry successfully. We can acknowlege the job.
-        e = h.cause ? h.cause : h
-        handle_exception(e, { :context => "Job raised exception", :job => job_hash, :jobstr => jobstr })
+        ack = true
+        e = h.cause || h
+        handle_exception(e, {context: "Job raised exception", job: job_hash, jobstr: jobstr})
         raise e
       rescue Exception => ex
         # Unexpected error! This is very bad and indicates an exception that got past
         # the retry subsystem (e.g. network partition). We won't acknowledge the job
        # so it can be rescued when using Sidekiq Pro.
-        ack = false
-        handle_exception(ex, { :context => "Internal exception!", :job => job_hash, :jobstr => jobstr })
-        raise e
+        handle_exception(ex, {context: "Internal exception!", job: job_hash, jobstr: jobstr})
+        raise ex
       ensure
-        work.acknowledge if ack
+        if ack
+          # We don't want a shutdown signal to interrupt job acknowledgment.
+          Thread.handle_interrupt(Sidekiq::Shutdown => :never) do
+            work.acknowledge
+          end
+        end
      end
    end
 
@@ -201,12 +205,16 @@ module Sidekiq
         @lock = Mutex.new
       end
 
-      def incr(amount=1)
-        @lock.synchronize { @value = @value + amount }
+      def incr(amount = 1)
+        @lock.synchronize { @value += amount }
       end
 
       def reset
-        @lock.synchronize { val = @value; @value = 0; val }
+        @lock.synchronize {
+          val = @value
+          @value = 0
+          val
+        }
       end
     end
 
@@ -242,9 +250,8 @@ module Sidekiq
     FAILURE = Counter.new
     WORKER_STATE = SharedWorkerState.new
 
-    def stats(job_hash, queue)
-      tid = Sidekiq::Logging.tid
-      WORKER_STATE.set(tid, {:queue => queue, :payload => job_hash, :run_at => Time.now.to_i })
+    def stats(jobstr, queue)
+      WORKER_STATE.set(tid, {queue: queue, payload: jobstr, run_at: Time.now.to_i})
 
       begin
         yield
@@ -257,23 +264,17 @@ module Sidekiq
       end
     end
 
-    # Deep clone the arguments passed to the worker so that if
-    # the job fails, what is pushed back onto Redis hasn't
-    # been mutated by the worker.
-    def cloned(thing)
-      Marshal.load(Marshal.dump(thing))
-    end
-
     def constantize(str)
-      names = str.split('::')
+      return Object.const_get(str) unless str.include?("::")
+
+      names = str.split("::")
       names.shift if names.empty? || names.first.empty?
 
       names.inject(Object) do |constant, name|
         # the false flag limits search for name to under the constant namespace
         # which mimics Rails' behaviour
-        constant.const_defined?(name, false) ? constant.const_get(name, false) : constant.const_missing(name)
+        constant.const_get(name, false)
       end
     end
-
   end
 end
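
Three behavioral shifts stand out in this file: the raw JSON string (jobstr) is passed through instead of deep-cloning the job hash, ack now starts out false and is only flipped once dispatch returns or a retry has been filed, and the final acknowledgment is shielded from the shutdown interrupt. A self-contained sketch of that interrupt-shielding pattern, using stand-in classes rather than Sidekiq's own:

    # Stand-ins for illustration only; Sidekiq::Shutdown is the real Interrupt subclass.
    Shutdown = Class.new(Interrupt)
    work = Struct.new(:jid) {
      def acknowledge
        puts "acked #{jid}"
      end
    }.new("abc123")

    ack = false
    begin
      # ... run the job here; a hard shutdown may raise Shutdown at any point ...
      ack = true
    ensure
      if ack
        # Any pending Shutdown raised via Thread#raise is deferred until this
        # block finishes, so the acknowledgment cannot be cut off halfway.
        Thread.handle_interrupt(Shutdown => :never) do
          work.acknowledge
        end
      end
    end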