sidekiq 3.4.1 → 4.0.0

Potentially problematic release.

Files changed (83)
  1. checksums.yaml +4 -4
  2. data/4.0-Upgrade.md +50 -0
  3. data/COMM-LICENSE +55 -45
  4. data/Changes.md +73 -1
  5. data/Ent-Changes.md +66 -0
  6. data/Gemfile +7 -1
  7. data/Pro-2.0-Upgrade.md +2 -2
  8. data/Pro-3.0-Upgrade.md +46 -0
  9. data/Pro-Changes.md +65 -2
  10. data/README.md +8 -9
  11. data/bin/sidekiq +5 -0
  12. data/bin/sidekiqctl +8 -2
  13. data/bin/sidekiqload +167 -0
  14. data/lib/sidekiq/api.rb +29 -31
  15. data/lib/sidekiq/cli.rb +41 -42
  16. data/lib/sidekiq/client.rb +5 -10
  17. data/lib/sidekiq/fetch.rb +35 -111
  18. data/lib/sidekiq/launcher.rb +102 -42
  19. data/lib/sidekiq/manager.rb +78 -180
  20. data/lib/sidekiq/middleware/server/logging.rb +10 -5
  21. data/lib/sidekiq/middleware/server/retry_jobs.rb +5 -5
  22. data/lib/sidekiq/processor.rb +126 -97
  23. data/lib/sidekiq/redis_connection.rb +23 -5
  24. data/lib/sidekiq/scheduled.rb +47 -26
  25. data/lib/sidekiq/testing.rb +96 -17
  26. data/lib/sidekiq/util.rb +20 -0
  27. data/lib/sidekiq/version.rb +1 -1
  28. data/lib/sidekiq/web.rb +17 -1
  29. data/lib/sidekiq/web_helpers.rb +26 -4
  30. data/lib/sidekiq/worker.rb +14 -0
  31. data/lib/sidekiq.rb +37 -14
  32. data/sidekiq.gemspec +11 -11
  33. data/test/helper.rb +45 -10
  34. data/test/test_actors.rb +137 -0
  35. data/test/test_api.rb +388 -388
  36. data/test/test_cli.rb +29 -59
  37. data/test/test_client.rb +60 -135
  38. data/test/test_extensions.rb +29 -23
  39. data/test/test_fetch.rb +2 -57
  40. data/test/test_launcher.rb +80 -0
  41. data/test/test_logging.rb +1 -1
  42. data/test/test_manager.rb +16 -131
  43. data/test/test_middleware.rb +3 -5
  44. data/test/test_processor.rb +110 -76
  45. data/test/test_rails.rb +21 -0
  46. data/test/test_redis_connection.rb +0 -1
  47. data/test/test_retry.rb +114 -162
  48. data/test/test_scheduled.rb +11 -17
  49. data/test/test_scheduling.rb +20 -42
  50. data/test/test_sidekiq.rb +46 -16
  51. data/test/test_testing.rb +80 -20
  52. data/test/test_testing_fake.rb +68 -8
  53. data/test/test_testing_inline.rb +3 -3
  54. data/test/test_util.rb +16 -0
  55. data/test/test_web.rb +17 -3
  56. data/test/test_web_helpers.rb +3 -2
  57. data/web/assets/images/favicon.ico +0 -0
  58. data/web/assets/javascripts/application.js +6 -1
  59. data/web/assets/javascripts/dashboard.js +2 -8
  60. data/web/assets/javascripts/locales/jquery.timeago.pt-br.js +14 -14
  61. data/web/assets/stylesheets/application.css +33 -56
  62. data/web/locales/de.yml +1 -1
  63. data/web/locales/en.yml +1 -0
  64. data/web/locales/{no.yml → nb.yml} +10 -2
  65. data/web/locales/uk.yml +76 -0
  66. data/web/views/_footer.erb +2 -7
  67. data/web/views/_job_info.erb +1 -1
  68. data/web/views/_nav.erb +2 -2
  69. data/web/views/_poll_js.erb +5 -0
  70. data/web/views/{_poll.erb → _poll_link.erb} +0 -3
  71. data/web/views/busy.erb +2 -1
  72. data/web/views/dead.erb +1 -0
  73. data/web/views/layout.erb +2 -0
  74. data/web/views/morgue.erb +3 -0
  75. data/web/views/queue.erb +1 -0
  76. data/web/views/queues.erb +1 -0
  77. data/web/views/retries.erb +3 -0
  78. data/web/views/retry.erb +1 -0
  79. data/web/views/scheduled.erb +1 -0
  80. data/web/views/scheduled_job_info.erb +1 -0
  81. metadata +81 -47
  82. data/lib/sidekiq/actor.rb +0 -39
  83. data/test/test_worker_generator.rb +0 -17
data/lib/sidekiq/fetch.rb CHANGED
@@ -1,101 +1,34 @@
 require 'sidekiq'
-require 'sidekiq/util'
-require 'sidekiq/actor'
 
 module Sidekiq
-  ##
-  # The Fetcher blocks on Redis, waiting for a message to process
-  # from the queues. It gets the message and hands it to the Manager
-  # to assign to a ready Processor.
-  class Fetcher
-    include Util
-    include Actor
-
-    TIMEOUT = 1
-
-    attr_reader :down
-
-    def initialize(mgr, options)
-      @down = nil
-      @mgr = mgr
-      @strategy = Fetcher.strategy.new(options)
-    end
-
-    # Fetching is straightforward: the Manager makes a fetch
-    # request for each idle processor when Sidekiq starts and
-    # then issues a new fetch request every time a Processor
-    # finishes a message.
-    #
-    # Because we have to shut down cleanly, we can't block
-    # forever and we can't loop forever. Instead we reschedule
-    # a new fetch if the current fetch turned up nothing.
-    def fetch
-      watchdog('Fetcher#fetch died') do
-        return if Sidekiq::Fetcher.done?
-
-        begin
-          work = @strategy.retrieve_work
-          ::Sidekiq.logger.info("Redis is online, #{Time.now - @down} sec downtime") if @down
-          @down = nil
-
-          if work
-            @mgr.async.assign(work)
-          else
-            after(0) { fetch }
-          end
-        rescue => ex
-          handle_fetch_exception(ex)
-        end
+  class BasicFetch
+    # We want the fetch operation to timeout every few seconds so the thread
+    # can check if the process is shutting down.
+    TIMEOUT = 2
 
+    UnitOfWork = Struct.new(:queue, :job) do
+      def acknowledge
+        # nothing to do
       end
-    end
-
-    private
 
-    def pause
-      sleep(TIMEOUT)
-    end
+      def queue_name
+        queue.gsub(/.*queue:/, ''.freeze)
+      end
 
-    def handle_fetch_exception(ex)
-      if !@down
-        logger.error("Error fetching message: #{ex}")
-        ex.backtrace.each do |bt|
-          logger.error(bt)
+      def requeue
+        Sidekiq.redis do |conn|
+          conn.rpush("queue:#{queue_name}", job)
         end
       end
-      @down ||= Time.now
-      pause
-      after(0) { fetch }
-    rescue Task::TerminatedError
-      # If redis is down when we try to shut down, all the fetch backlog
-      # raises these errors. Haven't been able to figure out what I'm doing wrong.
-    end
-
-    # Ugh. Say hello to a bloody hack.
-    # Can't find a clean way to get the fetcher to just stop processing
-    # its mailbox when shutdown starts.
-    def self.done!
-      @done = true
-    end
-
-    def self.reset # testing only
-      @done = nil
     end
 
-    def self.done?
-      @done
-    end
-
-    def self.strategy
-      Sidekiq.options[:fetch] || BasicFetch
-    end
-  end
-
-  class BasicFetch
     def initialize(options)
       @strictly_ordered_queues = !!options[:strict]
       @queues = options[:queues].map { |q| "queue:#{q}" }
-      @unique_queues = @queues.uniq
+      if @strictly_ordered_queues
+        @queues = @queues.uniq
+        @queues << TIMEOUT
+      end
     end
 
     def retrieve_work
@@ -103,6 +36,22 @@ module Sidekiq
       UnitOfWork.new(*work) if work
     end
 
+    # Creating the Redis#brpop command takes into account any
+    # configured queue weights. By default Redis#brpop returns
+    # data from the first queue that has pending elements. We
+    # recreate the queue command each time we invoke Redis#brpop
+    # to honor weights and avoid queue starvation.
+    def queues_cmd
+      if @strictly_ordered_queues
+        @queues
+      else
+        queues = @queues.shuffle.uniq
+        queues << TIMEOUT
+        queues
+      end
+    end
+
+
     # By leaving this as a class method, it can be pluggable and used by the Manager actor. Making it
     # an instance method will make it async to the Fetcher actor
     def self.bulk_requeue(inprogress, options)
@@ -112,7 +61,7 @@ module Sidekiq
       jobs_to_requeue = {}
       inprogress.each do |unit_of_work|
         jobs_to_requeue[unit_of_work.queue_name] ||= []
-        jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.message
+        jobs_to_requeue[unit_of_work.queue_name] << unit_of_work.job
       end
 
       Sidekiq.redis do |conn|
@@ -122,35 +71,10 @@ module Sidekiq
           end
         end
       end
-      Sidekiq.logger.info("Pushed #{inprogress.size} messages back to Redis")
+      Sidekiq.logger.info("Pushed #{inprogress.size} jobs back to Redis")
     rescue => ex
       Sidekiq.logger.warn("Failed to requeue #{inprogress.size} jobs: #{ex.message}")
     end
 
-    UnitOfWork = Struct.new(:queue, :message) do
-      def acknowledge
-        # nothing to do
-      end
-
-      def queue_name
-        queue.gsub(/.*queue:/, '')
-      end
-
-      def requeue
-        Sidekiq.redis do |conn|
-          conn.rpush("queue:#{queue_name}", message)
-        end
-      end
-    end
-
-    # Creating the Redis#brpop command takes into account any
-    # configured queue weights. By default Redis#brpop returns
-    # data from the first queue that has pending elements. We
-    # recreate the queue command each time we invoke Redis#brpop
-    # to honor weights and avoid queue starvation.
-    def queues_cmd
-      queues = @strictly_ordered_queues ? @unique_queues.dup : @queues.shuffle.uniq
-      queues << Sidekiq::Fetcher::TIMEOUT
-    end
   end
 end
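The rewritten BasicFetch above contains the whole queue-weighting mechanism: queues_cmd is rebuilt before every Redis#brpop, so weighted (duplicated) queue names, shuffled and de-duplicated each time, decide which queue is checked first, while strict mode keeps one fixed list. The trailing integer is the BRPOP timeout in seconds. A standalone Ruby sketch of that logic (not part of the gem; the helper name and example weights are invented for illustration):

    # Illustrative re-implementation of the BasicFetch#queues_cmd idea.
    TIMEOUT = 2 # seconds BRPOP may block before the thread rechecks for shutdown

    def queues_cmd(queues, strict:)
      if strict
        queues.uniq + [TIMEOUT]
      else
        # A queue listed N times wins the first BRPOP slot roughly N times as
        # often, which honors weights without starving low-priority queues.
        queues.shuffle.uniq + [TIMEOUT]
      end
    end

    # Hypothetical config: "default" weighted 2, "low" weighted 1.
    weighted = ["queue:default", "queue:default", "queue:low"]
    p queues_cmd(weighted, strict: false) # e.g. ["queue:default", "queue:low", 2]
    p queues_cmd(weighted, strict: true)  # always ["queue:default", "queue:low", 2]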
data/lib/sidekiq/launcher.rb CHANGED
@@ -1,4 +1,4 @@
-require 'sidekiq/actor'
+# encoding: utf-8
 require 'sidekiq/manager'
 require 'sidekiq/fetch'
 require 'sidekiq/scheduled'
@@ -9,64 +9,116 @@ module Sidekiq
   # If any of these actors die, the Sidekiq process exits
   # immediately.
   class Launcher
-    include Actor
     include Util
 
-    trap_exit :actor_died
-
-    attr_reader :manager, :poller, :fetcher
+    attr_accessor :manager, :poller, :fetcher
 
     def initialize(options)
-      @condvar = Celluloid::Condition.new
-      @manager = Sidekiq::Manager.new_link(@condvar, options)
-      @poller = Sidekiq::Scheduled::Poller.new_link
-      @fetcher = Sidekiq::Fetcher.new_link(@manager, options)
-      @manager.fetcher = @fetcher
+      @manager = Sidekiq::Manager.new(options)
+      @poller = Sidekiq::Scheduled::Poller.new
       @done = false
       @options = options
     end
 
-    def actor_died(actor, reason)
-      # https://github.com/mperham/sidekiq/issues/2057#issuecomment-66485477
-      return if @done || !reason
+    def run
+      @thread = safe_thread("heartbeat", &method(:start_heartbeat))
+      @poller.start
+      @manager.start
+    end
 
-      Sidekiq.logger.warn("Sidekiq died due to the following error, cannot recover, process exiting")
-      handle_exception(reason)
-      exit(1)
+    # Stops this instance from processing any more jobs,
+    #
+    def quiet
+      @done = true
+      @manager.quiet
+      @poller.terminate
     end
 
-    def run
-      watchdog('Launcher#run') do
-        manager.async.start
-        poller.async.poll(true)
+    # Shuts down the process. This method does not
+    # return until all work is complete and cleaned up.
+    # It can take up to the timeout to complete.
+    def stop
+      deadline = Time.now + @options[:timeout]
 
-        start_heartbeat
-      end
+      @done = true
+      @manager.quiet
+      @poller.terminate
+
+      @manager.stop(deadline)
+
+      # Requeue everything in case there was a worker who grabbed work while stopped
+      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
+      strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+      strategy.bulk_requeue([], @options)
+
+      clear_heartbeat
     end
 
-    def stop
-      watchdog('Launcher#stop') do
-        @done = true
-        Sidekiq::Fetcher.done!
-        fetcher.terminate if fetcher.alive?
-        poller.terminate if poller.alive?
+    def stopping?
+      @done
+    end
 
-        manager.async.stop(:shutdown => true, :timeout => @options[:timeout])
-        @condvar.wait
-        manager.terminate
+    private unless $TESTING
 
-        # Requeue everything in case there was a worker who grabbed work while stopped
-        # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
-        Sidekiq::Fetcher.strategy.bulk_requeue([], @options)
+    JVM_RESERVED_SIGNALS = ['USR1', 'USR2'] # Don't Process#kill if we get these signals via the API
 
-        stop_heartbeat
-      end
+    def heartbeat(k, data, json)
+      results = Sidekiq::CLI::PROCTITLES.map {|x| x.(self, data) }
+      results.compact!
+      $0 = results.join(' ')
+
+      ❤(k, json)
     end
 
-    private
+    def ❤(key, json)
+      fails = procd = 0
+      begin
+        Processor::FAILURE.update {|curr| fails = curr; 0 }
+        Processor::PROCESSED.update {|curr| procd = curr; 0 }
+
+        workers_key = "#{key}:workers".freeze
+        nowdate = Time.now.utc.strftime("%Y-%m-%d".freeze)
+        Sidekiq.redis do |conn|
+          conn.pipelined do
+            conn.incrby("stat:processed".freeze, procd)
+            conn.incrby("stat:processed:#{nowdate}", procd)
+            conn.incrby("stat:failed".freeze, fails)
+            conn.incrby("stat:failed:#{nowdate}", fails)
+            conn.del(workers_key)
+            Processor::WORKER_STATE.each_pair do |tid, hash|
+              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
+            end
+          end
+        end
+        fails = procd = 0
+
+        _, _, _, msg = Sidekiq.redis do |conn|
+          conn.pipelined do
+            conn.sadd('processes', key)
+            conn.hmset(key, 'info', json, 'busy', Processor::WORKER_STATE.size, 'beat', Time.now.to_f)
+            conn.expire(key, 60)
+            conn.rpop("#{key}-signals")
+          end
+        end
+
+        return unless msg
+
+        if JVM_RESERVED_SIGNALS.include?(msg)
+          Sidekiq::CLI.instance.handle_signal(msg)
+        else
+          ::Process.kill(msg, $$)
+        end
+      rescue => e
+        # ignore all redis/network issues
+        logger.error("heartbeat: #{e.message}")
+        # don't lose the counts if there was a network issue
+        PROCESSED.increment(procd)
+        FAILURE.increment(fails)
+      end
+    end
 
     def start_heartbeat
-      key = identity
+      k = identity
       data = {
         'hostname' => hostname,
         'started_at' => Time.now.to_f,
@@ -74,16 +126,24 @@ module Sidekiq
         'tag' => @options[:tag] || '',
         'concurrency' => @options[:concurrency],
         'queues' => @options[:queues].uniq,
-        'labels' => Sidekiq.options[:labels],
-        'identity' => identity,
+        'labels' => @options[:labels],
+        'identity' => k,
       }
       # this data doesn't change so dump it to a string
       # now so we don't need to dump it every heartbeat.
       json = Sidekiq.dump_json(data)
-      manager.heartbeat(key, data, json)
+
+      while true
+        heartbeat(k, data, json)
+        sleep 5
+      end
+      Sidekiq.logger.info("Heartbeat stopping...")
     end
 
-    def stop_heartbeat
+    def clear_heartbeat
+      # Remove record from Redis since we are shutting down.
+      # Note we don't stop the heartbeat thread; if the process
+      # doesn't actually exit, it'll reappear in the Web UI.
       Sidekiq.redis do |conn|
         conn.pipelined do
           conn.srem('processes', identity)
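The heartbeat that used to live in Manager is now a plain thread owned by the Launcher, and the diff above shows the Redis layout it refreshes roughly every 5 seconds: a 'processes' set of process identities, a hash per identity holding 'info', 'busy' and 'beat', an "identity:workers" hash of currently running jobs, and an "identity-signals" list that is RPOPed on every beat. A rough sketch of reading and using that data from a separate Ruby process (the identity value below is hypothetical; the real key comes from Util#identity, and Sidekiq's own Web UI/API normally does this pushing for you):

    require 'redis' # redis-rb client, assumed available

    redis = Redis.new
    identity = "web1:4242" # hypothetical process identity

    redis.smembers('processes')          # identities of live Sidekiq processes
    redis.hgetall(identity)              # 'info' JSON, 'busy' count, 'beat' timestamp
    redis.hgetall("#{identity}:workers") # thread id => currently running job

    # Remote control: the heartbeat RPOPs this list and either routes USR1/USR2
    # to Sidekiq::CLI#handle_signal or Process.kill's itself with the value.
    redis.lpush("#{identity}-signals", 'USR1') # USR1 asks the process to go quiet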
data/lib/sidekiq/manager.rb CHANGED
@@ -1,155 +1,96 @@
 # encoding: utf-8
 require 'sidekiq/util'
-require 'sidekiq/actor'
 require 'sidekiq/processor'
 require 'sidekiq/fetch'
+require 'thread'
 
 module Sidekiq
 
   ##
-  # The main router in the system. This
-  # manages the processor state and accepts messages
-  # from Redis to be dispatched to an idle processor.
+  # The Manager is the central coordination point in Sidekiq, controlling
+  # the lifecycle of the Processors and feeding them jobs as necessary.
+  #
+  # Tasks:
+  #
+  # 1. start: Spin up Processors.
+  # 3. processor_died: Handle job failure, throw away Processor, create new one.
+  # 4. quiet: shutdown idle Processors.
+  # 5. stop: hard stop the Processors by deadline.
+  #
+  # Note that only the last task requires its own Thread since it has to monitor
+  # the shutdown process. The other tasks are performed by other threads.
   #
   class Manager
     include Util
-    include Actor
-    trap_exit :processor_died
 
-    attr_reader :ready
-    attr_reader :busy
-    attr_accessor :fetcher
+    attr_reader :workers
+    attr_reader :options
 
-    SPIN_TIME_FOR_GRACEFUL_SHUTDOWN = 1
-    JVM_RESERVED_SIGNALS = ['USR1', 'USR2'] # Don't Process#kill if we get these signals via the API
-
-    def initialize(condvar, options={})
+    def initialize(options={})
       logger.debug { options.inspect }
       @options = options
       @count = options[:concurrency] || 25
-      @done_callback = nil
-      @finished = condvar
+      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1
 
-      @in_progress = {}
-      @threads = {}
       @done = false
-      @busy = []
-      @ready = @count.times.map do
-        p = Processor.new_link(current_actor)
-        p.proxy_id = p.object_id
-        p
+      @workers = Set.new
+      @count.times do
+        @workers << Processor.new(self)
      end
+      @plock = Mutex.new
    end
 
-    def stop(options={})
-      watchdog('Manager#stop died') do
-        should_shutdown = options[:shutdown]
-        timeout = options[:timeout]
-
-        @done = true
-
-        logger.info { "Terminating #{@ready.size} quiet workers" }
-        @ready.each { |x| x.terminate if x.alive? }
-        @ready.clear
-
-        return if clean_up_for_graceful_shutdown
-
-        hard_shutdown_in timeout if should_shutdown
+    def start
+      @workers.each do |x|
+        x.start
      end
    end
 
-    def clean_up_for_graceful_shutdown
-      if @busy.empty?
-        shutdown
-        return true
-      end
+    def quiet
+      return if @done
+      @done = true
 
-      after(SPIN_TIME_FOR_GRACEFUL_SHUTDOWN) { clean_up_for_graceful_shutdown }
-      false
+      logger.info { "Terminating quiet workers" }
+      @workers.each { |x| x.terminate }
+      fire_event(:quiet, true)
    end
 
-    def start
-      @ready.each { dispatch }
-    end
+    def stop(deadline)
+      quiet
+      fire_event(:shutdown, true)
 
-    def when_done(&blk)
-      @done_callback = blk
-    end
+      # some of the shutdown events can be async,
+      # we don't have any way to know when they're done but
+      # give them a little time to take effect
+      sleep 0.5
+      return if @workers.empty?
 
-    def processor_done(processor)
-      watchdog('Manager#processor_done died') do
-        @done_callback.call(processor) if @done_callback
-        @in_progress.delete(processor.object_id)
-        @threads.delete(processor.object_id)
-        @busy.delete(processor)
-        if stopped?
-          processor.terminate if processor.alive?
-          shutdown if @busy.empty?
-        else
-          @ready << processor if processor.alive?
-        end
-        dispatch
+      logger.info { "Pausing to allow workers to finish..." }
+      remaining = deadline - Time.now
+      while remaining > 0.5
+        return if @workers.empty?
+        sleep 0.5
+        remaining = deadline - Time.now
      end
-    end
+      return if @workers.empty?
 
-    def processor_died(processor, reason)
-      watchdog("Manager#processor_died died") do
-        @in_progress.delete(processor.object_id)
-        @threads.delete(processor.object_id)
-        @busy.delete(processor)
-
-        unless stopped?
-          p = Processor.new_link(current_actor)
-          p.proxy_id = p.object_id
-          @ready << p
-          dispatch
-        else
-          shutdown if @busy.empty?
-        end
-      end
+      hard_shutdown
    end
 
-    def assign(work)
-      watchdog("Manager#assign died") do
-        if stopped?
-          # Race condition between Manager#stop if Fetcher
-          # is blocked on redis and gets a message after
-          # all the ready Processors have been stopped.
-          # Push the message back to redis.
-          work.requeue
-        else
-          processor = @ready.pop
-          @in_progress[processor.object_id] = work
-          @busy << processor
-          processor.async.process(work)
-        end
+    def processor_stopped(processor)
+      @plock.synchronize do
+        @workers.delete(processor)
      end
    end
 
-    # A hack worthy of Rube Goldberg. We need to be able
-    # to hard stop a working thread. But there's no way for us to
-    # get handle to the underlying thread performing work for a processor
-    # so we have it call us and tell us.
-    def real_thread(proxy_id, thr)
-      @threads[proxy_id] = thr
-    end
-
-    PROCTITLES = [
-      proc { 'sidekiq'.freeze },
-      proc { Sidekiq::VERSION },
-      proc { |mgr, data| data['tag'] },
-      proc { |mgr, data| "[#{mgr.busy.size} of #{data['concurrency']} busy]" },
-      proc { |mgr, data| "stopping" if mgr.stopped? },
-    ]
-
-    def heartbeat(key, data, json)
-      results = PROCTITLES.map {|x| x.(self, data) }
-      results.compact!
-      $0 = results.join(' ')
-
-      ❤(key, json)
-      after(5) do
-        heartbeat(key, data, json)
+    def processor_died(processor, reason)
+      @plock.synchronize do
+        @workers.delete(processor)
+        unless @done
+          p = Processor.new(self)
+          @workers << p
+          p.start
+        end
      end
    end
 
@@ -159,77 +100,34 @@ module Sidekiq
 
     private
 
-    def ❤(key, json)
-      begin
-        _, _, _, msg = Sidekiq.redis do |conn|
-          conn.multi do
-            conn.sadd('processes', key)
-            conn.hmset(key, 'info', json, 'busy', @busy.size, 'beat', Time.now.to_f)
-            conn.expire(key, 60)
-            conn.rpop("#{key}-signals")
-          end
-        end
-
-        return unless msg
-
-        if JVM_RESERVED_SIGNALS.include?(msg)
-          Sidekiq::CLI.instance.handle_signal(msg)
-        else
-          ::Process.kill(msg, $$)
-        end
-      rescue => e
-        # ignore all redis/network issues
-        logger.error("heartbeat: #{e.message}")
+    def hard_shutdown
+      # We've reached the timeout and we still have busy workers.
+      # They must die but their jobs shall live on.
+      cleanup = nil
+      @plock.synchronize do
+        cleanup = @workers.dup
      end
-    end
 
-    def hard_shutdown_in(delay)
-      logger.info { "Pausing up to #{delay} seconds to allow workers to finish..." }
+      if cleanup.size > 0
+        jobs = cleanup.map {|p| p.job }.compact
 
-      after(delay) do
-        watchdog("Manager#hard_shutdown_in died") do
-          # We've reached the timeout and we still have busy workers.
-          # They must die but their messages shall live on.
-          logger.warn { "Terminating #{@busy.size} busy worker threads" }
-          logger.warn { "Work still in progress #{@in_progress.values.inspect}" }
+        logger.warn { "Terminating #{cleanup.size} busy worker threads" }
+        logger.warn { "Work still in progress #{jobs.inspect}" }
 
-          requeue
-
-          @busy.each do |processor|
-            if processor.alive? && t = @threads.delete(processor.object_id)
-              t.raise Shutdown
-            end
-          end
-
-          @finished.signal
-        end
+        # Re-enqueue unfinished jobs
+        # NOTE: You may notice that we may push a job back to redis before
+        # the worker thread is terminated. This is ok because Sidekiq's
+        # contract says that jobs are run AT LEAST once. Process termination
+        # is delayed until we're certain the jobs are back in Redis because
+        # it is worse to lose a job than to run it twice.
+        strategy = (@options[:fetch] || Sidekiq::BasicFetch)
+        strategy.bulk_requeue(jobs, @options)
      end
-    end
-
-    def dispatch
-      return if stopped?
-      # This is a safety check to ensure we haven't leaked
-      # processors somehow.
-      raise "BUG: No processors, cannot continue!" if @ready.empty? && @busy.empty?
-      raise "No ready processor!?" if @ready.empty?
 
-      @fetcher.async.fetch
-    end
-
-    def shutdown
-      requeue
-      @finished.signal
+      cleanup.each do |processor|
+        processor.kill
+      end
    end
 
-    def requeue
-      # Re-enqueue terminated jobs
-      # NOTE: You may notice that we may push a job back to redis before
-      # the worker thread is terminated. This is ok because Sidekiq's
-      # contract says that jobs are run AT LEAST once. Process termination
-      # is delayed until we're certain the jobs are back in Redis because
-      # it is worse to lose a job than to run it twice.
-      Sidekiq::Fetcher.strategy.bulk_requeue(@in_progress.values, @options)
-      @in_progress.clear
-    end
  end
 end
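The hard_shutdown path above spells out Sidekiq's at-least-once contract: when the stop deadline passes, in-progress jobs are pushed back to Redis via bulk_requeue before the busy threads are killed, so a job interrupted mid-run will execute again later. A hypothetical worker (class and model names invented) showing the idempotency that contract expects from application code:

    class ChargeInvoiceWorker
      include Sidekiq::Worker # real Sidekiq API; the rest of this example is invented

      def perform(invoice_id)
        invoice = Invoice.find(invoice_id)
        return if invoice.charged? # a requeued or duplicate run becomes a no-op
        invoice.charge!
      end
    end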