sidekiq 6.1.0
Potentially problematic release.
- checksums.yaml +7 -0
- data/.circleci/config.yml +71 -0
- data/.github/contributing.md +32 -0
- data/.github/issue_template.md +11 -0
- data/.gitignore +13 -0
- data/.standard.yml +20 -0
- data/3.0-Upgrade.md +70 -0
- data/4.0-Upgrade.md +53 -0
- data/5.0-Upgrade.md +56 -0
- data/6.0-Upgrade.md +72 -0
- data/COMM-LICENSE +97 -0
- data/Changes.md +1718 -0
- data/Ent-2.0-Upgrade.md +37 -0
- data/Ent-Changes.md +269 -0
- data/Gemfile +24 -0
- data/Gemfile.lock +208 -0
- data/LICENSE +9 -0
- data/Pro-2.0-Upgrade.md +138 -0
- data/Pro-3.0-Upgrade.md +44 -0
- data/Pro-4.0-Upgrade.md +35 -0
- data/Pro-5.0-Upgrade.md +25 -0
- data/Pro-Changes.md +790 -0
- data/README.md +94 -0
- data/Rakefile +10 -0
- data/bin/sidekiq +42 -0
- data/bin/sidekiqload +157 -0
- data/bin/sidekiqmon +8 -0
- data/code_of_conduct.md +50 -0
- data/lib/generators/sidekiq/templates/worker.rb.erb +9 -0
- data/lib/generators/sidekiq/templates/worker_spec.rb.erb +6 -0
- data/lib/generators/sidekiq/templates/worker_test.rb.erb +8 -0
- data/lib/generators/sidekiq/worker_generator.rb +57 -0
- data/lib/sidekiq.rb +262 -0
- data/lib/sidekiq/api.rb +960 -0
- data/lib/sidekiq/cli.rb +401 -0
- data/lib/sidekiq/client.rb +263 -0
- data/lib/sidekiq/delay.rb +41 -0
- data/lib/sidekiq/exception_handler.rb +27 -0
- data/lib/sidekiq/extensions/action_mailer.rb +47 -0
- data/lib/sidekiq/extensions/active_record.rb +43 -0
- data/lib/sidekiq/extensions/class_methods.rb +43 -0
- data/lib/sidekiq/extensions/generic_proxy.rb +31 -0
- data/lib/sidekiq/fetch.rb +82 -0
- data/lib/sidekiq/job_logger.rb +63 -0
- data/lib/sidekiq/job_retry.rb +262 -0
- data/lib/sidekiq/launcher.rb +206 -0
- data/lib/sidekiq/logger.rb +165 -0
- data/lib/sidekiq/manager.rb +135 -0
- data/lib/sidekiq/middleware/chain.rb +160 -0
- data/lib/sidekiq/middleware/i18n.rb +40 -0
- data/lib/sidekiq/monitor.rb +133 -0
- data/lib/sidekiq/paginator.rb +47 -0
- data/lib/sidekiq/processor.rb +280 -0
- data/lib/sidekiq/rails.rb +50 -0
- data/lib/sidekiq/redis_connection.rb +146 -0
- data/lib/sidekiq/scheduled.rb +173 -0
- data/lib/sidekiq/sd_notify.rb +149 -0
- data/lib/sidekiq/systemd.rb +24 -0
- data/lib/sidekiq/testing.rb +344 -0
- data/lib/sidekiq/testing/inline.rb +30 -0
- data/lib/sidekiq/util.rb +67 -0
- data/lib/sidekiq/version.rb +5 -0
- data/lib/sidekiq/web.rb +213 -0
- data/lib/sidekiq/web/action.rb +93 -0
- data/lib/sidekiq/web/application.rb +357 -0
- data/lib/sidekiq/web/csrf_protection.rb +153 -0
- data/lib/sidekiq/web/helpers.rb +333 -0
- data/lib/sidekiq/web/router.rb +101 -0
- data/lib/sidekiq/worker.rb +244 -0
- data/sidekiq.gemspec +20 -0
- data/web/assets/images/favicon.ico +0 -0
- data/web/assets/images/logo.png +0 -0
- data/web/assets/images/status.png +0 -0
- data/web/assets/javascripts/application.js +95 -0
- data/web/assets/javascripts/dashboard.js +296 -0
- data/web/assets/stylesheets/application-dark.css +133 -0
- data/web/assets/stylesheets/application-rtl.css +246 -0
- data/web/assets/stylesheets/application.css +1158 -0
- data/web/assets/stylesheets/bootstrap-rtl.min.css +9 -0
- data/web/assets/stylesheets/bootstrap.css +5 -0
- data/web/locales/ar.yml +81 -0
- data/web/locales/cs.yml +78 -0
- data/web/locales/da.yml +68 -0
- data/web/locales/de.yml +81 -0
- data/web/locales/el.yml +68 -0
- data/web/locales/en.yml +83 -0
- data/web/locales/es.yml +70 -0
- data/web/locales/fa.yml +80 -0
- data/web/locales/fr.yml +78 -0
- data/web/locales/he.yml +79 -0
- data/web/locales/hi.yml +75 -0
- data/web/locales/it.yml +69 -0
- data/web/locales/ja.yml +83 -0
- data/web/locales/ko.yml +68 -0
- data/web/locales/lt.yml +83 -0
- data/web/locales/nb.yml +77 -0
- data/web/locales/nl.yml +68 -0
- data/web/locales/pl.yml +59 -0
- data/web/locales/pt-br.yml +68 -0
- data/web/locales/pt.yml +67 -0
- data/web/locales/ru.yml +78 -0
- data/web/locales/sv.yml +68 -0
- data/web/locales/ta.yml +75 -0
- data/web/locales/uk.yml +76 -0
- data/web/locales/ur.yml +80 -0
- data/web/locales/vi.yml +83 -0
- data/web/locales/zh-cn.yml +68 -0
- data/web/locales/zh-tw.yml +68 -0
- data/web/views/_footer.erb +20 -0
- data/web/views/_job_info.erb +89 -0
- data/web/views/_nav.erb +52 -0
- data/web/views/_paging.erb +23 -0
- data/web/views/_poll_link.erb +7 -0
- data/web/views/_status.erb +4 -0
- data/web/views/_summary.erb +40 -0
- data/web/views/busy.erb +101 -0
- data/web/views/dashboard.erb +75 -0
- data/web/views/dead.erb +34 -0
- data/web/views/layout.erb +41 -0
- data/web/views/morgue.erb +78 -0
- data/web/views/queue.erb +55 -0
- data/web/views/queues.erb +38 -0
- data/web/views/retries.erb +83 -0
- data/web/views/retry.erb +34 -0
- data/web/views/scheduled.erb +57 -0
- data/web/views/scheduled_job_info.erb +8 -0
- metadata +212 -0
+++ data/lib/sidekiq/launcher.rb
@@ -0,0 +1,206 @@
```ruby
# frozen_string_literal: true

require "sidekiq/manager"
require "sidekiq/fetch"
require "sidekiq/scheduled"

module Sidekiq
  # The Launcher starts the Manager and Poller threads and provides the process heartbeat.
  class Launcher
    include Util

    STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

    PROCTITLES = [
      proc { "sidekiq" },
      proc { Sidekiq::VERSION },
      proc { |me, data| data["tag"] },
      proc { |me, data| "[#{Processor::WORKER_STATE.size} of #{data["concurrency"]} busy]" },
      proc { |me, data| "stopping" if me.stopping? }
    ]

    attr_accessor :manager, :poller, :fetcher

    def initialize(options)
      options[:fetch] ||= BasicFetch.new(options)
      @manager = Sidekiq::Manager.new(options)
      @poller = Sidekiq::Scheduled::Poller.new
      @done = false
      @options = options
    end

    def run
      @thread = safe_thread("heartbeat", &method(:start_heartbeat))
      @poller.start
      @manager.start
    end

    # Stops this instance from processing any more jobs.
    def quiet
      @done = true
      @manager.quiet
      @poller.terminate
    end

    # Shuts down the process. This method does not
    # return until all work is complete and cleaned up.
    # It can take up to the timeout to complete.
    def stop
      deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @options[:timeout]

      @done = true
      @manager.quiet
      @poller.terminate

      @manager.stop(deadline)

      # Requeue everything in case there was a worker who grabbed work while stopped
      # This call is a no-op in Sidekiq but necessary for Sidekiq Pro.
      strategy = @options[:fetch]
      strategy.bulk_requeue([], @options)

      clear_heartbeat
    end

    def stopping?
      @done
    end

    private unless $TESTING

    def start_heartbeat
      loop do
        heartbeat
        sleep 5
      end
      Sidekiq.logger.info("Heartbeat stopping...")
    end

    def clear_heartbeat
      # Remove record from Redis since we are shutting down.
      # Note we don't stop the heartbeat thread; if the process
      # doesn't actually exit, it'll reappear in the Web UI.
      Sidekiq.redis do |conn|
        conn.pipelined do
          conn.srem("processes", identity)
          conn.unlink("#{identity}:workers")
        end
      end
    rescue
      # best effort, ignore network errors
    end

    def heartbeat
      $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")

      ❤
    end

    def self.flush_stats
      fails = Processor::FAILURE.reset
      procd = Processor::PROCESSED.reset
      return if fails + procd == 0

      nowdate = Time.now.utc.strftime("%Y-%m-%d")
      begin
        Sidekiq.redis do |conn|
          conn.pipelined do
            conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
            conn.expire("stat:processed:#{nowdate}", STATS_TTL)

            conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
            conn.expire("stat:failed:#{nowdate}", STATS_TTL)
          end
        end
      rescue => ex
        # we're exiting the process, things might be shut down so don't
        # try to handle the exception
        Sidekiq.logger.warn("Unable to flush stats: #{ex}")
      end
    end
    at_exit(&method(:flush_stats))

    def ❤
      key = identity
      fails = procd = 0

      begin
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
        curstate = Processor::WORKER_STATE.dup

        workers_key = "#{key}:workers"
        nowdate = Time.now.utc.strftime("%Y-%m-%d")

        Sidekiq.redis do |conn|
          conn.multi do
            conn.incrby("stat:processed", procd)
            conn.incrby("stat:processed:#{nowdate}", procd)
            conn.expire("stat:processed:#{nowdate}", STATS_TTL)

            conn.incrby("stat:failed", fails)
            conn.incrby("stat:failed:#{nowdate}", fails)
            conn.expire("stat:failed:#{nowdate}", STATS_TTL)

            conn.unlink(workers_key)
            curstate.each_pair do |tid, hash|
              conn.hset(workers_key, tid, Sidekiq.dump_json(hash))
            end
            conn.expire(workers_key, 60)
          end
        end

        fails = procd = 0

        _, exists, _, _, msg = Sidekiq.redis { |conn|
          conn.multi {
            conn.sadd("processes", key)
            conn.exists?(key)
            conn.hmset(key, "info", to_json, "busy", curstate.size, "beat", Time.now.to_f, "quiet", @done)
            conn.expire(key, 60)
            conn.rpop("#{key}-signals")
          }
        }

        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
        fire_event(:heartbeat) unless exists

        return unless msg

        ::Process.kill(msg, ::Process.pid)
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e}")
        # don't lose the counts if there was a network issue
        Processor::PROCESSED.incr(procd)
        Processor::FAILURE.incr(fails)
      end
    end

    def to_data
      @data ||= begin
        {
          "hostname" => hostname,
          "started_at" => Time.now.to_f,
          "pid" => ::Process.pid,
          "tag" => @options[:tag] || "",
          "concurrency" => @options[:concurrency],
          "queues" => @options[:queues].uniq,
          "labels" => @options[:labels],
          "identity" => identity
        }
      end
    end

    def to_json
      @json ||= begin
        # this data changes infrequently so dump it to a string
        # now so we don't need to dump it every heartbeat.
        Sidekiq.dump_json(to_data)
      end
    end
  end
end
```
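Taken together, `run` starts three moving parts: the heartbeat thread, the scheduled-job Poller, and the Manager. Below is a minimal embedding sketch that mirrors how `sidekiq/cli.rb` drives this class; the option values are illustrative assumptions, and the supported entry point remains the `sidekiq` binary:

```ruby
require "sidekiq"
require "sidekiq/launcher"

# Illustrative options; Launcher reads :timeout, :tag, :concurrency,
# :queues and :labels from this hash (see #initialize and #to_data above).
options = Sidekiq.options.merge(
  concurrency: 5,
  queues: ["default"],
  timeout: 25, # seconds Launcher#stop waits before hard shutdown
  tag: "myapp"
)

launcher = Sidekiq::Launcher.new(options)
launcher.run # starts the heartbeat thread, the Poller, and the Manager

# The CLI maps signals onto the lifecycle methods:
# launcher.quiet  # TSTP: take no new jobs, finish in-flight work
# launcher.stop   # TERM: drain for up to options[:timeout], requeue the rest
```

Note that each `❤` beat rewrites the process hash under `identity` with a 60-second TTL, so a process that dies without calling `clear_heartbeat` drops out of the Web UI within a minute.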
+++ data/lib/sidekiq/logger.rb
@@ -0,0 +1,165 @@
```ruby
# frozen_string_literal: true

require "logger"
require "time"

module Sidekiq
  module Context
    def self.with(hash)
      current.merge!(hash)
      yield
    ensure
      hash.each_key { |key| current.delete(key) }
    end

    def self.current
      Thread.current[:sidekiq_context] ||= {}
    end
  end

  module LoggingUtils
    LEVELS = {
      "debug" => 0,
      "info" => 1,
      "warn" => 2,
      "error" => 3,
      "fatal" => 4
    }
    LEVELS.default_proc = proc do |_, level|
      Sidekiq.logger.warn("Invalid log level: #{level.inspect}")
      nil
    end

    def debug?
      level <= 0
    end

    def info?
      level <= 1
    end

    def warn?
      level <= 2
    end

    def error?
      level <= 3
    end

    def fatal?
      level <= 4
    end

    def local_level
      Thread.current[:sidekiq_log_level]
    end

    def local_level=(level)
      case level
      when Integer
        Thread.current[:sidekiq_log_level] = level
      when Symbol, String
        Thread.current[:sidekiq_log_level] = LEVELS[level.to_s]
      when nil
        Thread.current[:sidekiq_log_level] = nil
      else
        raise ArgumentError, "Invalid log level: #{level.inspect}"
      end
    end

    def level
      local_level || super
    end

    # Change the thread-local level for the duration of the given block.
    def log_at(level)
      old_local_level = local_level
      self.local_level = level
      yield
    ensure
      self.local_level = old_local_level
    end

    # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
    # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
    def add(severity, message = nil, progname = nil, &block)
      severity ||= ::Logger::UNKNOWN
      progname ||= @progname

      return true if @logdev.nil? || severity < level

      if message.nil?
        if block_given?
          message = yield
        else
          message = progname
          progname = @progname
        end
      end

      @logdev.write format_message(format_severity(severity), Time.now, progname, message)
    end
  end

  class Logger < ::Logger
    include LoggingUtils

    def initialize(*args, **kwargs)
      super
      self.formatter = Sidekiq.log_formatter
    end

    module Formatters
      class Base < ::Logger::Formatter
        def tid
          Thread.current["sidekiq_tid"] ||= (Thread.current.object_id ^ ::Process.pid).to_s(36)
        end

        def ctx
          Sidekiq::Context.current
        end

        def format_context
          if ctx.any?
            " " + ctx.compact.map { |k, v|
              case v
              when Array
                "#{k}=#{v.join(",")}"
              else
                "#{k}=#{v}"
              end
            }.join(" ")
          end
        end
      end

      class Pretty < Base
        def call(severity, time, program_name, message)
          "#{time.utc.iso8601(3)} pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
        end
      end

      class WithoutTimestamp < Pretty
        def call(severity, time, program_name, message)
          "pid=#{::Process.pid} tid=#{tid}#{format_context} #{severity}: #{message}\n"
        end
      end

      class JSON < Base
        def call(severity, time, program_name, message)
          hash = {
            ts: time.utc.iso8601(3),
            pid: ::Process.pid,
            tid: tid,
            lvl: severity,
            msg: message
          }
          c = ctx
          hash["ctx"] = c unless c.empty?

          Sidekiq.dump_json(hash) << "\n"
        end
      end
    end
  end
end
```
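A short sketch of how these pieces compose: `Context.with` stores key/value pairs in a thread-local hash that every formatter appends via `format_context`, and `log_at` swaps the thread-local level consulted by the overridden `add`. The output lines in the comments are abbreviated illustrations, not captured output:

```ruby
require "sidekiq"
require "sidekiq/logger"

logger = Sidekiq::Logger.new($stdout)
logger.formatter = Sidekiq::Logger::Formatters::Pretty.new
logger.level = ::Logger::INFO

logger.info("booted")
# ~> 2020-08-28T12:00:00.000Z pid=4242 tid=oum3 INFO: booted

# Context keys ride along on every line logged inside the block
# ("... tid=oum3 class=HardWorker jid=abc123 INFO: start"), then are
# removed again by the ensure clause in Context.with.
Sidekiq::Context.with(class: "HardWorker", jid: "abc123") do
  logger.info("start")
end

# Thread-local level change: only this thread drops to DEBUG, and the
# previous level is restored even if the block raises.
logger.log_at(:debug) do
  logger.debug("verbose wiring details") # emitted
end
logger.debug("hidden") # suppressed again at INFO
```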
+++ data/lib/sidekiq/manager.rb
@@ -0,0 +1,135 @@
```ruby
# frozen_string_literal: true

require "sidekiq/util"
require "sidekiq/processor"
require "sidekiq/fetch"
require "set"

module Sidekiq
  ##
  # The Manager is the central coordination point in Sidekiq, controlling
  # the lifecycle of the Processors.
  #
  # Tasks:
  #
  # 1. start: Spin up Processors.
  # 2. processor_died: Handle job failure, throw away Processor, create new one.
  # 3. quiet: shutdown idle Processors.
  # 4. stop: hard stop the Processors by deadline.
  #
  # Note that only the last task requires its own Thread since it has to monitor
  # the shutdown process. The other tasks are performed by other threads.
  #
  class Manager
    include Util

    attr_reader :workers
    attr_reader :options

    def initialize(options = {})
      logger.debug { options.inspect }
      @options = options
      @count = options[:concurrency] || 10
      raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

      @done = false
      @workers = Set.new
      @count.times do
        @workers << Processor.new(self, options)
      end
      @plock = Mutex.new
    end

    def start
      @workers.each do |x|
        x.start
      end
    end

    def quiet
      return if @done
      @done = true

      logger.info { "Terminating quiet workers" }
      @workers.each { |x| x.terminate }
      fire_event(:quiet, reverse: true)
    end

    # hack for quicker development / testing environment #2774
    PAUSE_TIME = STDOUT.tty? ? 0.1 : 0.5

    def stop(deadline)
      quiet
      fire_event(:shutdown, reverse: true)

      # some of the shutdown events can be async,
      # we don't have any way to know when they're done but
      # give them a little time to take effect
      sleep PAUSE_TIME
      return if @workers.empty?

      logger.info { "Pausing to allow workers to finish..." }
      remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      while remaining > PAUSE_TIME
        return if @workers.empty?
        sleep PAUSE_TIME
        remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
      end
      return if @workers.empty?

      hard_shutdown
    end

    def processor_stopped(processor)
      @plock.synchronize do
        @workers.delete(processor)
      end
    end

    def processor_died(processor, reason)
      @plock.synchronize do
        @workers.delete(processor)
        unless @done
          p = Processor.new(self, options)
          @workers << p
          p.start
        end
      end
    end

    def stopped?
      @done
    end

    private

    def hard_shutdown
      # We've reached the timeout and we still have busy workers.
      # They must die but their jobs shall live on.
      cleanup = nil
      @plock.synchronize do
        cleanup = @workers.dup
      end

      if cleanup.size > 0
        jobs = cleanup.map { |p| p.job }.compact

        logger.warn { "Terminating #{cleanup.size} busy worker threads" }
        logger.warn { "Work still in progress #{jobs.inspect}" }

        # Re-enqueue unfinished jobs
        # NOTE: You may notice that we may push a job back to redis before
        # the worker thread is terminated. This is ok because Sidekiq's
        # contract says that jobs are run AT LEAST once. Process termination
        # is delayed until we're certain the jobs are back in Redis because
        # it is worse to lose a job than to run it twice.
        strategy = @options[:fetch]
        strategy.bulk_requeue(jobs, @options)
      end

      cleanup.each do |processor|
        processor.kill
      end
    end
  end
end
```
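One detail worth calling out: the `deadline` argument to `stop` is an absolute `CLOCK_MONOTONIC` timestamp, not a duration; `Launcher#stop` computes it as now plus `@options[:timeout]`. A hedged sketch of driving a Manager directly, which the Launcher normally does for you; the option values are illustrative assumptions:

```ruby
require "sidekiq"
require "sidekiq/manager"
require "sidekiq/fetch"

# Illustrative values; in a real process these come from the CLI.
options = Sidekiq.options.merge(concurrency: 2, queues: ["default"])
options[:fetch] = Sidekiq::BasicFetch.new(options) # Launcher#initialize does the same

manager = Sidekiq::Manager.new(options)
manager.start # spins up `concurrency` Processor threads

# ... Processors work; each removes itself from @workers as it stops ...

# Absolute monotonic deadline 25 seconds out. stop() quiets the Processors,
# polls until the set drains or the deadline passes, then hard-kills any
# stragglers after bulk_requeue pushes their jobs back to Redis.
deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 25
manager.stop(deadline)
```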