sidekiq 6.2.2 → 7.1.2
Potentially problematic release.
- checksums.yaml +4 -4
- data/Changes.md +299 -11
- data/LICENSE.txt +9 -0
- data/README.md +45 -32
- data/bin/sidekiq +4 -9
- data/bin/sidekiqload +207 -117
- data/bin/sidekiqmon +4 -1
- data/lib/generators/sidekiq/job_generator.rb +57 -0
- data/lib/generators/sidekiq/templates/{worker.rb.erb → job.rb.erb} +2 -2
- data/lib/generators/sidekiq/templates/{worker_spec.rb.erb → job_spec.rb.erb} +1 -1
- data/lib/generators/sidekiq/templates/{worker_test.rb.erb → job_test.rb.erb} +1 -1
- data/lib/sidekiq/api.rb +334 -190
- data/lib/sidekiq/capsule.rb +127 -0
- data/lib/sidekiq/cli.rb +95 -81
- data/lib/sidekiq/client.rb +102 -96
- data/lib/sidekiq/{util.rb → component.rb} +14 -41
- data/lib/sidekiq/config.rb +278 -0
- data/lib/sidekiq/deploy.rb +62 -0
- data/lib/sidekiq/embedded.rb +61 -0
- data/lib/sidekiq/fetch.rb +26 -26
- data/lib/sidekiq/job.rb +371 -5
- data/lib/sidekiq/job_logger.rb +16 -28
- data/lib/sidekiq/job_retry.rb +85 -59
- data/lib/sidekiq/job_util.rb +105 -0
- data/lib/sidekiq/launcher.rb +106 -94
- data/lib/sidekiq/logger.rb +9 -44
- data/lib/sidekiq/manager.rb +40 -41
- data/lib/sidekiq/metrics/query.rb +153 -0
- data/lib/sidekiq/metrics/shared.rb +95 -0
- data/lib/sidekiq/metrics/tracking.rb +136 -0
- data/lib/sidekiq/middleware/chain.rb +96 -51
- data/lib/sidekiq/middleware/current_attributes.rb +95 -0
- data/lib/sidekiq/middleware/i18n.rb +6 -4
- data/lib/sidekiq/middleware/modules.rb +21 -0
- data/lib/sidekiq/monitor.rb +17 -4
- data/lib/sidekiq/paginator.rb +17 -9
- data/lib/sidekiq/processor.rb +60 -60
- data/lib/sidekiq/rails.rb +29 -6
- data/lib/sidekiq/redis_client_adapter.rb +96 -0
- data/lib/sidekiq/redis_connection.rb +17 -88
- data/lib/sidekiq/ring_buffer.rb +29 -0
- data/lib/sidekiq/scheduled.rb +101 -44
- data/lib/sidekiq/testing/inline.rb +4 -4
- data/lib/sidekiq/testing.rb +41 -68
- data/lib/sidekiq/transaction_aware_client.rb +44 -0
- data/lib/sidekiq/version.rb +2 -1
- data/lib/sidekiq/web/action.rb +3 -3
- data/lib/sidekiq/web/application.rb +47 -13
- data/lib/sidekiq/web/csrf_protection.rb +3 -3
- data/lib/sidekiq/web/helpers.rb +36 -33
- data/lib/sidekiq/web.rb +10 -17
- data/lib/sidekiq/worker_compatibility_alias.rb +13 -0
- data/lib/sidekiq.rb +86 -201
- data/sidekiq.gemspec +12 -10
- data/web/assets/javascripts/application.js +131 -60
- data/web/assets/javascripts/base-charts.js +106 -0
- data/web/assets/javascripts/chart.min.js +13 -0
- data/web/assets/javascripts/chartjs-plugin-annotation.min.js +7 -0
- data/web/assets/javascripts/dashboard-charts.js +166 -0
- data/web/assets/javascripts/dashboard.js +36 -273
- data/web/assets/javascripts/metrics.js +264 -0
- data/web/assets/stylesheets/application-dark.css +23 -23
- data/web/assets/stylesheets/application-rtl.css +2 -95
- data/web/assets/stylesheets/application.css +73 -402
- data/web/locales/ar.yml +70 -70
- data/web/locales/cs.yml +62 -62
- data/web/locales/da.yml +60 -53
- data/web/locales/de.yml +65 -65
- data/web/locales/el.yml +43 -24
- data/web/locales/en.yml +82 -69
- data/web/locales/es.yml +68 -68
- data/web/locales/fa.yml +65 -65
- data/web/locales/fr.yml +81 -67
- data/web/locales/gd.yml +99 -0
- data/web/locales/he.yml +65 -64
- data/web/locales/hi.yml +59 -59
- data/web/locales/it.yml +53 -53
- data/web/locales/ja.yml +73 -68
- data/web/locales/ko.yml +52 -52
- data/web/locales/lt.yml +66 -66
- data/web/locales/nb.yml +61 -61
- data/web/locales/nl.yml +52 -52
- data/web/locales/pl.yml +45 -45
- data/web/locales/pt-br.yml +63 -55
- data/web/locales/pt.yml +51 -51
- data/web/locales/ru.yml +67 -66
- data/web/locales/sv.yml +53 -53
- data/web/locales/ta.yml +60 -60
- data/web/locales/uk.yml +62 -61
- data/web/locales/ur.yml +64 -64
- data/web/locales/vi.yml +67 -67
- data/web/locales/zh-cn.yml +43 -16
- data/web/locales/zh-tw.yml +42 -8
- data/web/views/_footer.erb +6 -3
- data/web/views/_job_info.erb +18 -2
- data/web/views/_metrics_period_select.erb +12 -0
- data/web/views/_nav.erb +1 -1
- data/web/views/_paging.erb +2 -0
- data/web/views/_poll_link.erb +3 -6
- data/web/views/_summary.erb +7 -7
- data/web/views/busy.erb +44 -28
- data/web/views/dashboard.erb +44 -12
- data/web/views/layout.erb +1 -1
- data/web/views/metrics.erb +82 -0
- data/web/views/metrics_for_job.erb +68 -0
- data/web/views/morgue.erb +5 -9
- data/web/views/queue.erb +24 -24
- data/web/views/queues.erb +4 -2
- data/web/views/retries.erb +5 -9
- data/web/views/scheduled.erb +12 -13
- metadata +62 -31
- data/LICENSE +0 -9
- data/lib/generators/sidekiq/worker_generator.rb +0 -57
- data/lib/sidekiq/delay.rb +0 -41
- data/lib/sidekiq/exception_handler.rb +0 -27
- data/lib/sidekiq/extensions/action_mailer.rb +0 -48
- data/lib/sidekiq/extensions/active_record.rb +0 -43
- data/lib/sidekiq/extensions/class_methods.rb +0 -43
- data/lib/sidekiq/extensions/generic_proxy.rb +0 -33
- data/lib/sidekiq/worker.rb +0 -244
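Most of the renames in this list (worker_generator → job_generator, the worker*.erb → job*.erb templates, worker.rb removed in favor of job.rb, plus the new worker_compatibility_alias.rb) track Sidekiq's shift from "worker" to "job" terminology. As a rough illustration of the newer naming, here is a minimal sketch; ExampleJob is a hypothetical application class and the options shown are illustrative, with Sidekiq::Worker kept as a compatibility alias:

# Hypothetical application job using the Sidekiq 7 naming.
class ExampleJob
  include Sidekiq::Job # formerly Sidekiq::Worker; the old name remains as an alias

  sidekiq_options queue: "default", retry: 5

  def perform(user_id)
    # work for user_id goes here
  end
end

ExampleJob.perform_async(123)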
data/lib/sidekiq/launcher.rb
CHANGED
@@ -1,13 +1,14 @@
  # frozen_string_literal: true

  require "sidekiq/manager"
- require "sidekiq/
+ require "sidekiq/capsule"
  require "sidekiq/scheduled"
+ require "sidekiq/ring_buffer"

  module Sidekiq
-   # The Launcher starts the
+   # The Launcher starts the Capsule Managers, the Poller thread and provides the process heartbeat.
    class Launcher
-     include
+     include Sidekiq::Component

      STATS_TTL = 5 * 365 * 24 * 60 * 60 # 5 years

@@ -15,50 +16,56 @@ module Sidekiq
        proc { "sidekiq" },
        proc { Sidekiq::VERSION },
        proc { |me, data| data["tag"] },
-       proc { |me, data| "[#{Processor::
+       proc { |me, data| "[#{Processor::WORK_STATE.size} of #{me.config.total_concurrency} busy]" },
        proc { |me, data| "stopping" if me.stopping? }
      ]

-     attr_accessor :
+     attr_accessor :managers, :poller

-     def initialize(
-
-       @
-       @
+     def initialize(config, embedded: false)
+       @config = config
+       @embedded = embedded
+       @managers = config.capsules.values.map do |cap|
+         Sidekiq::Manager.new(cap)
+       end
+       @poller = Sidekiq::Scheduled::Poller.new(@config)
        @done = false
-       @options = options
      end

-
-
+     # Start this Sidekiq instance. If an embedding process already
+     # has a heartbeat thread, caller can use `async_beat: false`
+     # and instead have thread call Launcher#heartbeat every N seconds.
+     def run(async_beat: true)
+       Sidekiq.freeze!
+       logger.debug { @config.merge!({}) }
+       @thread = safe_thread("heartbeat", &method(:start_heartbeat)) if async_beat
        @poller.start
-       @
+       @managers.each(&:start)
      end

      # Stops this instance from processing any more jobs,
-     #
      def quiet
+       return if @done
+
        @done = true
-       @
+       @managers.each(&:quiet)
        @poller.terminate
+       fire_event(:quiet, reverse: true)
      end

-     # Shuts down
-     # return until all work is complete and cleaned up.
-     # It can take up to the timeout to complete.
+     # Shuts down this Sidekiq instance. Waits up to the deadline for all jobs to complete.
      def stop
-       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @
+       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + @config[:timeout]

-
-       @
-
-
-
+       quiet
+       stoppers = @managers.map do |mgr|
+         Thread.new do
+           mgr.stop(deadline)
+         end
+       end

-
-
-       strategy = @options[:fetch]
-       strategy.bulk_requeue([], @options)
+       fire_event(:shutdown, reverse: true)
+       stoppers.each(&:join)

        clear_heartbeat
      end
@@ -67,89 +74,86 @@ module Sidekiq
        @done
      end

+     # If embedding Sidekiq, you can have the process heartbeat
+     # call this method to regularly heartbeat rather than creating
+     # a separate thread.
+     def heartbeat
+       ❤
+     end
+
      private unless $TESTING

+     BEAT_PAUSE = 10
+
      def start_heartbeat
        loop do
-
-         sleep
+         beat
+         sleep BEAT_PAUSE
        end
-
+       logger.info("Heartbeat stopping...")
+     end
+
+     def beat
+       $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ") unless @embedded
+       ❤
      end

      def clear_heartbeat
+       flush_stats
+
        # Remove record from Redis since we are shutting down.
        # Note we don't stop the heartbeat thread; if the process
        # doesn't actually exit, it'll reappear in the Web UI.
-
-       conn.pipelined do
-
-
+       redis do |conn|
+         conn.pipelined do |pipeline|
+           pipeline.srem("processes", [identity])
+           pipeline.unlink("#{identity}:work")
          end
        end
      rescue
        # best effort, ignore network errors
      end

-     def
-       $0 = PROCTITLES.map { |proc| proc.call(self, to_data) }.compact.join(" ")
-
-       ❤
-     end
-
-     def self.flush_stats
+     def flush_stats
        fails = Processor::FAILURE.reset
        procd = Processor::PROCESSED.reset
        return if fails + procd == 0

        nowdate = Time.now.utc.strftime("%Y-%m-%d")
        begin
-
-         conn.pipelined do
-
-
-
-
-
-
-
+         redis do |conn|
+           conn.pipelined do |pipeline|
+             pipeline.incrby("stat:processed", procd)
+             pipeline.incrby("stat:processed:#{nowdate}", procd)
+             pipeline.expire("stat:processed:#{nowdate}", STATS_TTL)
+
+             pipeline.incrby("stat:failed", fails)
+             pipeline.incrby("stat:failed:#{nowdate}", fails)
+             pipeline.expire("stat:failed:#{nowdate}", STATS_TTL)
            end
          end
        rescue => ex
-
-         # try to handle the exception
-         Sidekiq.logger.warn("Unable to flush stats: #{ex}")
+         logger.warn("Unable to flush stats: #{ex}")
        end
      end
-     at_exit(&method(:flush_stats))

      def ❤
        key = identity
        fails = procd = 0

        begin
-
-
-         curstate = Processor::
-
-
-
-
-
-         conn.multi do
-           conn.incrby("stat:processed", procd)
-           conn.incrby("stat:processed:#{nowdate}", procd)
-           conn.expire("stat:processed:#{nowdate}", STATS_TTL)
-
-           conn.incrby("stat:failed", fails)
-           conn.incrby("stat:failed:#{nowdate}", fails)
-           conn.expire("stat:failed:#{nowdate}", STATS_TTL)
-
-           conn.unlink(workers_key)
+         flush_stats
+
+         curstate = Processor::WORK_STATE.dup
+         redis do |conn|
+           # work is the current set of executing jobs
+           work_key = "#{key}:work"
+           conn.pipelined do |transaction|
+             transaction.unlink(work_key)
              curstate.each_pair do |tid, hash|
-
+               transaction.hset(work_key, tid, Sidekiq.dump_json(hash))
              end
-
+             transaction.expire(work_key, 60)
            end
          end

@@ -158,27 +162,26 @@ module Sidekiq
        fails = procd = 0
        kb = memory_usage(::Process.pid)

-       _, exists, _, _,
-       conn.multi {
-
-
-
+       _, exists, _, _, signal = redis { |conn|
+         conn.multi { |transaction|
+           transaction.sadd("processes", [key])
+           transaction.exists(key)
+           transaction.hset(key, "info", to_json,
              "busy", curstate.size,
              "beat", Time.now.to_f,
              "rtt_us", rtt,
-             "quiet", @done,
+             "quiet", @done.to_s,
              "rss", kb)
-
-
+           transaction.expire(key, 60)
+           transaction.rpop("#{key}-signals")
          }
        }

        # first heartbeat or recovering from an outage and need to reestablish our heartbeat
-       fire_event(:heartbeat) unless exists
+       fire_event(:heartbeat) unless exists > 0
+       fire_event(:beat, oneshot: false)

-
-
-       ::Process.kill(msg, ::Process.pid)
+       ::Process.kill(signal, ::Process.pid) if signal && !@embedded
      rescue => e
        # ignore all redis/network issues
        logger.error("heartbeat: #{e}")
@@ -196,7 +199,7 @@ module Sidekiq

      def check_rtt
        a = b = 0
-
+       redis do |x|
          a = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
          x.ping
          b = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC, :microsecond)
@@ -207,10 +210,12 @@ module Sidekiq
        # Workable is < 10,000µs
        # Log a warning if it's a disaster.
        if RTT_READINGS.all? { |x| x > RTT_WARNING_LEVEL }
-
+         logger.warn <<~EOM
            Your Redis network connection is performing extremely poorly.
            Last RTT readings were #{RTT_READINGS.buffer.inspect}, ideally these should be < 1000.
            Ensure Redis is running in the same AZ or datacenter as Sidekiq.
+           If these values are close to 100,000, that means your Sidekiq process may be
+           CPU-saturated; reduce your concurrency and/or see https://github.com/sidekiq/sidekiq/discussions/5039
          EOM
          RTT_READINGS.reset
        end
@@ -242,14 +247,21 @@ module Sidekiq
        "hostname" => hostname,
        "started_at" => Time.now.to_f,
        "pid" => ::Process.pid,
-       "tag" => @
-       "concurrency" => @
-       "queues" => @
-       "
-       "
+       "tag" => @config[:tag] || "",
+       "concurrency" => @config.total_concurrency,
+       "queues" => @config.capsules.values.flat_map { |cap| cap.queues }.uniq,
+       "weights" => to_weights,
+       "labels" => @config[:labels].to_a,
+       "identity" => identity,
+       "version" => Sidekiq::VERSION,
+       "embedded" => @embedded
      }
    end

+   def to_weights
+     @config.capsules.values.map(&:weights)
+   end
+
    def to_json
      # this data changes infrequently so dump it to a string
      # now so we don't need to dump it every heartbeat.
data/lib/sidekiq/logger.rb
CHANGED
@@ -16,6 +16,10 @@ module Sidekiq
      def self.current
        Thread.current[:sidekiq_context] ||= {}
      end
+
+     def self.add(k, v)
+       current[k] = v
+     end
    end

    module LoggingUtils
@@ -27,28 +31,14 @@ module Sidekiq
        "fatal" => 4
      }
      LEVELS.default_proc = proc do |_, level|
-
+       puts("Invalid log level: #{level.inspect}")
        nil
      end

-
-       level
-
-
-     def info?
-       level <= 1
-     end
-
-     def warn?
-       level <= 2
-     end
-
-     def error?
-       level <= 3
-     end
-
-     def fatal?
-       level <= 4
+     LEVELS.each do |level, numeric_level|
+       define_method("#{level}?") do
+         local_level.nil? ? super() : local_level <= numeric_level
+       end
      end

      def local_level
@@ -80,36 +70,11 @@ module Sidekiq
      ensure
        self.local_level = old_local_level
      end
-
-     # Redefined to check severity against #level, and thus the thread-local level, rather than +@level+.
-     # FIXME: Remove when the minimum Ruby version supports overriding Logger#level.
-     def add(severity, message = nil, progname = nil, &block)
-       severity ||= ::Logger::UNKNOWN
-       progname ||= @progname
-
-       return true if @logdev.nil? || severity < level
-
-       if message.nil?
-         if block
-           message = yield
-         else
-           message = progname
-           progname = @progname
-         end
-       end
-
-       @logdev.write format_message(format_severity(severity), Time.now, progname, message)
-     end
    end

    class Logger < ::Logger
      include LoggingUtils

-     def initialize(*args, **kwargs)
-       super
-       self.formatter = Sidekiq.log_formatter
-     end
-
      module Formatters
        class Base < ::Logger::Formatter
          def tid
data/lib/sidekiq/manager.rb
CHANGED
@@ -1,8 +1,6 @@
  # frozen_string_literal: true

- require "sidekiq/util"
  require "sidekiq/processor"
- require "sidekiq/fetch"
  require "set"

  module Sidekiq
@@ -21,46 +19,38 @@ module Sidekiq
    # the shutdown process. The other tasks are performed by other threads.
    #
    class Manager
-     include
+     include Sidekiq::Component

      attr_reader :workers
-     attr_reader :
+     attr_reader :capsule

-     def initialize(
-
-       @
-       @count = options[:concurrency] || 10
+     def initialize(capsule)
+       @config = @capsule = capsule
+       @count = capsule.concurrency
        raise ArgumentError, "Concurrency of #{@count} is not supported" if @count < 1

        @done = false
        @workers = Set.new
+       @plock = Mutex.new
        @count.times do
-         @workers << Processor.new(
+         @workers << Processor.new(@config, &method(:processor_result))
        end
-       @plock = Mutex.new
      end

      def start
-       @workers.each
-         x.start
-       end
+       @workers.each(&:start)
      end

      def quiet
        return if @done
        @done = true

-       logger.info { "Terminating quiet
-       @workers.each
-       fire_event(:quiet, reverse: true)
+       logger.info { "Terminating quiet threads for #{capsule.name} capsule" }
+       @workers.each(&:terminate)
      end

-     # hack for quicker development / testing environment #2774
-     PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
-
      def stop(deadline)
        quiet
-       fire_event(:shutdown, reverse: true)

        # some of the shutdown events can be async,
        # we don't have any way to know when they're done but
@@ -68,29 +58,20 @@ module Sidekiq
        sleep PAUSE_TIME
        return if @workers.empty?

-       logger.info { "Pausing to allow
-
-       while remaining > PAUSE_TIME
-         return if @workers.empty?
-         sleep PAUSE_TIME
-         remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
-       end
+       logger.info { "Pausing to allow jobs to finish..." }
+       wait_for(deadline) { @workers.empty? }
        return if @workers.empty?

        hard_shutdown
+     ensure
+       capsule.stop
      end

-     def
-       @plock.synchronize do
-         @workers.delete(processor)
-       end
-     end
-
-     def processor_died(processor, reason)
+     def processor_result(processor, reason = nil)
        @plock.synchronize do
          @workers.delete(processor)
          unless @done
-           p = Processor.new(
+           p = Processor.new(@config, &method(:processor_result))
            @workers << p
            p.start
          end
@@ -104,7 +85,7 @@ module Sidekiq
      private

      def hard_shutdown
-       # We've reached the timeout and we still have busy
+       # We've reached the timeout and we still have busy threads.
        # They must die but their jobs shall live on.
        cleanup = nil
        @plock.synchronize do
@@ -114,22 +95,40 @@ module Sidekiq
        if cleanup.size > 0
          jobs = cleanup.map { |p| p.job }.compact

-         logger.warn { "Terminating #{cleanup.size} busy
-         logger.
+         logger.warn { "Terminating #{cleanup.size} busy threads" }
+         logger.debug { "Jobs still in progress #{jobs.inspect}" }

          # Re-enqueue unfinished jobs
          # NOTE: You may notice that we may push a job back to redis before
-         # the
+         # the thread is terminated. This is ok because Sidekiq's
          # contract says that jobs are run AT LEAST once. Process termination
          # is delayed until we're certain the jobs are back in Redis because
          # it is worse to lose a job than to run it twice.
-
-         strategy.bulk_requeue(jobs, @options)
+         capsule.fetcher.bulk_requeue(jobs)
        end

        cleanup.each do |processor|
          processor.kill
        end
+
+       # when this method returns, we immediately call `exit` which may not give
+       # the remaining threads time to run `ensure` blocks, etc. We pause here up
+       # to 3 seconds to give threads a minimal amount of time to run `ensure` blocks.
+       deadline = ::Process.clock_gettime(::Process::CLOCK_MONOTONIC) + 3
+       wait_for(deadline) { @workers.empty? }
+     end
+
+     # hack for quicker development / testing environment #2774
+     PAUSE_TIME = $stdout.tty? ? 0.1 : 0.5
+
+     # Wait for the orblock to be true or the deadline passed.
+     def wait_for(deadline, &condblock)
+       remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       while remaining > PAUSE_TIME
+         return if condblock.call
+         sleep PAUSE_TIME
+         remaining = deadline - ::Process.clock_gettime(::Process::CLOCK_MONOTONIC)
+       end
      end
    end
  end