gitlab-puma 4.3.1.gitlab.2
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +7 -0
- data/History.md +1537 -0
- data/LICENSE +26 -0
- data/README.md +291 -0
- data/bin/puma +10 -0
- data/bin/puma-wild +31 -0
- data/bin/pumactl +12 -0
- data/docs/architecture.md +37 -0
- data/docs/deployment.md +111 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/nginx.md +80 -0
- data/docs/plugins.md +38 -0
- data/docs/restart.md +41 -0
- data/docs/signals.md +96 -0
- data/docs/systemd.md +290 -0
- data/docs/tcp_mode.md +96 -0
- data/ext/puma_http11/PumaHttp11Service.java +19 -0
- data/ext/puma_http11/ext_help.h +15 -0
- data/ext/puma_http11/extconf.rb +28 -0
- data/ext/puma_http11/http11_parser.c +1044 -0
- data/ext/puma_http11/http11_parser.h +65 -0
- data/ext/puma_http11/http11_parser.java.rl +145 -0
- data/ext/puma_http11/http11_parser.rl +147 -0
- data/ext/puma_http11/http11_parser_common.rl +54 -0
- data/ext/puma_http11/io_buffer.c +155 -0
- data/ext/puma_http11/mini_ssl.c +553 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +226 -0
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +455 -0
- data/ext/puma_http11/org/jruby/puma/IOBuffer.java +72 -0
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +363 -0
- data/ext/puma_http11/puma_http11.c +502 -0
- data/lib/puma.rb +31 -0
- data/lib/puma/accept_nonblock.rb +29 -0
- data/lib/puma/app/status.rb +80 -0
- data/lib/puma/binder.rb +385 -0
- data/lib/puma/cli.rb +239 -0
- data/lib/puma/client.rb +494 -0
- data/lib/puma/cluster.rb +554 -0
- data/lib/puma/commonlogger.rb +108 -0
- data/lib/puma/configuration.rb +362 -0
- data/lib/puma/const.rb +242 -0
- data/lib/puma/control_cli.rb +289 -0
- data/lib/puma/detect.rb +15 -0
- data/lib/puma/dsl.rb +740 -0
- data/lib/puma/events.rb +156 -0
- data/lib/puma/io_buffer.rb +4 -0
- data/lib/puma/jruby_restart.rb +84 -0
- data/lib/puma/launcher.rb +475 -0
- data/lib/puma/minissl.rb +278 -0
- data/lib/puma/minissl/context_builder.rb +76 -0
- data/lib/puma/null_io.rb +44 -0
- data/lib/puma/plugin.rb +120 -0
- data/lib/puma/plugin/tmp_restart.rb +36 -0
- data/lib/puma/rack/builder.rb +301 -0
- data/lib/puma/rack/urlmap.rb +93 -0
- data/lib/puma/rack_default.rb +9 -0
- data/lib/puma/reactor.rb +400 -0
- data/lib/puma/runner.rb +192 -0
- data/lib/puma/server.rb +1053 -0
- data/lib/puma/single.rb +123 -0
- data/lib/puma/state_file.rb +31 -0
- data/lib/puma/tcp_logger.rb +41 -0
- data/lib/puma/thread_pool.rb +348 -0
- data/lib/puma/util.rb +124 -0
- data/lib/rack/handler/puma.rb +115 -0
- data/tools/docker/Dockerfile +16 -0
- data/tools/jungle/README.md +19 -0
- data/tools/jungle/init.d/README.md +61 -0
- data/tools/jungle/init.d/puma +421 -0
- data/tools/jungle/init.d/run-puma +18 -0
- data/tools/jungle/rc.d/README.md +74 -0
- data/tools/jungle/rc.d/puma +61 -0
- data/tools/jungle/rc.d/puma.conf +10 -0
- data/tools/jungle/upstart/README.md +61 -0
- data/tools/jungle/upstart/puma-manager.conf +31 -0
- data/tools/jungle/upstart/puma.conf +69 -0
- data/tools/trickletest.rb +44 -0
- metadata +147 -0
data/lib/puma/cluster.rb
ADDED
@@ -0,0 +1,554 @@
|
|
1
|
+
# frozen_string_literal: true
|
2
|
+
|
3
|
+
require 'puma/runner'
|
4
|
+
require 'puma/util'
|
5
|
+
require 'puma/plugin'
|
6
|
+
|
7
|
+
require 'time'
|
8
|
+
|
9
|
+
module Puma
|
10
|
+
# This class is instantiated by the `Puma::Launcher` and used
|
11
|
+
# to boot and serve a Ruby application when puma "workers" are needed
|
12
|
+
# i.e. when using multi-processes. For example `$ puma -w 5`
|
13
|
+
#
|
14
|
+
# At the core of this class is running an instance of `Puma::Server` which
|
15
|
+
# gets created via the `start_server` method from the `Puma::Runner` class
|
16
|
+
# that this inherits from.
|
17
|
+
#
|
18
|
+
# An instance of this class will spawn the number of processes passed in
|
19
|
+
# via the `spawn_workers` method call. Each worker will have its own
|
20
|
+
# instance of a `Puma::Server`.
|
21
|
+
class Cluster < Runner
|
22
|
+
# Sets up cluster bookkeeping before any workers are forked.
#
# cli    - the launcher driving this cluster (stored by the Runner superclass;
#          referenced later as @launcher)
# events - the events/logging sink, forwarded to Runner
def initialize(cli, events)
  super cli, events

  # Restart phase counter; bumped by start_phased_restart. Each Worker
  # records the phase it was forked in.
  @phase = 0
  @workers = []
  # Earliest time check_workers will do another full pass (nil = immediately).
  @next_check = nil

  # :idle until a phased restart starts stopping old-phase workers.
  @phased_state = :idle
  @phased_restart = false
end
# Sends TERM to every worker, then polls (every 0.2s) until all of them
# have been reaped out of @workers. An Interrupt (Ctrl-C) abandons the wait.
def stop_workers
  log "- Gracefully shutting down workers..."
  @workers.each { |x| x.term }

  begin
    loop do
      # wait_workers reaps exited children and drops them from @workers.
      wait_workers
      break if @workers.empty?
      sleep 0.2
    end
  rescue Interrupt
    log "! Cancelled waiting for workers"
  end
end
# Begins a phased restart: bumps the phase counter so check_workers will
# progressively replace workers still on the old phase.
def start_phased_restart
  @phase += 1
  log "- Starting phased worker restart, phase: #{@phase}"

  # Be sure to change the directory again before loading
  # the app. This way we can pick up new code.
  dir = @launcher.restart_dir
  log "+ Changing to #{dir}"
  Dir.chdir dir
end
# Re-applies the master's stdout/stderr redirection (Runner#redirect_io),
# then HUPs each worker so they re-open their own log handles too.
def redirect_io
  super

  @workers.each { |x| x.hup }
end
# Master-side bookkeeping handle for one forked worker process. Tracks
# the worker's pid, restart phase, boot state and last status check-in,
# and wraps the signals the master sends it.
class Worker
  def initialize(idx, pid, phase, options)
    @index = idx
    @pid = pid
    @phase = phase
    @stage = :started
    @signal = "TERM"
    @options = options
    @first_term_sent = nil
    @started_at = Time.now
    @last_checkin = Time.now
    @last_status = '{}'
    @term = false
  end

  attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at

  # True once the worker has reported its boot message over the pipe.
  def booted?
    :booted == @stage
  end

  # Marks the worker booted and counts it as a fresh check-in.
  def boot!
    @last_checkin = Time.now
    @stage = :booted
  end

  # True once a TERM has been requested for this worker.
  def term?
    @term
  end

  # Records a status payload (JSON string) from the worker's ping.
  def ping!(status)
    @last_checkin = Time.now
    @last_status = status
  end

  # True when more than +which+ seconds have passed since the last check-in.
  def ping_timeout?(which)
    (Time.now - @last_checkin) > which
  end

  # Asks the worker to terminate. The first call sends TERM; if the
  # worker is still around past worker_shutdown_timeout, later calls
  # escalate to KILL. A vanished process is silently ignored.
  def term
    overdue = @first_term_sent &&
      (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]

    if overdue
      @signal = "KILL"
    else
      @term ||= true
      @first_term_sent ||= Time.now
    end
    Process.kill @signal, @pid
  rescue Errno::ESRCH
  end

  # Force-kills the worker; ignores an already-gone process.
  def kill
    Process.kill "KILL", @pid
  rescue Errno::ESRCH
  end

  # Asks the worker to re-open its logs; ignores an already-gone process.
  def hup
    Process.kill "HUP", @pid
  rescue Errno::ESRCH
  end
end
# Forks new workers until @options[:workers] processes exist, running the
# :before_worker_fork / :after_worker_fork hooks around each fork.
# Aborts the whole master (exit! 1) if fork itself fails.
def spawn_workers
  diff = @options[:workers] - @workers.size
  return if diff < 1

  master = Process.pid

  diff.times do
    idx = next_worker_index
    @launcher.config.run_hooks :before_worker_fork, idx

    pid = fork { worker(idx, master) }
    unless pid
      log "! Complete inability to spawn new workers detected"
      log "! Seppuku is the only choice."
      exit! 1
    end

    debug "Spawned worker: #{pid}"
    @workers << Worker.new(idx, pid, @phase, @options)

    @launcher.config.run_hooks :after_worker_fork, idx
  end

  # At least one worker was spawned (diff >= 1 is guaranteed by the guard
  # above), so any pending phased restart may proceed again. The previous
  # `if diff > 0` wrapper here was always true and has been removed.
  @phased_state = :idle
end
# Terminates surplus workers when the configured count has shrunk
# (e.g. after a TTOU signal). The most recently listed workers go first.
def cull_workers
  diff = @workers.size - @options[:workers]
  return if diff < 1

  debug "Culling #{diff.inspect} workers"

  doomed = @workers.last(diff)
  debug "Workers to cull: #{doomed.inspect}"

  doomed.each do |worker|
    log "- Worker #{worker.index} (pid: #{worker.pid}) terminating"
    worker.term
  end
end
# Returns the lowest worker slot in 0...@options[:workers] that no
# current worker occupies (nil if every slot is taken).
def next_worker_index
  occupied = @workers.map(&:index)
  (0...@options[:workers]).find { |slot| !occupied.include?(slot) }
end
# True when every known worker has checked in as booted
# (vacuously true for an empty worker list).
def all_workers_booted?
  @workers.all? { |w| w.booted? }
end
# One supervision pass over the worker pool: kill timed-out workers, reap
# exited ones, shrink/grow to the configured count, and advance any
# in-flight phased restart one worker at a time.
#
# force - when true, skips the @next_check rate limit (used right after a
#         worker reports in, so the pool reacts immediately).
def check_workers(force=false)
  return if !force && @next_check && @next_check >= Time.now

  @next_check = Time.now + Const::WORKER_CHECK_INTERVAL

  any = false

  @workers.each do |w|
    # A worker that hasn't booted yet is only judged against the (longer)
    # boot timeout; skip it until that elapses.
    next if !w.booted? && !w.ping_timeout?(@options[:worker_boot_timeout])
    if w.ping_timeout?(@options[:worker_timeout])
      log "! Terminating timed out worker: #{w.pid}"
      w.kill
      any = true
    end
  end

  # If we killed any timed out workers, try to catch them
  # during this loop by giving the kernel time to kill them.
  sleep 1 if any

  wait_workers
  cull_workers
  spawn_workers

  if all_workers_booted?
    # If we're running at proper capacity, check to see if
    # we need to phase any workers out (which will restart
    # in the right phase).
    #
    w = @workers.find { |x| x.phase != @phase }

    if w
      if @phased_state == :idle
        @phased_state = :waiting
        log "- Stopping #{w.pid} for phased upgrade..."
      end

      unless w.term?
        w.term
        log "- #{w.signal} sent to #{w.pid}..."
      end
    end
  end
end
# Writes a single "!" byte to the wakeup pipe so the master's select loop
# returns immediately. A missing or closed pipe, or a failed write
# (master already gone), is silently tolerated.
def wakeup!
  pipe = @wakeup
  return unless pipe

  pipe.write "!" unless pipe.closed?
rescue SystemCallError, IOError
  Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
end
# Body of a forked worker process: sets the process title, detaches from
# master-only file descriptors, boots a Puma::Server and reports its
# lifecycle ("b" booted / "e" external term / "t" terminated / "p" ping)
# back to the master over @worker_write.
#
# index  - this worker's slot number
# master - the master's pid, shown in the process title
def worker(index, master)
  title = "puma: cluster worker #{index}: #{master}"
  title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
  $0 = title

  # SIGINT is handled by the master; workers ignore it.
  Signal.trap "SIGINT", "IGNORE"

  # This process no longer supervises anyone; drop the master-side ends.
  @workers = []
  @master_read.close
  @suicide_pipe.close

  # Watchdog: @check_pipe only becomes readable when the master exits and
  # the kernel closes @suicide_pipe, so a successful select means orphaned.
  Thread.new do
    Puma.set_thread_name "worker check pipe"
    IO.select [@check_pipe]
    log "! Detected parent died, dying"
    exit! 1
  end

  # If we're not running under a Bundler context, then
  # report the info about the context we will be using
  if !ENV['BUNDLE_GEMFILE']
    if File.exist?("Gemfile")
      log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
    elsif File.exist?("gems.rb")
      log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
    end
  end

  # Invoke any worker boot hooks so they can get
  # things in shape before booting the app.
  @launcher.config.run_hooks :before_worker_boot, index

  server = start_server

  # Replace the early-exit TERM handler installed by setup_signals:
  # report the external term to the master, then stop gracefully.
  Signal.trap "SIGTERM" do
    @worker_write << "e#{Process.pid}\n" rescue nil
    server.stop
  end

  begin
    # Tell the master we booted; if the pipe is dead the master is gone.
    @worker_write << "b#{Process.pid}\n"
  rescue SystemCallError, IOError
    Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
    STDERR.puts "Master seems to have exited, exiting."
    return
  end

  # Periodic stats pinger: sends "p<pid>{json}" every check interval so
  # the master can detect hangs and collect stats.
  Thread.new(@worker_write) do |io|
    Puma.set_thread_name "stat payload"
    base_payload = "p#{Process.pid}"

    while true
      sleep Const::WORKER_CHECK_INTERVAL
      begin
        b = server.backlog || 0
        r = server.running || 0
        t = server.pool_capacity || 0
        m = server.max_threads || 0
        payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r}, "pool_capacity":#{t}, "max_threads": #{m} }\n!
        io << payload
      rescue IOError
        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        break
      end
    end
  end

  server.run.join

  # Invoke any worker shutdown hooks so they can prevent the worker
  # exiting until any background operations are completed
  @launcher.config.run_hooks :before_worker_shutdown, index
ensure
  # Best-effort "terminated" notice; the master may already be gone.
  @worker_write << "t#{Process.pid}\n" rescue nil
  @worker_write.close
end
# Flags a full (hot) restart and begins shutting the cluster down; the
# launcher performs the actual re-exec once the run loop exits.
def restart
  @restart = true
  stop
end
# Requests a phased restart and wakes the master loop. Returns false
# without doing anything when the app is preloaded, since the preloaded
# code lives in the master and cannot be swapped out per phase.
def phased_restart
  return false if @options[:preload_app]

  @phased_restart = true
  wakeup!

  true
end
# Flags the cluster for shutdown and pokes the master loop awake so it
# notices immediately.
def stop
  @status = :stop
  wakeup!
end
# Like #stop, but blocks the caller: stops the control server (if any)
# and waits for every child process to be collected.
def stop_blocked
  @status = :stop if @status == :run
  wakeup!
  @control.stop(true) if @control
  Process.waitall
end
# Hard stop: the run loop exits without gracefully stopping workers
# (see the `unless @status == :halt` guard in #run).
def halt
  @status = :halt
  wakeup!
end
# Moves the master back into the restart directory so that subsequently
# forked workers inherit the right working directory.
def reload_worker_directory
  dir = @launcher.restart_dir
  log "+ Changing to #{dir}"
  Dir.chdir dir
end
# Builds the cluster's stats document as a JSON string.
#
# Inside of a child process, this will return all zeroes, as @workers is
# only populated in the master process.
def stats
  stale_count = @workers.count { |w| w.phase != @phase }
  booted_count = @workers.count(&:booted?)

  entries = @workers.map do |w|
    %Q!{ "started_at": "#{w.started_at.utc.iso8601}", "pid": #{w.pid}, "index": #{w.index}, "phase": #{w.phase}, "booted": #{w.booted?}, "last_checkin": "#{w.last_checkin.utc.iso8601}", "last_status": #{w.last_status} }!
  end
  worker_status = "[#{entries.join(",")}]"

  %Q!{ "started_at": "#{@started_at.utc.iso8601}", "workers": #{@workers.size}, "phase": #{@phase}, "booted_workers": #{booted_count}, "old_workers": #{stale_count}, "worker_status": #{worker_status} }!
end
# Truthy when the app should be loaded once in the master before forking
# (the :preload_app option). Returns the raw option value, not a strict
# boolean.
def preload?
  @options[:preload_app]
end
# We do this in a separate method to keep the lambda scope
# of the signals handlers as small as possible.
#
# Installs the master's handlers: SIGCHLD wakes the supervision loop so
# dead children get reaped; TTIN/TTOU grow and shrink the worker pool at
# runtime; SIGTERM shuts the whole cluster down.
def setup_signals
  Signal.trap "SIGCHLD" do
    wakeup!
  end

  Signal.trap "TTIN" do
    @options[:workers] += 1
    wakeup!
  end

  Signal.trap "TTOU" do
    # Never shrink below one worker.
    @options[:workers] -= 1 if @options[:workers] >= 2
    wakeup!
  end

  master_pid = Process.pid

  Signal.trap "SIGTERM" do
    # The worker installs their own SIGTERM when booted.
    # Until then, this is run by the worker and the worker
    # should just exit if they get it.
    if Process.pid != master_pid
      log "Early termination of worker"
      exit! 0
    else
      @launcher.close_binder_listeners

      stop_workers
      stop

      raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
      exit 0 # Clean exit, workers were stopped
    end
  end
end
# Master entry point: boots the app (optionally preloading it), forks
# the workers, then supervises them by reading single-character requests
# off the worker pipe ("b" booted, "e" external term, "t" terminated,
# "p" ping) until the cluster is stopped or halted.
def run
  @status = :run

  output_header "cluster"

  log "* Process workers: #{@options[:workers]}"

  before = Thread.list

  if preload?
    log "* Preloading application"
    load_and_bind

    after = Thread.list

    # Threads started during app boot will NOT exist in the forked
    # workers; warn so the user knows they must be restarted per-worker.
    if after.size > before.size
      threads = (after - before)
      if threads.first.respond_to? :backtrace
        log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot:"
        threads.each do |t|
          log "! #{t.inspect} - #{t.backtrace ? t.backtrace.first : ''}"
        end
      else
        log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot"
      end
    end
  else
    log "* Phased restart available"

    unless @launcher.config.app_configured?
      error "No application configured, nothing to run"
      exit 1
    end

    @launcher.binder.parse @options[:binds], self
  end

  # read is the master's end of the worker status pipe; @wakeup is the
  # write end used by wakeup! (and inherited by workers as @worker_write).
  read, @wakeup = Puma::Util.pipe

  setup_signals

  # Used by the workers to detect if the master process dies.
  # If select says that @check_pipe is ready, it's because the
  # master has exited and @suicide_pipe has been automatically
  # closed.
  #
  @check_pipe, @suicide_pipe = Puma::Util.pipe

  if daemon?
    log "* Daemonizing..."
    Process.daemon(true)
  else
    log "Use Ctrl-C to stop"
  end

  redirect_io

  Plugins.fire_background

  @launcher.write_state

  start_control

  @master_read, @worker_write = read, @wakeup

  @launcher.config.run_hooks :before_fork, nil

  spawn_workers

  Signal.trap "SIGINT" do
    stop
  end

  @launcher.events.fire_on_booted!

  begin
    force_check = false

    while @status == :run
      begin
        if @phased_restart
          start_phased_restart
          @phased_restart = false
        end

        check_workers force_check

        force_check = false

        # Sleep until a worker writes, wakeup! fires, or the check
        # interval elapses — whichever comes first.
        res = IO.select([read], nil, nil, Const::WORKER_CHECK_INTERVAL)

        if res
          req = read.read_nonblock(1)

          # "!" is just a wakeup nudge, not a worker message.
          next if !req || req == "!"

          # The rest of the line is "<pid>[payload]\n".
          result = read.gets
          pid = result.to_i

          if w = @workers.find { |x| x.pid == pid }
            case req
            when "b"
              w.boot!
              log "- Worker #{w.index} (pid: #{pid}) booted, phase: #{w.phase}"
              force_check = true
            when "e"
              # external term, see worker method, Signal.trap "SIGTERM"
              w.instance_variable_set :@term, true
            when "t"
              w.term unless w.term?
              force_check = true
            when "p"
              w.ping!(result.sub(/^\d+/,'').chomp)
            end
          else
            log "! Out-of-sync worker list, no #{pid} worker"
          end
        end

      rescue Interrupt
        @status = :stop
      end
    end

    stop_workers unless @status == :halt
  ensure
    @check_pipe.close
    @suicide_pipe.close
    read.close
    @wakeup.close
  end
end
private

# Reaps exited worker processes out of @workers (non-blocking wait).
# Workers already flagged for termination get another #term nudge while
# they remain alive, which escalates TERM to KILL after the shutdown
# timeout. Workers the kernel no longer knows about are dropped too.
def wait_workers
  @workers.reject! do |w|
    begin
      reaped = Process.wait(w.pid, Process::WNOHANG)
      if reaped
        true
      else
        w.term if w.term?
        nil
      end
    rescue Errno::ECHILD
      true # child is already terminated
    end
  end
end
end
|
554
|
+
end
|