puma 2.0.0.b5 → 5.0.0.beta1
Potentially problematic release: this version of puma might be problematic.
- checksums.yaml +7 -0
- data/History.md +1598 -0
- data/LICENSE +23 -20
- data/README.md +222 -62
- data/bin/puma-wild +31 -0
- data/bin/pumactl +1 -1
- data/docs/architecture.md +37 -0
- data/docs/deployment.md +113 -0
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +13 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/docs/jungle/upstart/README.md +61 -0
- data/docs/jungle/upstart/puma-manager.conf +31 -0
- data/docs/jungle/upstart/puma.conf +69 -0
- data/docs/nginx.md +5 -10
- data/docs/plugins.md +38 -0
- data/docs/restart.md +41 -0
- data/docs/signals.md +97 -0
- data/docs/systemd.md +228 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/extconf.rb +23 -2
- data/ext/puma_http11/http11_parser.c +301 -482
- data/ext/puma_http11/http11_parser.h +13 -11
- data/ext/puma_http11/http11_parser.java.rl +26 -42
- data/ext/puma_http11/http11_parser.rl +22 -21
- data/ext/puma_http11/http11_parser_common.rl +5 -5
- data/ext/puma_http11/mini_ssl.c +377 -18
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -107
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +137 -170
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +265 -191
- data/ext/puma_http11/puma_http11.c +57 -81
- data/lib/puma.rb +25 -4
- data/lib/puma/accept_nonblock.rb +7 -1
- data/lib/puma/app/status.rb +61 -24
- data/lib/puma/binder.rb +212 -78
- data/lib/puma/cli.rb +149 -644
- data/lib/puma/client.rb +316 -65
- data/lib/puma/cluster.rb +659 -0
- data/lib/puma/commonlogger.rb +108 -0
- data/lib/puma/configuration.rb +279 -180
- data/lib/puma/const.rb +126 -39
- data/lib/puma/control_cli.rb +183 -96
- data/lib/puma/detect.rb +20 -1
- data/lib/puma/dsl.rb +776 -0
- data/lib/puma/events.rb +91 -23
- data/lib/puma/io_buffer.rb +9 -5
- data/lib/puma/jruby_restart.rb +9 -5
- data/lib/puma/launcher.rb +487 -0
- data/lib/puma/minissl.rb +239 -93
- data/lib/puma/minissl/context_builder.rb +76 -0
- data/lib/puma/null_io.rb +22 -12
- data/lib/puma/plugin.rb +111 -0
- data/lib/puma/plugin/tmp_restart.rb +36 -0
- data/lib/puma/rack/builder.rb +297 -0
- data/lib/puma/rack/urlmap.rb +93 -0
- data/lib/puma/rack_default.rb +9 -0
- data/lib/puma/reactor.rb +290 -43
- data/lib/puma/runner.rb +163 -0
- data/lib/puma/server.rb +493 -126
- data/lib/puma/single.rb +66 -0
- data/lib/puma/state_file.rb +34 -0
- data/lib/puma/thread_pool.rb +228 -47
- data/lib/puma/util.rb +115 -0
- data/lib/rack/handler/puma.rb +78 -31
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +44 -0
- metadata +60 -155
- data/COPYING +0 -55
- data/Gemfile +0 -8
- data/History.txt +0 -196
- data/Manifest.txt +0 -56
- data/Rakefile +0 -121
- data/TODO +0 -5
- data/docs/config.md +0 -0
- data/ext/puma_http11/io_buffer.c +0 -154
- data/lib/puma/capistrano.rb +0 -26
- data/lib/puma/compat.rb +0 -11
- data/lib/puma/daemon_ext.rb +0 -20
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack_patch.rb +0 -25
- data/puma.gemspec +0 -45
- data/test/test_app_status.rb +0 -88
- data/test/test_cli.rb +0 -171
- data/test/test_config.rb +0 -16
- data/test/test_http10.rb +0 -27
- data/test/test_http11.rb +0 -126
- data/test/test_integration.rb +0 -150
- data/test/test_iobuffer.rb +0 -38
- data/test/test_minissl.rb +0 -22
- data/test/test_null_io.rb +0 -31
- data/test/test_persistent.rb +0 -238
- data/test/test_puma_server.rb +0 -128
- data/test/test_rack_handler.rb +0 -10
- data/test/test_rack_server.rb +0 -141
- data/test/test_thread_pool.rb +0 -146
- data/test/test_unix_socket.rb +0 -39
- data/test/test_ws.rb +0 -89
- data/tools/jungle/README.md +0 -54
- data/tools/jungle/puma +0 -332
- data/tools/jungle/run-puma +0 -3
data/lib/puma/cluster.rb
ADDED
@@ -0,0 +1,659 @@
# frozen_string_literal: true

require 'puma/runner'
require 'puma/util'
require 'puma/plugin'

require 'time'
require 'json'

module Puma
  # This class is instantiated by the `Puma::Launcher` and used
  # to boot and serve a Ruby application when puma "workers" are needed
  # i.e. when using multi-processes. For example `$ puma -w 5`
  #
  # At the core of this class is running an instance of `Puma::Server` which
  # gets created via the `start_server` method from the `Puma::Runner` class
  # that this inherits from.
  #
  # An instance of this class will spawn the number of processes passed in
  # via the `spawn_workers` method call. Each worker will have it's own
  # instance of a `Puma::Server`.
  class Cluster < Runner
    def initialize(cli, events)
      super cli, events

      @phase = 0
      @workers = []
      @next_check = Time.now

      @phased_restart = false
    end

    def stop_workers
      log "- Gracefully shutting down workers..."
      @workers.each { |x| x.term }

      begin
        loop do
          wait_workers
          break if @workers.reject {|w| w.pid.nil?}.empty?
          sleep 0.2
        end
      rescue Interrupt
        log "! Cancelled waiting for workers"
      end
    end

    def start_phased_restart
      @phase += 1
      log "- Starting phased worker restart, phase: #{@phase}"

      # Be sure to change the directory again before loading
      # the app. This way we can pick up new code.
      dir = @launcher.restart_dir
      log "+ Changing to #{dir}"
      Dir.chdir dir
    end

    def redirect_io
      super

      @workers.each { |x| x.hup }
    end

    class Worker
      def initialize(idx, pid, phase, options)
        @index = idx
        @pid = pid
        @phase = phase
        @stage = :started
        @signal = "TERM"
        @options = options
        @first_term_sent = nil
        @started_at = Time.now
        @last_checkin = Time.now
        @last_status = {}
        @term = false
      end

      attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at
      attr_writer :pid, :phase

      def booted?
        @stage == :booted
      end

      def boot!
        @last_checkin = Time.now
        @stage = :booted
      end

      def term?
        @term
      end

      def ping!(status)
        @last_checkin = Time.now
        @last_status = JSON.parse(status, symbolize_names: true)
      end

      def ping_timeout
        @last_checkin +
          (booted? ?
            @options[:worker_timeout] :
            @options[:worker_boot_timeout]
          )
      end

      def term
        begin
          if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]
            @signal = "KILL"
          else
            @term ||= true
            @first_term_sent ||= Time.now
          end
          Process.kill @signal, @pid if @pid
        rescue Errno::ESRCH
        end
      end

      def kill
        @signal = 'KILL'
        term
      end

      def hup
        Process.kill "HUP", @pid
      rescue Errno::ESRCH
      end
    end

    def spawn_workers
      diff = @options[:workers] - @workers.size
      return if diff < 1

      master = Process.pid
      if @options[:fork_worker]
        @fork_writer << "-1\n"
      end

      diff.times do
        idx = next_worker_index

        if @options[:fork_worker] && idx != 0
          @fork_writer << "#{idx}\n"
          pid = nil
        else
          pid = spawn_worker(idx, master)
        end

        debug "Spawned worker: #{pid}"
        @workers << Worker.new(idx, pid, @phase, @options)
      end

      if @options[:fork_worker] &&
        @workers.all? {|x| x.phase == @phase}

        @fork_writer << "0\n"
      end
    end

    def spawn_worker(idx, master)
      @launcher.config.run_hooks :before_worker_fork, idx, @launcher.events

      pid = fork { worker(idx, master) }
      if !pid
        log "! Complete inability to spawn new workers detected"
        log "! Seppuku is the only choice."
        exit! 1
      end

      @launcher.config.run_hooks :after_worker_fork, idx, @launcher.events
      pid
    end

    def cull_workers
      diff = @workers.size - @options[:workers]
      return if diff < 1

      debug "Culling #{diff.inspect} workers"

      workers_to_cull = @workers[-diff,diff]
      debug "Workers to cull: #{workers_to_cull.inspect}"

      workers_to_cull.each do |worker|
        log "- Worker #{worker.index} (pid: #{worker.pid}) terminating"
        worker.term
      end
    end

    def next_worker_index
      all_positions = 0...@options[:workers]
      occupied_positions = @workers.map { |w| w.index }
      available_positions = all_positions.to_a - occupied_positions
      available_positions.first
    end

    def all_workers_booted?
      @workers.count { |w| !w.booted? } == 0
    end

    def check_workers
      return if @next_check >= Time.now

      @next_check = Time.now + Const::WORKER_CHECK_INTERVAL

      timeout_workers
      wait_workers
      cull_workers
      spawn_workers

      if all_workers_booted?
        # If we're running at proper capacity, check to see if
        # we need to phase any workers out (which will restart
        # in the right phase).
        #
        w = @workers.find { |x| x.phase != @phase }

        if w
          log "- Stopping #{w.pid} for phased upgrade..."
          unless w.term?
            w.term
            log "- #{w.signal} sent to #{w.pid}..."
          end
        end
      end

      @next_check = [
        @workers.reject(&:term?).map(&:ping_timeout).min,
        @next_check
      ].compact.min
    end

    def wakeup!
      return unless @wakeup

      begin
        @wakeup.write "!" unless @wakeup.closed?
      rescue SystemCallError, IOError
        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
      end
    end

    def worker(index, master)
      title = "puma: cluster worker #{index}: #{master}"
      title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
      $0 = title

      Signal.trap "SIGINT", "IGNORE"

      fork_worker = @options[:fork_worker] && index == 0

      @workers = []
      if !@options[:fork_worker] || fork_worker
        @master_read.close
        @suicide_pipe.close
        @fork_writer.close
      end

      Thread.new do
        Puma.set_thread_name "worker check pipe"
        IO.select [@check_pipe]
        log "! Detected parent died, dying"
        exit! 1
      end

      # If we're not running under a Bundler context, then
      # report the info about the context we will be using
      if !ENV['BUNDLE_GEMFILE']
        if File.exist?("Gemfile")
          log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
        elsif File.exist?("gems.rb")
          log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
        end
      end

      # Invoke any worker boot hooks so they can get
      # things in shape before booting the app.
      @launcher.config.run_hooks :before_worker_boot, index, @launcher.events

      server = @server ||= start_server
      restart_server = Queue.new << true << false

      if fork_worker
        restart_server.clear
        Signal.trap "SIGCHLD" do
          Process.wait(-1, Process::WNOHANG) rescue nil
          wakeup!
        end

        Thread.new do
          Puma.set_thread_name "worker fork pipe"
          while (idx = @fork_pipe.gets)
            idx = idx.to_i
            if idx == -1 # stop server
              if restart_server.length > 0
                restart_server.clear
                server.begin_restart(true)
                @launcher.config.run_hooks :before_refork, nil, @launcher.events
                nakayoshi_gc
              end
            elsif idx == 0 # restart server
              restart_server << true << false
            else # fork worker
              pid = spawn_worker(idx, master)
              @worker_write << "f#{pid}:#{idx}\n" rescue nil
            end
          end
        end
      end

      Signal.trap "SIGTERM" do
        @worker_write << "e#{Process.pid}\n" rescue nil
        server.stop
        restart_server << false
      end

      begin
        @worker_write << "b#{Process.pid}:#{index}\n"
      rescue SystemCallError, IOError
        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
        STDERR.puts "Master seems to have exited, exiting."
        return
      end

      Thread.new(@worker_write) do |io|
        Puma.set_thread_name "stat payload"

        while true
          sleep Const::WORKER_CHECK_INTERVAL
          begin
            io << "p#{Process.pid}#{server.stats.to_json}\n"
          rescue IOError
            Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
            break
          end
        end
      end

      server.run.join while restart_server.pop

      # Invoke any worker shutdown hooks so they can prevent the worker
      # exiting until any background operations are completed
      @launcher.config.run_hooks :before_worker_shutdown, index, @launcher.events
    ensure
      @worker_write << "t#{Process.pid}\n" rescue nil
      @worker_write.close
    end

    def restart
      @restart = true
      stop
    end

    def phased_restart
      return false if @options[:preload_app]

      @phased_restart = true
      wakeup!

      true
    end

    def stop
      @status = :stop
      wakeup!
    end

    def stop_blocked
      @status = :stop if @status == :run
      wakeup!
      @control.stop(true) if @control
      Process.waitall
    end

    def halt
      @status = :halt
      wakeup!
    end

    def reload_worker_directory
      dir = @launcher.restart_dir
      log "+ Changing to #{dir}"
      Dir.chdir dir
    end

    # Inside of a child process, this will return all zeroes, as @workers is only populated in
    # the master process.
    def stats
      old_worker_count = @workers.count { |w| w.phase != @phase }
      worker_status = @workers.map do |w|
        {
          started_at: w.started_at.utc.iso8601,
          pid: w.pid,
          index: w.index,
          phase: w.phase,
          booted: w.booted?,
          last_checkin: w.last_checkin.utc.iso8601,
          last_status: w.last_status,
        }
      end

      {
        started_at: @started_at.utc.iso8601,
        workers: @workers.size,
        phase: @phase,
        booted_workers: worker_status.count { |w| w[:booted] },
        old_workers: old_worker_count,
        worker_status: worker_status,
      }
    end

    def preload?
      @options[:preload_app]
    end

    def fork_worker!
      if (worker = @workers.find { |w| w.index == 0 })
        worker.phase += 1
      end
      phased_restart
    end

    # We do this in a separate method to keep the lambda scope
    # of the signals handlers as small as possible.
    def setup_signals
      if @options[:fork_worker]
        Signal.trap "SIGURG" do
          fork_worker!
        end

        # Auto-fork after the specified number of requests.
        if (fork_requests = @options[:fork_worker].to_i) > 0
          @launcher.events.register(:ping!) do |w|
            fork_worker! if w.index == 0 &&
              w.phase == 0 &&
              w.last_status[:requests_count] >= fork_requests
          end
        end
      end

      Signal.trap "SIGCHLD" do
        wakeup!
      end

      Signal.trap "TTIN" do
        @options[:workers] += 1
        wakeup!
      end

      Signal.trap "TTOU" do
        @options[:workers] -= 1 if @options[:workers] >= 2
        wakeup!
      end

      master_pid = Process.pid

      Signal.trap "SIGTERM" do
        # The worker installs their own SIGTERM when booted.
        # Until then, this is run by the worker and the worker
        # should just exit if they get it.
        if Process.pid != master_pid
          log "Early termination of worker"
          exit! 0
        else
          @launcher.close_binder_listeners

          stop_workers
          stop

          raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
          exit 0 # Clean exit, workers were stopped
        end
      end
    end

    def run
      @status = :run

      output_header "cluster"

      log "* Process workers: #{@options[:workers]}"

      before = Thread.list

      if preload?
        log "* Preloading application"
        load_and_bind

        after = Thread.list

        if after.size > before.size
          threads = (after - before)
          if threads.first.respond_to? :backtrace
            log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot:"
            threads.each do |t|
              log "! #{t.inspect} - #{t.backtrace ? t.backtrace.first : ''}"
            end
          else
            log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot"
          end
        end
      else
        log "* Phased restart available"

        unless @launcher.config.app_configured?
          error "No application configured, nothing to run"
          exit 1
        end

        @launcher.binder.parse @options[:binds], self
      end

      read, @wakeup = Puma::Util.pipe

      setup_signals

      # Used by the workers to detect if the master process dies.
      # If select says that @check_pipe is ready, it's because the
      # master has exited and @suicide_pipe has been automatically
      # closed.
      #
      @check_pipe, @suicide_pipe = Puma::Util.pipe

      # Separate pipe used by worker 0 to receive commands to
      # fork new worker processes.
      @fork_pipe, @fork_writer = Puma::Util.pipe

      log "Use Ctrl-C to stop"

      redirect_io

      Plugins.fire_background

      @launcher.write_state

      start_control

      @master_read, @worker_write = read, @wakeup

      @launcher.config.run_hooks :before_fork, nil, @launcher.events
      nakayoshi_gc

      spawn_workers

      Signal.trap "SIGINT" do
        stop
      end

      @launcher.events.fire_on_booted!

      begin
        while @status == :run
          begin
            if @phased_restart
              start_phased_restart
              @phased_restart = false
            end

            check_workers

            res = IO.select([read], nil, nil, [0, @next_check - Time.now].max)

            if res
              req = read.read_nonblock(1)

              @next_check = Time.now if req == "!"
              next if !req || req == "!"

              result = read.gets
              pid = result.to_i

              if req == "b" || req == "f"
                pid, idx = result.split(':').map(&:to_i)
                w = @workers.find {|x| x.index == idx}
                w.pid = pid if w.pid.nil?
              end

              if w = @workers.find { |x| x.pid == pid }
                case req
                when "b"
                  w.boot!
                  log "- Worker #{w.index} (pid: #{pid}) booted, phase: #{w.phase}"
                  @next_check = Time.now
                when "e"
                  # external term, see worker method, Signal.trap "SIGTERM"
                  w.instance_variable_set :@term, true
                when "t"
                  w.term unless w.term?
                when "p"
                  w.ping!(result.sub(/^\d+/,'').chomp)
                  @launcher.events.fire(:ping!, w)
                end
              else
                log "! Out-of-sync worker list, no #{pid} worker"
              end
            end

          rescue Interrupt
            @status = :stop
          end
        end

        stop_workers unless @status == :halt
      ensure
        @check_pipe.close
        @suicide_pipe.close
        read.close
        @wakeup.close
      end
    end

    private

    # loops thru @workers, removing workers that exited, and calling
    # `#term` if needed
    def wait_workers
      @workers.reject! do |w|
        next false if w.pid.nil?
        begin
          if Process.wait(w.pid, Process::WNOHANG)
            true
          else
            w.term if w.term?
            nil
          end
        rescue Errno::ECHILD
          begin
            Process.kill(0, w.pid)
            false # child still alive, but has another parent
          rescue Errno::ESRCH, Errno::EPERM
            true # child is already terminated
          end
        end
      end
    end

    def timeout_workers
      @workers.each do |w|
        if !w.term? && w.ping_timeout <= Time.now
          log "! Terminating timed out worker: #{w.pid}"
          w.kill
        end
      end
    end

    def nakayoshi_gc
      return unless @options[:nakayoshi_fork]
      log "! Promoting existing objects to old generation..."
      4.times { GC.start(full_mark: false) }
      if GC.respond_to?(:compact)
        log "! Compacting..."
        GC.compact
      end
      log "! Friendly fork preparation complete."
    end
  end
end
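For context on how this new cluster runner is configured: it reads everything from the options hash built by `Puma::DSL` (data/lib/puma/dsl.rb, also added in this release), including :workers, :preload_app, :fork_worker, :nakayoshi_fork, and the worker hooks invoked via `run_hooks`. The following is a minimal `config/puma.rb` sketch that exercises these code paths; the DSL method names are assumptions based on the bundled dsl.rb, not an excerpt from this diff.

# config/puma.rb: a minimal sketch, assuming the 5.0.0.beta1 DSL methods
# (workers / preload_app! / fork_worker / nakayoshi_fork / before_fork / on_worker_boot).

workers 5        # Cluster#spawn_workers keeps @options[:workers] processes running
preload_app!     # sets :preload_app, so Cluster#run calls load_and_bind before forking

# Optional (assumed DSL method): have worker 0 fork the other workers via @fork_pipe.
# fork_worker

# Optional (assumed DSL method): run the nakayoshi_gc branch (minor GCs plus GC.compact) before forking.
# nakayoshi_fork

before_fork do
  # run_hooks :before_fork fires in the master just before spawn_workers
end

on_worker_boot do
  # run_hooks :before_worker_boot fires in each child before start_server
end

At runtime the master also traps TTIN/TTOU to add or remove one worker, and (when fork_worker is enabled) SIGURG to trigger fork_worker!, so the worker count can be adjusted without a full restart.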