puma 4.3.12 → 6.3.1
Potentially problematic release.
- checksums.yaml +4 -4
- data/History.md +1729 -521
- data/LICENSE +23 -20
- data/README.md +169 -45
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +63 -26
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +60 -69
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/{tools → docs}/jungle/rc.d/README.md +1 -1
- data/{tools → docs}/jungle/rc.d/puma +2 -2
- data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +15 -15
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +46 -23
- data/docs/signals.md +13 -11
- data/docs/stats.md +142 -0
- data/docs/systemd.md +84 -128
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -4
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +49 -12
- data/ext/puma_http11/http11_parser.c +46 -48
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +3 -3
- data/ext/puma_http11/http11_parser.rl +3 -3
- data/ext/puma_http11/http11_parser_common.rl +2 -2
- data/ext/puma_http11/mini_ssl.c +278 -93
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +6 -6
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +4 -6
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +241 -96
- data/ext/puma_http11/puma_http11.c +46 -57
- data/lib/puma/app/status.rb +53 -39
- data/lib/puma/binder.rb +237 -121
- data/lib/puma/cli.rb +34 -34
- data/lib/puma/client.rb +172 -98
- data/lib/puma/cluster/worker.rb +180 -0
- data/lib/puma/cluster/worker_handle.rb +97 -0
- data/lib/puma/cluster.rb +226 -231
- data/lib/puma/commonlogger.rb +21 -14
- data/lib/puma/configuration.rb +114 -87
- data/lib/puma/const.rb +139 -95
- data/lib/puma/control_cli.rb +99 -79
- data/lib/puma/detect.rb +33 -2
- data/lib/puma/dsl.rb +516 -110
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +16 -115
- data/lib/puma/io_buffer.rb +44 -2
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +164 -155
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +36 -19
- data/lib/puma/minissl.rb +230 -55
- data/lib/puma/null_io.rb +18 -1
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +1 -1
- data/lib/puma/plugin.rb +3 -12
- data/lib/puma/rack/builder.rb +7 -11
- data/lib/puma/rack/urlmap.rb +0 -0
- data/lib/puma/rack_default.rb +19 -4
- data/lib/puma/reactor.rb +93 -368
- data/lib/puma/request.rb +671 -0
- data/lib/puma/runner.rb +92 -75
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +321 -794
- data/lib/puma/single.rb +20 -74
- data/lib/puma/state_file.rb +45 -8
- data/lib/puma/thread_pool.rb +140 -68
- data/lib/puma/util.rb +21 -4
- data/lib/puma.rb +54 -7
- data/lib/rack/handler/puma.rb +113 -87
- data/tools/{docker/Dockerfile → Dockerfile} +1 -1
- data/tools/trickletest.rb +0 -0
- metadata +33 -24
- data/docs/tcp_mode.md +0 -96
- data/ext/puma_http11/io_buffer.c +0 -155
- data/ext/puma_http11/org/jruby/puma/IOBuffer.java +0 -72
- data/lib/puma/accept_nonblock.rb +0 -29
- data/lib/puma/tcp_logger.rb +0 -41
- data/tools/jungle/README.md +0 -19
- data/tools/jungle/init.d/README.md +0 -61
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
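The cluster.rb diff below reads several options that did not exist in 4.3.12, notably @options[:fork_worker], @options[:worker_culling_strategy] and @options[:silence_single_worker_warning]. For orientation only, a Puma 6.x config exercising them might look like the sketch below; the DSL method names are assumed from data/lib/puma/dsl.rb and data/docs/fork_worker.md in the listing above, and nothing here is shipped as part of this diff.

# config/puma.rb -- illustrative sketch, not part of the gem diff
workers 4                        # boot four worker processes (Cluster#spawn_workers below)
fork_worker                      # refork from worker 0 via SIGURG / @fork_writer (assumed DSL call)
worker_culling_strategy :oldest  # cull_start_index then trims the oldest workers first
# silence_single_worker_warning  # would silence Cluster#single_worker_warning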
data/lib/puma/cluster.rb
CHANGED
@@ -1,35 +1,34 @@
 # frozen_string_literal: true

-
-
-
-
-
+require_relative 'runner'
+require_relative 'util'
+require_relative 'plugin'
+require_relative 'cluster/worker_handle'
+require_relative 'cluster/worker'

 module Puma
   # This class is instantiated by the `Puma::Launcher` and used
   # to boot and serve a Ruby application when puma "workers" are needed
   # i.e. when using multi-processes. For example `$ puma -w 5`
   #
-  # At the core of this class is running an instance of `Puma::Server` which
-  # gets created via the `start_server` method from the `Puma::Runner` class
-  # that this inherits from.
-  #
   # An instance of this class will spawn the number of processes passed in
   # via the `spawn_workers` method call. Each worker will have it's own
   # instance of a `Puma::Server`.
   class Cluster < Runner
-    def initialize(
-      super
+    def initialize(launcher)
+      super(launcher)

       @phase = 0
       @workers = []
-      @next_check =
+      @next_check = Time.now

-      @phased_state = :idle
       @phased_restart = false
     end

+    # Returns the list of cluster worker handles.
+    # @return [Array<Puma::Cluster::WorkerHandle>]
+    attr_reader :workers
+
     def stop_workers
       log "- Gracefully shutting down workers..."
       @workers.each { |x| x.term }
@@ -37,7 +36,7 @@ module Puma
       begin
         loop do
           wait_workers
-          break if @workers.empty?
+          break if @workers.reject {|w| w.pid.nil?}.empty?
           sleep 0.2
         end
       rescue Interrupt
@@ -46,6 +45,7 @@ module Puma
     end

     def start_phased_restart
+      @events.fire_on_restart!
       @phase += 1
       log "- Starting phased worker restart, phase: #{@phase}"

@@ -62,143 +62,102 @@ module Puma
       @workers.each { |x| x.hup }
     end

-    class Worker
-      def initialize(idx, pid, phase, options)
-        @index = idx
-        @pid = pid
-        @phase = phase
-        @stage = :started
-        @signal = "TERM"
-        @options = options
-        @first_term_sent = nil
-        @started_at = Time.now
-        @last_checkin = Time.now
-        @last_status = '{}'
-        @term = false
-      end
-
-      attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status, :started_at
-
-      def booted?
-        @stage == :booted
-      end
-
-      def boot!
-        @last_checkin = Time.now
-        @stage = :booted
-      end
-
-      def term?
-        @term
-      end
-
-      def ping!(status)
-        @last_checkin = Time.now
-        @last_status = status
-      end
-
-      def ping_timeout?(which)
-        Time.now - @last_checkin > which
-      end
-
-      def term
-        begin
-          if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]
-            @signal = "KILL"
-          else
-            @term ||= true
-            @first_term_sent ||= Time.now
-          end
-          Process.kill @signal, @pid
-        rescue Errno::ESRCH
-        end
-      end
-
-      def kill
-        Process.kill "KILL", @pid
-      rescue Errno::ESRCH
-      end
-
-      def hup
-        Process.kill "HUP", @pid
-      rescue Errno::ESRCH
-      end
-    end
-
     def spawn_workers
       diff = @options[:workers] - @workers.size
       return if diff < 1

       master = Process.pid
+      if @options[:fork_worker]
+        @fork_writer << "-1\n"
+      end

       diff.times do
         idx = next_worker_index
-        @launcher.config.run_hooks :before_worker_fork, idx

-
-
-
-
-
+        if @options[:fork_worker] && idx != 0
+          @fork_writer << "#{idx}\n"
+          pid = nil
+        else
+          pid = spawn_worker(idx, master)
         end

         debug "Spawned worker: #{pid}"
-        @workers <<
+        @workers << WorkerHandle.new(idx, pid, @phase, @options)
+      end
+
+      if @options[:fork_worker] &&
+        @workers.all? {|x| x.phase == @phase}

-        @
+        @fork_writer << "0\n"
       end
+    end
+
+    # @version 5.0.0
+    def spawn_worker(idx, master)
+      @config.run_hooks(:before_worker_fork, idx, @log_writer)

-
-
+      pid = fork { worker(idx, master) }
+      if !pid
+        log "! Complete inability to spawn new workers detected"
+        log "! Seppuku is the only choice."
+        exit! 1
       end
+
+      @config.run_hooks(:after_worker_fork, idx, @log_writer)
+      pid
     end

     def cull_workers
       diff = @workers.size - @options[:workers]
       return if diff < 1
+      debug "Culling #{diff} workers"

-
-
-      workers_to_cull = @workers[-diff,diff]
-      debug "Workers to cull: #{workers_to_cull.inspect}"
+      workers = workers_to_cull(diff)
+      debug "Workers to cull: #{workers.inspect}"

-
-        log "- Worker #{worker.index} (
+      workers.each do |worker|
+        log "- Worker #{worker.index} (PID: #{worker.pid}) terminating"
         worker.term
       end
     end

+    def workers_to_cull(diff)
+      workers = @workers.sort_by(&:started_at)
+
+      # In fork_worker mode, worker 0 acts as our master process.
+      # We should avoid culling it to preserve copy-on-write memory gains.
+      workers.reject! { |w| w.index == 0 } if @options[:fork_worker]
+
+      workers[cull_start_index(diff), diff]
+    end
+
+    def cull_start_index(diff)
+      case @options[:worker_culling_strategy]
+      when :oldest
+        0
+      else # :youngest
+        -diff
+      end
+    end
+
+    # @!attribute [r] next_worker_index
     def next_worker_index
-
-
-
-
+      occupied_positions = @workers.map(&:index)
+      idx = 0
+      idx += 1 until !occupied_positions.include?(idx)
+      idx
     end

     def all_workers_booted?
       @workers.count { |w| !w.booted? } == 0
     end

-    def check_workers
-      return if
+    def check_workers
+      return if @next_check >= Time.now

-      @next_check = Time.now +
-
-      any = false
-
-      @workers.each do |w|
-        next if !w.booted? && !w.ping_timeout?(@options[:worker_boot_timeout])
-        if w.ping_timeout?(@options[:worker_timeout])
-          log "! Terminating timed out worker: #{w.pid}"
-          w.kill
-          any = true
-        end
-      end
-
-      # If we killed any timed out workers, try to catch them
-      # during this loop by giving the kernel time to kill them.
-      sleep 1 if any
+      @next_check = Time.now + @options[:worker_check_interval]

+      timeout_workers
       wait_workers
       cull_workers
       spawn_workers
@@ -211,104 +170,40 @@ module Puma
         w = @workers.find { |x| x.phase != @phase }

         if w
-
-            @phased_state = :waiting
-            log "- Stopping #{w.pid} for phased upgrade..."
-          end
-
+          log "- Stopping #{w.pid} for phased upgrade..."
           unless w.term?
             w.term
             log "- #{w.signal} sent to #{w.pid}..."
           end
         end
       end
-    end

-
-
+      t = @workers.reject(&:term?)
+      t.map!(&:ping_timeout)

-
-        @wakeup.write "!" unless @wakeup.closed?
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+      @next_check = [t.min, @next_check].compact.min
     end

     def worker(index, master)
-      title = "puma: cluster worker #{index}: #{master}"
-      title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
-      $0 = title
-
-      Signal.trap "SIGINT", "IGNORE"
-
       @workers = []
+
       @master_read.close
       @suicide_pipe.close
+      @fork_writer.close

-
-
-
-
-        exit! 1
-      end
-
-      # If we're not running under a Bundler context, then
-      # report the info about the context we will be using
-      if !ENV['BUNDLE_GEMFILE']
-        if File.exist?("Gemfile")
-          log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
-        elsif File.exist?("gems.rb")
-          log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
-        end
+      pipes = { check_pipe: @check_pipe, worker_write: @worker_write }
+      if @options[:fork_worker]
+        pipes[:fork_pipe] = @fork_pipe
+        pipes[:wakeup] = @wakeup
       end

-
-
-
-
-
-
-
-        @worker_write << "e#{Process.pid}\n" rescue nil
-        server.stop
-      end
-
-      begin
-        @worker_write << "b#{Process.pid}\n"
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-        STDERR.puts "Master seems to have exited, exiting."
-        return
-      end
-
-      Thread.new(@worker_write) do |io|
-        Puma.set_thread_name "stat payload"
-        base_payload = "p#{Process.pid}"
-
-        while true
-          sleep Const::WORKER_CHECK_INTERVAL
-          begin
-            b = server.backlog || 0
-            r = server.running || 0
-            t = server.pool_capacity || 0
-            m = server.max_threads || 0
-            payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r}, "pool_capacity":#{t}, "max_threads": #{m} }\n!
-            io << payload
-          rescue IOError
-            Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-            break
-          end
-        end
-      end
-
-      server.run.join
-
-      # Invoke any worker shutdown hooks so they can prevent the worker
-      # exiting until any background operations are completed
-      @launcher.config.run_hooks :before_worker_shutdown, index
-    ensure
-      @worker_write << "t#{Process.pid}\n" rescue nil
-      @worker_write.close
+      server = start_server if preload?
+      new_worker = Worker.new index: index,
+                              master: master,
+                              launcher: @launcher,
+                              pipes: pipes,
+                              server: server
+      new_worker.run
     end

     def restart
@@ -316,8 +211,8 @@ module Puma
       stop
     end

-    def phased_restart
-      return false if @options[:preload_app]
+    def phased_restart(refork = false)
+      return false if @options[:preload_app] && !refork

       @phased_restart = true
       wakeup!
@@ -333,7 +228,7 @@ module Puma
     def stop_blocked
       @status = :stop if @status == :run
       wakeup!
-      @control
+      @control&.stop true
       Process.waitall
     end

@@ -350,20 +245,61 @@ module Puma

     # Inside of a child process, this will return all zeroes, as @workers is only populated in
     # the master process.
+    # @!attribute [r] stats
     def stats
       old_worker_count = @workers.count { |w| w.phase != @phase }
-
-
-
+      worker_status = @workers.map do |w|
+        {
+          started_at: utc_iso8601(w.started_at),
+          pid: w.pid,
+          index: w.index,
+          phase: w.phase,
+          booted: w.booted?,
+          last_checkin: utc_iso8601(w.last_checkin),
+          last_status: w.last_status,
+        }
+      end
+
+      {
+        started_at: utc_iso8601(@started_at),
+        workers: @workers.size,
+        phase: @phase,
+        booted_workers: worker_status.count { |w| w[:booted] },
+        old_workers: old_worker_count,
+        worker_status: worker_status,
+      }.merge(super)
     end

     def preload?
       @options[:preload_app]
     end

+    # @version 5.0.0
+    def fork_worker!
+      if (worker = @workers.find { |w| w.index == 0 })
+        worker.phase += 1
+      end
+      phased_restart(true)
+    end
+
     # We do this in a separate method to keep the lambda scope
     # of the signals handlers as small as possible.
     def setup_signals
+      if @options[:fork_worker]
+        Signal.trap "SIGURG" do
+          fork_worker!
+        end
+
+        # Auto-fork after the specified number of requests.
+        if (fork_requests = @options[:fork_worker].to_i) > 0
+          @events.register(:ping!) do |w|
+            fork_worker! if w.index == 0 &&
+              w.phase == 0 &&
+              w.last_status[:requests_count] >= fork_requests
+          end
+        end
+      end
+
       Signal.trap "SIGCHLD" do
         wakeup!
       end
@@ -392,7 +328,7 @@ module Puma

         stop_workers
         stop
-
+        @events.fire_on_stopped!
         raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
         exit 0 # Clean exit, workers were stopped
       end
@@ -404,15 +340,25 @@ module Puma

       output_header "cluster"

-
-
-      before = Thread.list
+      # This is aligned with the output from Runner, see Runner#output_header
+      log "* Workers: #{@options[:workers]}"

       if preload?
+        # Threads explicitly marked as fork safe will be ignored. Used in Rails,
+        # but may be used by anyone. Note that we need to explicit
+        # Process::Waiter check here because there's a bug in Ruby 2.6 and below
+        # where calling thread_variable_get on a Process::Waiter will segfault.
+        # We can drop that clause once those versions of Ruby are no longer
+        # supported.
+        fork_safe = ->(t) { !t.is_a?(Process::Waiter) && t.thread_variable_get(:fork_safe) }
+
+        before = Thread.list.reject(&fork_safe)
+
+        log "* Restarts: (\u2714) hot (\u2716) phased"
         log "* Preloading application"
         load_and_bind

-        after = Thread.list
+        after = Thread.list.reject(&fork_safe)

         if after.size > before.size
           threads = (after - before)
@@ -426,14 +372,14 @@ module Puma
           end
         end
       else
-        log "*
+        log "* Restarts: (\u2714) hot (\u2714) phased"

-        unless @
+        unless @config.app_configured?
           error "No application configured, nothing to run"
           exit 1
         end

-        @launcher.binder.parse @options[:binds]
+        @launcher.binder.parse @options[:binds]
       end

       read, @wakeup = Puma::Util.pipe
@@ -447,12 +393,13 @@ module Puma
       #
       @check_pipe, @suicide_pipe = Puma::Util.pipe

-
-
-
-
-
-
+      # Separate pipe used by worker 0 to receive commands to
+      # fork new worker processes.
+      @fork_pipe, @fork_writer = Puma::Util.pipe
+
+      log "Use Ctrl-C to stop"
+
+      single_worker_warning

       redirect_io

@@ -464,7 +411,7 @@ module Puma

       @master_read, @worker_write = read, @wakeup

-      @
+      @config.run_hooks(:before_fork, nil, @log_writer)

       spawn_workers

@@ -472,51 +419,67 @@ module Puma
         stop
       end

-      @launcher.events.fire_on_booted!
-
       begin
-
+        booted = false
+        in_phased_restart = false
+        workers_not_booted = @options[:workers]

         while @status == :run
           begin
             if @phased_restart
               start_phased_restart
               @phased_restart = false
+              in_phased_restart = true
+              workers_not_booted = @options[:workers]
             end

-            check_workers
+            check_workers

-
-
-            res = IO.select([read], nil, nil, Const::WORKER_CHECK_INTERVAL)
-
-            if res
+            if read.wait_readable([0, @next_check - Time.now].max)
               req = read.read_nonblock(1)

+              @next_check = Time.now if req == "!"
               next if !req || req == "!"

               result = read.gets
               pid = result.to_i

+              if req == "b" || req == "f"
+                pid, idx = result.split(':').map(&:to_i)
+                w = @workers.find {|x| x.index == idx}
+                w.pid = pid if w.pid.nil?
+              end
+
               if w = @workers.find { |x| x.pid == pid }
                 case req
                 when "b"
                   w.boot!
-                  log "- Worker #{w.index} (
-
+                  log "- Worker #{w.index} (PID: #{pid}) booted in #{w.uptime.round(2)}s, phase: #{w.phase}"
+                  @next_check = Time.now
+                  workers_not_booted -= 1
                 when "e"
                   # external term, see worker method, Signal.trap "SIGTERM"
-                  w.
+                  w.term!
                 when "t"
                   w.term unless w.term?
-                  force_check = true
                 when "p"
                   w.ping!(result.sub(/^\d+/,'').chomp)
+                  @events.fire(:ping!, w)
+                  if !booted && @workers.none? {|worker| worker.last_status.empty?}
+                    @events.fire_on_booted!
+                    debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+                    booted = true
+                  end
                 end
               else
                 log "! Out-of-sync worker list, no #{pid} worker"
               end
             end
+            if in_phased_restart && workers_not_booted.zero?
+              @events.fire_on_booted!
+              debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+              in_phased_restart = false
+            end

           rescue Interrupt
             @status = :stop
@@ -534,10 +497,20 @@ module Puma

     private

+    def single_worker_warning
+      return if @options[:workers] != 1 || @options[:silence_single_worker_warning]
+
+      log "! WARNING: Detected running cluster mode with 1 worker."
+      log "! Running Puma in cluster mode with a single worker is often a misconfiguration."
+      log "! Consider running Puma in single-mode (workers = 0) in order to reduce memory overhead."
+      log "! Set the `silence_single_worker_warning` option to silence this warning message."
+    end
+
     # loops thru @workers, removing workers that exited, and calling
     # `#term` if needed
     def wait_workers
       @workers.reject! do |w|
+        next false if w.pid.nil?
         begin
           if Process.wait(w.pid, Process::WNOHANG)
             true
@@ -546,7 +519,29 @@ module Puma
             nil
           end
         rescue Errno::ECHILD
-
+          begin
+            Process.kill(0, w.pid)
+            # child still alive but has another parent (e.g., using fork_worker)
+            w.term if w.term?
+            false
+          rescue Errno::ESRCH, Errno::EPERM
+            true # child is already terminated
+          end
+        end
+      end
+    end
+
+    # @version 5.0.0
+    def timeout_workers
+      @workers.each do |w|
+        if !w.term? && w.ping_timeout <= Time.now
+          details = if w.booted?
+            "(worker failed to check in within #{@options[:worker_timeout]} seconds)"
+          else
+            "(worker failed to boot within #{@options[:worker_boot_timeout]} seconds)"
+          end
+          log "! Terminating timed out worker #{details}: #{w.pid}"
+          w.kill
         end
       end
     end
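The reworked Cluster#stats above now returns a nested hash (serialized elsewhere by the new data/lib/puma/json_serialization.rb) instead of a hand-built JSON string. A minimal consumer sketch, assuming the Puma.stats_hash helper available in Puma 5+/6.x and run in the master process (per the comment above, a child process only sees zeroed cluster fields):

require 'puma'

# Summarize cluster health from the hash built by Cluster#stats.
stats = Puma.stats_hash

puts "phase #{stats[:phase]}: #{stats[:booted_workers]}/#{stats[:workers]} workers booted, " \
     "#{stats[:old_workers]} still on an old phase"

Array(stats[:worker_status]).each do |w|
  puts "worker #{w[:index]} pid=#{w[:pid]} phase=#{w[:phase]} last_checkin=#{w[:last_checkin]}"
end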