puma 3.12.6 → 6.3.0
- checksums.yaml +4 -4
- data/History.md +1806 -451
- data/LICENSE +23 -20
- data/README.md +217 -65
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +59 -21
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +69 -58
- data/docs/fork_worker.md +31 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/{tools → docs}/jungle/rc.d/README.md +1 -1
- data/{tools → docs}/jungle/rc.d/puma +2 -2
- data/docs/kubernetes.md +66 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +22 -12
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +47 -22
- data/docs/signals.md +13 -11
- data/docs/stats.md +142 -0
- data/docs/systemd.md +94 -120
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +61 -3
- data/ext/puma_http11/http11_parser.c +103 -117
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +22 -38
- data/ext/puma_http11/http11_parser.rl +3 -3
- data/ext/puma_http11/http11_parser_common.rl +6 -6
- data/ext/puma_http11/mini_ssl.c +389 -99
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +108 -116
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +84 -99
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +248 -92
- data/ext/puma_http11/puma_http11.c +49 -57
- data/lib/puma/app/status.rb +71 -49
- data/lib/puma/binder.rb +244 -150
- data/lib/puma/cli.rb +38 -34
- data/lib/puma/client.rb +388 -244
- data/lib/puma/cluster/worker.rb +180 -0
- data/lib/puma/cluster/worker_handle.rb +97 -0
- data/lib/puma/cluster.rb +261 -243
- data/lib/puma/commonlogger.rb +21 -14
- data/lib/puma/configuration.rb +116 -88
- data/lib/puma/const.rb +154 -104
- data/lib/puma/control_cli.rb +115 -70
- data/lib/puma/detect.rb +33 -2
- data/lib/puma/dsl.rb +764 -134
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +16 -112
- data/lib/puma/io_buffer.rb +42 -5
- data/lib/puma/jruby_restart.rb +2 -59
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +184 -133
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +93 -0
- data/lib/puma/minissl.rb +263 -70
- data/lib/puma/null_io.rb +18 -1
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +3 -1
- data/lib/puma/plugin.rb +7 -13
- data/lib/puma/rack/builder.rb +9 -11
- data/lib/puma/rack/urlmap.rb +2 -0
- data/lib/puma/rack_default.rb +21 -4
- data/lib/puma/reactor.rb +93 -315
- data/lib/puma/request.rb +671 -0
- data/lib/puma/runner.rb +94 -69
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +327 -772
- data/lib/puma/single.rb +20 -74
- data/lib/puma/state_file.rb +45 -8
- data/lib/puma/thread_pool.rb +146 -92
- data/lib/puma/util.rb +22 -10
- data/lib/puma.rb +60 -5
- data/lib/rack/handler/puma.rb +116 -90
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +0 -1
- metadata +54 -32
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/accept_nonblock.rb +0 -23
- data/lib/puma/compat.rb +0 -14
- data/lib/puma/convenient.rb +0 -25
- data/lib/puma/daemon_ext.rb +0 -33
- data/lib/puma/delegation.rb +0 -13
- data/lib/puma/java_io_buffer.rb +0 -47
- data/lib/puma/rack/backports/uri/common_193.rb +0 -33
- data/lib/puma/tcp_logger.rb +0 -41
- data/tools/jungle/README.md +0 -19
- data/tools/jungle/init.d/README.md +0 -61
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
- /data/{tools → docs}/jungle/rc.d/puma.conf +0 -0
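The jump spans three major versions (4, 5, and 6), so the History.md entries above are the authoritative list of breaking changes. If you are taking this upgrade deliberately, a minimal Gemfile pin might look like the following sketch (the pessimistic version constraint is an assumption, not part of this diff):

```ruby
# Gemfile -- hedged sketch: track the 6.3 series instead of floating across majors
gem "puma", "~> 6.3"
```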
data/lib/puma/cluster.rb
CHANGED
```diff
@@ -1,49 +1,51 @@
 # frozen_string_literal: true
 
-require 'puma/runner'
-require 'puma/util'
-require 'puma/plugin'
-
-require 'time'
+require_relative 'runner'
+require_relative 'util'
+require_relative 'plugin'
+require_relative 'cluster/worker_handle'
+require_relative 'cluster/worker'
 
 module Puma
   # This class is instantiated by the `Puma::Launcher` and used
   # to boot and serve a Ruby application when puma "workers" are needed
   # i.e. when using multi-processes. For example `$ puma -w 5`
   #
-  # At the core of this class is running an instance of `Puma::Server` which
-  # gets created via the `start_server` method from the `Puma::Runner` class
-  # that this inherits from.
-  #
   # An instance of this class will spawn the number of processes passed in
   # via the `spawn_workers` method call. Each worker will have it's own
   # instance of a `Puma::Server`.
   class Cluster < Runner
-    WORKER_CHECK_INTERVAL = 5
-
-    def initialize(cli, events)
-      super cli, events
+    def initialize(launcher)
+      super(launcher)
 
       @phase = 0
       @workers = []
-      @next_check = nil
+      @next_check = Time.now
 
-      @phased_state = :idle
       @phased_restart = false
     end
 
+    # Returns the list of cluster worker handles.
+    # @return [Array<Puma::Cluster::WorkerHandle>]
+    attr_reader :workers
+
     def stop_workers
       log "- Gracefully shutting down workers..."
       @workers.each { |x| x.term }
 
       begin
-        Process.waitall
+        loop do
+          wait_workers
+          break if @workers.reject {|w| w.pid.nil?}.empty?
+          sleep 0.2
+        end
       rescue Interrupt
         log "! Cancelled waiting for workers"
       end
     end
 
     def start_phased_restart
+      @events.fire_on_restart!
       @phase += 1
       log "- Starting phased worker restart, phase: #{@phase}"
 
```
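`start_phased_restart` above runs in the master when a phased restart is requested; SIGUSR1 is Puma's documented signal for that. A hedged sketch of triggering it externally (the pidfile path is an assumption):

```ruby
# Illustrative only: ask a running Puma master for the phased restart handled above.
# SIGUSR1 is the documented phased-restart signal; adjust the pidfile path to your setup.
master_pid = Integer(File.read("tmp/pids/puma.pid").strip)
Process.kill("SIGUSR1", master_pid) # master bumps @phase and replaces workers one at a time
```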
```diff
@@ -60,155 +62,103 @@
       @workers.each { |x| x.hup }
     end
 
-    class Worker
-      def initialize(idx, pid, phase, options)
-        @index = idx
-        @pid = pid
-        @phase = phase
-        @stage = :started
-        @signal = "TERM"
-        @options = options
-        @first_term_sent = nil
-        @last_checkin = Time.now
-        @last_status = '{}'
-        @dead = false
-      end
-
-      attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status
-
-      def booted?
-        @stage == :booted
-      end
-
-      def boot!
-        @last_checkin = Time.now
-        @stage = :booted
-      end
-
-      def dead?
-        @dead
-      end
-
-      def dead!
-        @dead = true
-      end
-
-      def ping!(status)
-        @last_checkin = Time.now
-        @last_status = status
-      end
-
-      def ping_timeout?(which)
-        Time.now - @last_checkin > which
-      end
-
-      def term
-        begin
-          if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]
-            @signal = "KILL"
-          else
-            @first_term_sent ||= Time.now
-          end
-
-          Process.kill @signal, @pid
-        rescue Errno::ESRCH
-        end
-      end
-
-      def kill
-        Process.kill "KILL", @pid
-      rescue Errno::ESRCH
-      end
-
-      def hup
-        Process.kill "HUP", @pid
-      rescue Errno::ESRCH
-      end
-    end
-
     def spawn_workers
       diff = @options[:workers] - @workers.size
       return if diff < 1
 
       master = Process.pid
+      if @options[:fork_worker]
+        @fork_writer << "-1\n"
+      end
 
       diff.times do
         idx = next_worker_index
-        @launcher.config.run_hooks :before_worker_fork, idx
 
-        pid = fork { worker(idx, master) }
-        if !pid
-          log "! Complete inability to spawn new workers detected"
-          log "! Seppuku is the only choice."
-          exit! 1
+        if @options[:fork_worker] && idx != 0
+          @fork_writer << "#{idx}\n"
+          pid = nil
+        else
+          pid = spawn_worker(idx, master)
         end
 
         debug "Spawned worker: #{pid}"
-        @workers << Worker.new(idx, pid, @phase, @options)
+        @workers << WorkerHandle.new(idx, pid, @phase, @options)
+      end
 
-        @launcher.config.run_hooks :after_worker_fork, idx
+      if @options[:fork_worker] &&
+        @workers.all? {|x| x.phase == @phase}
+
+        @fork_writer << "0\n"
       end
+    end
 
-      if diff > 0
-        @phased_state = :idle
+    # @version 5.0.0
+    def spawn_worker(idx, master)
+      @config.run_hooks(:before_worker_fork, idx, @log_writer)
+
+      pid = fork { worker(idx, master) }
+      if !pid
+        log "! Complete inability to spawn new workers detected"
+        log "! Seppuku is the only choice."
+        exit! 1
       end
+
+      @config.run_hooks(:after_worker_fork, idx, @log_writer)
+      pid
     end
 
     def cull_workers
       diff = @workers.size - @options[:workers]
       return if diff < 1
+      debug "Culling #{diff} workers"
 
-      debug "Culling #{diff.inspect} workers"
-
-      workers_to_cull = @workers[-diff,diff]
-      debug "Workers to cull: #{workers_to_cull.inspect}"
+      workers = workers_to_cull(diff)
+      debug "Workers to cull: #{workers.inspect}"
 
-      workers_to_cull.each do |worker|
-        log "- Worker #{worker.index} (pid: #{worker.pid}) terminating"
+      workers.each do |worker|
+        log "- Worker #{worker.index} (PID: #{worker.pid}) terminating"
         worker.term
       end
     end
 
-    def next_worker_index
-      all_positions = 0...@options[:workers]
-      occupied_positions = @workers.map { |w| w.index }
-      available_positions = all_positions.to_a - occupied_positions
-      available_positions.first
-    end
-
-    def all_workers_booted?
-      @workers.count { |w| !w.booted? } == 0
-    end
+    def workers_to_cull(diff)
+      workers = @workers.sort_by(&:started_at)
 
-    def check_workers(force=false)
-      return if !force && @next_check && @next_check >= Time.now
+      # In fork_worker mode, worker 0 acts as our master process.
+      # We should avoid culling it to preserve copy-on-write memory gains.
+      workers.reject! { |w| w.index == 0 } if @options[:fork_worker]
 
-      @next_check = Time.now + WORKER_CHECK_INTERVAL
-
-      any = false
+      workers[cull_start_index(diff), diff]
+    end
 
-      @workers.each do |w|
-        next if !w.booted? && !w.ping_timeout?(@options[:worker_boot_timeout])
-        if w.ping_timeout?(@options[:worker_timeout])
-          log "! Terminating timed out worker: #{w.pid}"
-          w.kill
-          any = true
-        end
+    def cull_start_index(diff)
+      case @options[:worker_culling_strategy]
+      when :oldest
+        0
+      else # :youngest
+        -diff
       end
+    end
 
-      # If we killed any timed out workers, try to catch them
-      # before we start spawning new ones
-      Process.waitall if any
+    # @!attribute [r] next_worker_index
+    def next_worker_index
+      occupied_positions = @workers.map(&:index)
+      idx = 0
+      idx += 1 until !occupied_positions.include?(idx)
+      idx
+    end
 
-
-
-
+    def all_workers_booted?
+      @workers.count { |w| !w.booted? } == 0
+    end
 
-
-
+    def check_workers
+      return if @next_check >= Time.now
 
-
+      @next_check = Time.now + @options[:worker_check_interval]
 
+      timeout_workers
+      wait_workers
       cull_workers
       spawn_workers
 
```
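The `-1`, `#{idx}`, and `0` tokens written to `@fork_writer` are the master's commands to worker 0, which forks its siblings when `fork_worker` mode is on. A minimal config sketch of the options this hunk reads; `fork_worker` and `worker_culling_strategy` are Puma 5+ DSL methods, and the values here are illustrative:

```ruby
# config/puma.rb -- illustrative values, not part of this diff
workers 4
fork_worker                     # worker 0 forks workers 1..3 via the @fork_pipe protocol above
worker_culling_strategy :oldest # cull_start_index returns 0, so the oldest workers go first
```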
```diff
@@ -220,99 +170,40 @@
         w = @workers.find { |x| x.phase != @phase }
 
         if w
-          if @phased_state == :idle
-            @phased_state = :waiting
-            log "- Stopping #{w.pid} for phased upgrade..."
+          log "- Stopping #{w.pid} for phased upgrade..."
+          unless w.term?
+            w.term
+            log "- #{w.signal} sent to #{w.pid}..."
           end
-
-          w.term
-          log "- #{w.signal} sent to #{w.pid}..."
         end
       end
-    end
 
-    def wakeup!
-      return if !@wakeup
+      t = @workers.reject(&:term?)
+      t.map!(&:ping_timeout)
 
-      begin
-        @wakeup.write "!" unless @wakeup.closed?
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+      @next_check = [t.min, @next_check].compact.min
     end
 
     def worker(index, master)
-      title = "puma: cluster worker #{index}: #{master}"
-      title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
-      $0 = title
-
-      Signal.trap "SIGINT", "IGNORE"
-
       @workers = []
+
       @master_read.close
       @suicide_pipe.close
+      @fork_writer.close
 
-      Thread.new do
-        IO.select [@check_pipe]
-        log "! Detected parent died, dying"
-        exit! 1
-      end
-
-      # If we're not running under a Bundler context, then
-      # report the info about the context we will be using
-      if !ENV['BUNDLE_GEMFILE']
-        if File.exist?("Gemfile")
-          log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
-        elsif File.exist?("gems.rb")
-          log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
-        end
+      pipes = { check_pipe: @check_pipe, worker_write: @worker_write }
+      if @options[:fork_worker]
+        pipes[:fork_pipe] = @fork_pipe
+        pipes[:wakeup] = @wakeup
       end
 
-      # Invoke any worker boot hooks so they can get
-      # things in shape before booting the app.
-      @launcher.config.run_hooks :before_worker_boot, index
-
-      server = start_server
-
-      Signal.trap "SIGTERM" do
-        server.stop
-      end
-
-      begin
-        @worker_write << "b#{Process.pid}\n"
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-        STDERR.puts "Master seems to have exited, exiting."
-        return
-      end
-
-      Thread.new(@worker_write) do |io|
-        base_payload = "p#{Process.pid}"
-
-        while true
-          sleep WORKER_CHECK_INTERVAL
-          begin
-            b = server.backlog || 0
-            r = server.running || 0
-            t = server.pool_capacity || 0
-            m = server.max_threads || 0
-            payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r}, "pool_capacity":#{t}, "max_threads": #{m} }\n!
-            io << payload
-          rescue IOError
-            Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-            break
-          end
-        end
-      end
-
-      server.run.join
-
-      # Invoke any worker shutdown hooks so they can prevent the worker
-      # exiting until any background operations are completed
-      @launcher.config.run_hooks :before_worker_shutdown, index
-    ensure
-      @worker_write << "t#{Process.pid}\n" rescue nil
-      @worker_write.close
+      server = start_server if preload?
+      new_worker = Worker.new index: index,
+                              master: master,
+                              launcher: @launcher,
+                              pipes: pipes,
+                              server: server
+      new_worker.run
     end
 
     def restart
```
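Both sides of this hunk speak a one-letter protocol over `@worker_write`; the removed 3.x status thread shows the message shapes directly. A sketch of the tags visible in this diff (`io` and `status_json` are stand-ins for the worker's pipe and stats payload):

```ruby
# Messages a worker writes to the master, per the code above (illustrative):
io << "b#{Process.pid}\n"               # worker booted
io << "p#{Process.pid}#{status_json}\n" # periodic ping carrying a stats payload
io << "t#{Process.pid}\n"               # worker terminating
# 6.x also handles "e" (externally terminated) and "f" ("pid:index" from a forked
# worker), as shown in the run loop further down.
```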
```diff
@@ -320,8 +211,8 @@
       stop
     end
 
-    def phased_restart
-      return false if @options[:preload_app]
+    def phased_restart(refork = false)
+      return false if @options[:preload_app] && !refork
 
       @phased_restart = true
       wakeup!
@@ -337,7 +228,7 @@
     def stop_blocked
       @status = :stop if @status == :run
       wakeup!
-      @control.stop(true) if @control
+      @control&.stop true
       Process.waitall
     end
 
@@ -352,20 +243,63 @@
       Dir.chdir dir
     end
 
+    # Inside of a child process, this will return all zeroes, as @workers is only populated in
+    # the master process.
+    # @!attribute [r] stats
     def stats
       old_worker_count = @workers.count { |w| w.phase != @phase }
-      booted_worker_count = @workers.count { |w| w.booted? }
-      worker_status = '[' + @workers.map { |w| %Q!{ "pid": #{w.pid}, "index": #{w.index}, "phase": #{w.phase}, "booted": #{w.booted?}, "last_checkin": "#{w.last_checkin.utc.iso8601}", "last_status": #{w.last_status} }!}.join(",") + ']'
-      %Q!{ "workers": #{@workers.size}, "phase": #{@phase}, "booted_workers": #{booted_worker_count}, "old_workers": #{old_worker_count}, "worker_status": #{worker_status} }!
+      worker_status = @workers.map do |w|
+        {
+          started_at: utc_iso8601(w.started_at),
+          pid: w.pid,
+          index: w.index,
+          phase: w.phase,
+          booted: w.booted?,
+          last_checkin: utc_iso8601(w.last_checkin),
+          last_status: w.last_status,
+        }
+      end
+
+      {
+        started_at: utc_iso8601(@started_at),
+        workers: @workers.size,
+        phase: @phase,
+        booted_workers: worker_status.count { |w| w[:booted] },
+        old_workers: old_worker_count,
+        worker_status: worker_status,
+      }.merge(super)
     end
 
     def preload?
       @options[:preload_app]
     end
 
+    # @version 5.0.0
+    def fork_worker!
+      if (worker = @workers.find { |w| w.index == 0 })
+        worker.phase += 1
+      end
+      phased_restart(true)
+    end
+
     # We do this in a separate method to keep the lambda scope
     # of the signals handlers as small as possible.
     def setup_signals
+      if @options[:fork_worker]
+        Signal.trap "SIGURG" do
+          fork_worker!
+        end
+
+        # Auto-fork after the specified number of requests.
+        if (fork_requests = @options[:fork_worker].to_i) > 0
+          @events.register(:ping!) do |w|
+            fork_worker! if w.index == 0 &&
+              w.phase == 0 &&
+              w.last_status[:requests_count] >= fork_requests
+          end
+        end
+      end
+
       Signal.trap "SIGCHLD" do
         wakeup!
       end
```
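`stats` now builds Ruby hashes (serialized elsewhere) instead of hand-assembled JSON strings. A hedged usage sketch: `Puma.stats_hash` has existed since Puma 5.0, and the key names mirror this hunk:

```ruby
# Reading cluster stats from inside the master process (illustrative)
stats = Puma.stats_hash
stats[:workers]        # configured worker count
stats[:booted_workers] # workers that have reported "b"
stats[:worker_status]  # per-worker hashes: :pid, :phase, :last_checkin, :last_status, ...
```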
```diff
@@ -390,10 +324,13 @@
           log "Early termination of worker"
           exit! 0
         else
+          @launcher.close_binder_listeners
+
           stop_workers
           stop
-
-          raise SignalException, "SIGTERM"
+          @events.fire_on_stopped!
+          raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
+          exit 0 # Clean exit, workers were stopped
         end
       end
     end
@@ -403,15 +340,25 @@
 
       output_header "cluster"
 
-      log "* Process workers: #{@options[:workers]}"
-
-      before = Thread.list
+      # This is aligned with the output from Runner, see Runner#output_header
+      log "* Workers: #{@options[:workers]}"
 
       if preload?
+        # Threads explicitly marked as fork safe will be ignored. Used in Rails,
+        # but may be used by anyone. Note that we need to explicit
+        # Process::Waiter check here because there's a bug in Ruby 2.6 and below
+        # where calling thread_variable_get on a Process::Waiter will segfault.
+        # We can drop that clause once those versions of Ruby are no longer
+        # supported.
+        fork_safe = ->(t) { !t.is_a?(Process::Waiter) && t.thread_variable_get(:fork_safe) }
+
+        before = Thread.list.reject(&fork_safe)
+
+        log "* Restarts: (\u2714) hot (\u2716) phased"
         log "* Preloading application"
         load_and_bind
 
-        after = Thread.list
+        after = Thread.list.reject(&fork_safe)
 
         if after.size > before.size
           threads = (after - before)
@@ -425,14 +372,14 @@
           end
         end
       else
-        log "* Phased restart available"
+        log "* Restarts: (\u2714) hot (\u2714) phased"
 
-        unless @launcher.config.app_configured?
+        unless @config.app_configured?
           error "No application configured, nothing to run"
           exit 1
         end
 
-        @launcher.binder.parse @options[:binds], self
+        @launcher.binder.parse @options[:binds]
       end
 
       read, @wakeup = Puma::Util.pipe
```
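The `fork_safe` lambda above lets a preloaded app exempt its own boot-time threads from the "Threads started in app boot" warning. A minimal sketch; the thread body is hypothetical:

```ruby
# Mark a preload-time background thread so Thread.list.reject(&fork_safe) skips it
t = Thread.new { loop { sleep 60 } }    # hypothetical background work
t.thread_variable_set(:fork_safe, true) # read back via thread_variable_get(:fork_safe)
```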
```diff
@@ -446,12 +393,13 @@
       #
       @check_pipe, @suicide_pipe = Puma::Util.pipe
 
-      if @options[:daemon]
-        Process.daemon(true)
-      else
-        log "Use Ctrl-C to stop"
-      end
-
+      # Separate pipe used by worker 0 to receive commands to
+      # fork new worker processes.
+      @fork_pipe, @fork_writer = Puma::Util.pipe
+
+      log "Use Ctrl-C to stop"
+
+      single_worker_warning
 
       redirect_io
 
@@ -463,7 +411,7 @@
 
       @master_read, @worker_write = read, @wakeup
 
-      @launcher.config.run_hooks :before_fork, nil
+      @config.run_hooks(:before_fork, nil, @log_writer)
 
       spawn_workers
 
@@ -471,48 +419,67 @@
         stop
       end
 
-      @launcher.events.fire_on_booted!
-
       begin
-        force_check = false
+        booted = false
+        in_phased_restart = false
+        workers_not_booted = @options[:workers]
 
         while @status == :run
           begin
             if @phased_restart
               start_phased_restart
               @phased_restart = false
+              in_phased_restart = true
+              workers_not_booted = @options[:workers]
             end
 
-            check_workers force_check
+            check_workers
 
-            force_check = false
-
-            res = IO.select([read], nil, nil, WORKER_CHECK_INTERVAL)
-
-            if res
+            if read.wait_readable([0, @next_check - Time.now].max)
               req = read.read_nonblock(1)
 
+              @next_check = Time.now if req == "!"
               next if !req || req == "!"
 
              result = read.gets
              pid = result.to_i
 
+              if req == "b" || req == "f"
+                pid, idx = result.split(':').map(&:to_i)
+                w = @workers.find {|x| x.index == idx}
+                w.pid = pid if w.pid.nil?
+              end
+
              if w = @workers.find { |x| x.pid == pid }
                case req
                when "b"
                  w.boot!
-                  log "- Worker #{w.index} (pid: #{pid}) booted, phase: #{w.phase}"
-                  force_check = true
+                  log "- Worker #{w.index} (PID: #{pid}) booted in #{w.uptime.round(2)}s, phase: #{w.phase}"
+                  @next_check = Time.now
+                  workers_not_booted -= 1
+                when "e"
+                  # external term, see worker method, Signal.trap "SIGTERM"
+                  w.term!
                when "t"
-                  w.dead!
-                  force_check = true
+                  w.term unless w.term?
                when "p"
                  w.ping!(result.sub(/^\d+/,'').chomp)
+                  @events.fire(:ping!, w)
+                  if !booted && @workers.none? {|worker| worker.last_status.empty?}
+                    @events.fire_on_booted!
+                    debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+                    booted = true
+                  end
                end
              else
                log "! Out-of-sync worker list, no #{pid} worker"
              end
            end
+            if in_phased_restart && workers_not_booted.zero?
+              @events.fire_on_booted!
+              debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+              in_phased_restart = false
+            end
 
          rescue Interrupt
            @status = :stop
```
```diff
@@ -527,5 +494,56 @@
         @wakeup.close
       end
     end
+
+    private
+
+    def single_worker_warning
+      return if @options[:workers] != 1 || @options[:silence_single_worker_warning]
+
+      log "! WARNING: Detected running cluster mode with 1 worker."
+      log "! Running Puma in cluster mode with a single worker is often a misconfiguration."
+      log "! Consider running Puma in single-mode (workers = 0) in order to reduce memory overhead."
+      log "! Set the `silence_single_worker_warning` option to silence this warning message."
+    end
+
+    # loops thru @workers, removing workers that exited, and calling
+    # `#term` if needed
+    def wait_workers
+      @workers.reject! do |w|
+        next false if w.pid.nil?
+        begin
+          if Process.wait(w.pid, Process::WNOHANG)
+            true
+          else
+            w.term if w.term?
+            nil
+          end
+        rescue Errno::ECHILD
+          begin
+            Process.kill(0, w.pid)
+            # child still alive but has another parent (e.g., using fork_worker)
+            w.term if w.term?
+            false
+          rescue Errno::ESRCH, Errno::EPERM
+            true # child is already terminated
+          end
+        end
+      end
+    end
+
+    # @version 5.0.0
+    def timeout_workers
+      @workers.each do |w|
+        if !w.term? && w.ping_timeout <= Time.now
+          details = if w.booted?
+            "(worker failed to check in within #{@options[:worker_timeout]} seconds)"
+          else
+            "(worker failed to boot within #{@options[:worker_boot_timeout]} seconds)"
+          end
+          log "! Terminating timed out worker #{details}: #{w.pid}"
+          w.kill
+        end
+      end
+    end
   end
 end
```
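`wait_workers` depends on non-blocking reaping: `Process.wait(pid, Process::WNOHANG)` returns nil while the child is still running and the pid once it has exited, raising Errno::ECHILD when there is nothing left to reap. A standalone illustration in plain Ruby:

```ruby
pid = fork { sleep 1 }
Process.wait(pid, Process::WNOHANG) # => nil; child alive, the master moves on
sleep 2
Process.wait(pid, Process::WNOHANG) # => pid; child reaped without blocking
```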