puma 3.11.1 → 6.6.0
This diff shows the contents of publicly released package versions as they appear in their respective public registries. It is provided for informational purposes only and reflects the changes between the two published versions.
- checksums.yaml +5 -5
- data/History.md +2092 -422
- data/LICENSE +23 -20
- data/README.md +301 -69
- data/bin/puma-wild +3 -9
- data/docs/architecture.md +59 -21
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +69 -58
- data/docs/fork_worker.md +41 -0
- data/docs/java_options.md +54 -0
- data/docs/jungle/README.md +9 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/docs/kubernetes.md +78 -0
- data/docs/nginx.md +2 -2
- data/docs/plugins.md +26 -12
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +48 -22
- data/docs/signals.md +13 -11
- data/docs/stats.md +147 -0
- data/docs/systemd.md +108 -117
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +2 -2
- data/ext/puma_http11/ext_help.h +1 -1
- data/ext/puma_http11/extconf.rb +68 -3
- data/ext/puma_http11/http11_parser.c +106 -118
- data/ext/puma_http11/http11_parser.h +2 -2
- data/ext/puma_http11/http11_parser.java.rl +22 -38
- data/ext/puma_http11/http11_parser.rl +6 -4
- data/ext/puma_http11/http11_parser_common.rl +6 -6
- data/ext/puma_http11/mini_ssl.c +474 -94
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +136 -121
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +84 -99
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +251 -88
- data/ext/puma_http11/puma_http11.c +53 -58
- data/lib/puma/app/status.rb +71 -49
- data/lib/puma/binder.rb +257 -151
- data/lib/puma/cli.rb +61 -38
- data/lib/puma/client.rb +464 -224
- data/lib/puma/cluster/worker.rb +183 -0
- data/lib/puma/cluster/worker_handle.rb +96 -0
- data/lib/puma/cluster.rb +343 -239
- data/lib/puma/commonlogger.rb +23 -14
- data/lib/puma/configuration.rb +144 -96
- data/lib/puma/const.rb +194 -115
- data/lib/puma/control_cli.rb +135 -81
- data/lib/puma/detect.rb +34 -2
- data/lib/puma/dsl.rb +1092 -153
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +17 -111
- data/lib/puma/io_buffer.rb +44 -5
- data/lib/puma/jruby_restart.rb +2 -73
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +205 -138
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +96 -0
- data/lib/puma/minissl.rb +279 -70
- data/lib/puma/null_io.rb +61 -2
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +3 -1
- data/lib/puma/plugin.rb +9 -13
- data/lib/puma/rack/builder.rb +10 -11
- data/lib/puma/rack/urlmap.rb +3 -1
- data/lib/puma/rack_default.rb +21 -4
- data/lib/puma/reactor.rb +97 -185
- data/lib/puma/request.rb +688 -0
- data/lib/puma/runner.rb +114 -69
- data/lib/puma/sd_notify.rb +146 -0
- data/lib/puma/server.rb +409 -704
- data/lib/puma/single.rb +29 -72
- data/lib/puma/state_file.rb +48 -9
- data/lib/puma/thread_pool.rb +234 -93
- data/lib/puma/util.rb +23 -10
- data/lib/puma.rb +68 -5
- data/lib/rack/handler/puma.rb +119 -86
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +0 -1
- metadata +55 -33
- data/ext/puma_http11/io_buffer.c +0 -155
- data/lib/puma/accept_nonblock.rb +0 -23
- data/lib/puma/compat.rb +0 -14
- data/lib/puma/convenient.rb +0 -23
- data/lib/puma/daemon_ext.rb +0 -31
- data/lib/puma/delegation.rb +0 -11
- data/lib/puma/java_io_buffer.rb +0 -45
- data/lib/puma/rack/backports/uri/common_193.rb +0 -33
- data/lib/puma/tcp_logger.rb +0 -39
- data/tools/jungle/README.md +0 -13
- data/tools/jungle/init.d/README.md +0 -59
- data/tools/jungle/init.d/puma +0 -421
- data/tools/jungle/init.d/run-puma +0 -18
- data/tools/jungle/upstart/README.md +0 -61
- data/tools/jungle/upstart/puma-manager.conf +0 -31
- data/tools/jungle/upstart/puma.conf +0 -69
data/lib/puma/cluster.rb
CHANGED
```diff
@@ -1,36 +1,51 @@
-require 'puma/runner'
-require 'puma/util'
-require 'puma/plugin'
+# frozen_string_literal: true
 
-require 'time'
+require_relative 'runner'
+require_relative 'util'
+require_relative 'plugin'
+require_relative 'cluster/worker_handle'
+require_relative 'cluster/worker'
 
 module Puma
+  # This class is instantiated by the `Puma::Launcher` and used
+  # to boot and serve a Ruby application when puma "workers" are needed
+  # i.e. when using multi-processes. For example `$ puma -w 5`
+  #
+  # An instance of this class will spawn the number of processes passed in
+  # via the `spawn_workers` method call. Each worker will have it's own
+  # instance of a `Puma::Server`.
   class Cluster < Runner
-    WORKER_CHECK_INTERVAL = 5
-
-    def initialize(cli, events)
-      super cli, events
+    def initialize(launcher)
+      super(launcher)
 
       @phase = 0
       @workers = []
-      @next_check = nil
+      @next_check = Time.now
 
-      @phased_state = :idle
       @phased_restart = false
     end
 
+    # Returns the list of cluster worker handles.
+    # @return [Array<Puma::Cluster::WorkerHandle>]
+    attr_reader :workers
+
     def stop_workers
       log "- Gracefully shutting down workers..."
       @workers.each { |x| x.term }
 
       begin
-        Process.waitall
+        loop do
+          wait_workers
+          break if @workers.reject {|w| w.pid.nil?}.empty?
+          sleep 0.2
+        end
       rescue Interrupt
         log "! Cancelled waiting for workers"
       end
     end
 
     def start_phased_restart
+      @events.fire_on_restart!
       @phase += 1
       log "- Starting phased worker restart, phase: #{@phase}"
 
@@ -47,155 +62,117 @@ module Puma
       @workers.each { |x| x.hup }
     end
 
-    class Worker
-      def initialize(idx, pid, phase, options)
-        @index = idx
-        @pid = pid
-        @phase = phase
-        @stage = :started
-        @signal = "TERM"
-        @options = options
-        @first_term_sent = nil
-        @last_checkin = Time.now
-        @last_status = '{}'
-        @dead = false
-      end
-
-      attr_reader :index, :pid, :phase, :signal, :last_checkin, :last_status
-
-      def booted?
-        @stage == :booted
-      end
-
-      def boot!
-        @last_checkin = Time.now
-        @stage = :booted
-      end
-
-      def dead?
-        @dead
-      end
-
-      def dead!
-        @dead = true
-      end
-
-      def ping!(status)
-        @last_checkin = Time.now
-        @last_status = status
-      end
-
-      def ping_timeout?(which)
-        Time.now - @last_checkin > which
-      end
-
-      def term
-        begin
-          if @first_term_sent && (Time.now - @first_term_sent) > @options[:worker_shutdown_timeout]
-            @signal = "KILL"
-          else
-            @first_term_sent ||= Time.now
-          end
-
-          Process.kill @signal, @pid
-        rescue Errno::ESRCH
-        end
-      end
-
-      def kill
-        Process.kill "KILL", @pid
-      rescue Errno::ESRCH
-      end
-
-      def hup
-        Process.kill "HUP", @pid
-      rescue Errno::ESRCH
-      end
-    end
-
     def spawn_workers
       diff = @options[:workers] - @workers.size
       return if diff < 1
 
       master = Process.pid
+      if @options[:fork_worker]
+        @fork_writer << "-1\n"
+      end
 
       diff.times do
         idx = next_worker_index
-        @launcher.config.run_hooks :before_worker_fork, idx
 
-        pid = fork { worker(idx, master) }
-        if !pid
-          log "! Complete inability to spawn new workers detected"
-          log "! Seppuku is the only choice."
-          exit! 1
+        if @options[:fork_worker] && idx != 0
+          @fork_writer << "#{idx}\n"
+          pid = nil
+        else
+          pid = spawn_worker(idx, master)
         end
 
         debug "Spawned worker: #{pid}"
-        @workers << Worker.new(idx, pid, @phase, @options)
+        @workers << WorkerHandle.new(idx, pid, @phase, @options)
+      end
 
-        @launcher.config.run_hooks :after_worker_fork, idx
+      if @options[:fork_worker] && all_workers_in_phase?
+        @fork_writer << "0\n"
+
+        if worker_at(0).phase > 0
+          @fork_writer << "-2\n"
+        end
       end
+    end
 
-      if diff > 0
-        @phased_state = :idle
+    # @version 5.0.0
+    def spawn_worker(idx, master)
+      @config.run_hooks(:before_worker_fork, idx, @log_writer)
+
+      pid = fork { worker(idx, master) }
+      if !pid
+        log "! Complete inability to spawn new workers detected"
+        log "! Seppuku is the only choice."
+        exit! 1
       end
+
+      @config.run_hooks(:after_worker_fork, idx, @log_writer)
+      pid
     end
 
     def cull_workers
       diff = @workers.size - @options[:workers]
       return if diff < 1
+      debug "Culling #{diff} workers"
 
-      debug "Culling #{diff} workers"
-
-      workers_to_cull = @workers[-diff,diff]
-      debug "Workers to cull: #{workers_to_cull.inspect}"
+      workers = workers_to_cull(diff)
+      debug "Workers to cull: #{workers.inspect}"
 
-      workers_to_cull.each do |worker|
-        log "- Worker #{worker.index} (pid: #{worker.pid}) terminating"
+      workers.each do |worker|
+        log "- Worker #{worker.index} (PID: #{worker.pid}) terminating"
         worker.term
       end
     end
 
-    def next_worker_index
-      all_positions = 0...@options[:workers]
-      occupied_positions = @workers.map { |w| w.index }
-      available_positions = all_positions.to_a - occupied_positions
-      available_positions.first
-    end
+    def workers_to_cull(diff)
+      workers = @workers.sort_by(&:started_at)
 
-    def all_workers_booted?
-      @workers.count { |w| !w.booted? } == 0
-    end
+      # In fork_worker mode, worker 0 acts as our master process.
+      # We should avoid culling it to preserve copy-on-write memory gains.
+      workers.reject! { |w| w.index == 0 } if @options[:fork_worker]
+
+      workers[cull_start_index(diff), diff]
+    end
 
-    def check_workers(force=false)
-      return if !force && @next_check && @next_check >= Time.now
+    def cull_start_index(diff)
+      case @options[:worker_culling_strategy]
+      when :oldest
+        0
+      else # :youngest
+        -diff
+      end
+    end
 
-      @next_check = Time.now + WORKER_CHECK_INTERVAL
+    # @!attribute [r] next_worker_index
+    def next_worker_index
+      occupied_positions = @workers.map(&:index)
+      idx = 0
+      idx += 1 until !occupied_positions.include?(idx)
+      idx
+    end
 
-      any = false
+    def worker_at(idx)
+      @workers.find { |w| w.index == idx }
+    end
 
-      @workers.each do |w|
-        next if !w.booted? && !w.ping_timeout?(@options[:worker_boot_timeout])
-        if w.ping_timeout?(@options[:worker_timeout])
-          log "! Terminating timed out worker: #{w.pid}"
-          w.kill
-          any = true
-        end
-      end
+    def all_workers_booted?
+      @workers.count { |w| !w.booted? } == 0
+    end
 
-      # If we killed any timed out workers, try to catch them
-      # during this loop by giving the kernel time to kill them.
-      sleep 1 if any
+    def all_workers_in_phase?
+      @workers.all? { |w| w.phase == @phase }
+    end
 
-      pids = []
-      while pid = Process.waitpid(-1, Process::WNOHANG) do
-        pids << pid
-      end
-
-      @workers.reject! { |w| w.dead? || pids.include?(w.pid) }
+    def all_workers_idle_timed_out?
+      (@workers.map(&:pid) - idle_timed_out_worker_pids).empty?
+    end
 
+    def check_workers
+      return if @next_check >= Time.now
+
+      @next_check = Time.now + @options[:worker_check_interval]
+
+      timeout_workers
+      wait_workers
       cull_workers
       spawn_workers
 
@@ -207,97 +184,40 @@ module Puma
         w = @workers.find { |x| x.phase != @phase }
 
         if w
-          if @phased_state == :idle
-            @phased_state = :waiting
-            log "- Stopping #{w.pid} for phased upgrade..."
+          log "- Stopping #{w.pid} for phased upgrade..."
+          unless w.term?
+            w.term
+            log "- #{w.signal} sent to #{w.pid}..."
           end
-
-          w.term
-          log "- #{w.signal} sent to #{w.pid}..."
         end
       end
-    end
 
-    def wakeup!
-      return unless @wakeup
+      t = @workers.reject(&:term?)
+      t.map!(&:ping_timeout)
 
-      begin
-        @wakeup.write "!" unless @wakeup.closed?
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-      end
+      @next_check = [t.min, @next_check].compact.min
     end
 
     def worker(index, master)
-      title = "puma: cluster worker #{index}: #{master}"
-      title += " [#{@options[:tag]}]" if @options[:tag] && !@options[:tag].empty?
-      $0 = title
-
-      Signal.trap "SIGINT", "IGNORE"
-
       @workers = []
+
       @master_read.close
       @suicide_pipe.close
+      @fork_writer.close
 
-      Thread.new do
-        IO.select [@check_pipe]
-        log "! Detected parent died, dying"
-        exit! 1
-      end
-
-      # If we're not running under a Bundler context, then
-      # report the info about the context we will be using
-      if !ENV['BUNDLE_GEMFILE']
-        if File.exist?("Gemfile")
-          log "+ Gemfile in context: #{File.expand_path("Gemfile")}"
-        elsif File.exist?("gems.rb")
-          log "+ Gemfile in context: #{File.expand_path("gems.rb")}"
-        end
+      pipes = { check_pipe: @check_pipe, worker_write: @worker_write }
+      if @options[:fork_worker]
+        pipes[:fork_pipe] = @fork_pipe
+        pipes[:wakeup] = @wakeup
       end
 
-      # Invoke any worker boot hooks so they can get
-      # things in shape before booting the app.
-      @launcher.config.run_hooks :before_worker_boot, index
-
-      server = start_server
-
-      Signal.trap "SIGTERM" do
-        server.stop
-      end
-
-      begin
-        @worker_write << "b#{Process.pid}\n"
-      rescue SystemCallError, IOError
-        Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-        STDERR.puts "Master seems to have exited, exiting."
-        return
-      end
-
-      Thread.new(@worker_write) do |io|
-        base_payload = "p#{Process.pid}"
-
-        while true
-          sleep WORKER_CHECK_INTERVAL
-          begin
-            b = server.backlog || 0
-            r = server.running || 0
-            payload = %Q!#{base_payload}{ "backlog":#{b}, "running":#{r} }\n!
-            io << payload
-          rescue IOError
-            Thread.current.purge_interrupt_queue if Thread.current.respond_to? :purge_interrupt_queue
-            break
-          end
-        end
-      end
-
-      server.run.join
-
-      # Invoke any worker shutdown hooks so they can prevent the worker
-      # exiting until any background operations are completed
-      @launcher.config.run_hooks :before_worker_shutdown, index
-    ensure
-      @worker_write << "t#{Process.pid}\n" rescue nil
-      @worker_write.close
+      server = start_server if preload?
+      new_worker = Worker.new index: index,
+                              master: master,
+                              launcher: @launcher,
+                              pipes: pipes,
+                              server: server
+      new_worker.run
     end
 
     def restart
@@ -305,8 +225,8 @@ module Puma
       stop
     end
 
-    def phased_restart
-      return false if @options[:preload_app]
+    def phased_restart(refork = false)
+      return false if @options[:preload_app] && !refork
 
       @phased_restart = true
       wakeup!
@@ -322,7 +242,7 @@ module Puma
     def stop_blocked
       @status = :stop if @status == :run
       wakeup!
-      @control.stop(true) if @control
+      @control&.stop true
       Process.waitall
     end
 
@@ -337,20 +257,63 @@ module Puma
       Dir.chdir dir
     end
 
+    # Inside of a child process, this will return all zeroes, as @workers is only populated in
+    # the master process.
+    # @!attribute [r] stats
     def stats
       old_worker_count = @workers.count { |w| w.phase != @phase }
-      booted_worker_count = @workers.count { |w| w.booted? }
-      worker_status = '[' + @workers.map { |w| %Q!{ "pid": #{w.pid}, "index": #{w.index}, "phase": #{w.phase}, "booted": #{w.booted?}, "last_checkin": "#{w.last_checkin.utc.iso8601}", "last_status": #{w.last_status} }! }.join(",") + ']'
-      %Q!{ "workers": #{@workers.size}, "phase": #{@phase}, "booted_workers": #{booted_worker_count}, "old_workers": #{old_worker_count}, "worker_status": #{worker_status} }!
+      worker_status = @workers.map do |w|
+        {
+          started_at: utc_iso8601(w.started_at),
+          pid: w.pid,
+          index: w.index,
+          phase: w.phase,
+          booted: w.booted?,
+          last_checkin: utc_iso8601(w.last_checkin),
+          last_status: w.last_status,
+        }
+      end
+
+      {
+        started_at: utc_iso8601(@started_at),
+        workers: @workers.size,
+        phase: @phase,
+        booted_workers: worker_status.count { |w| w[:booted] },
+        old_workers: old_worker_count,
+        worker_status: worker_status,
+      }.merge(super)
     end
 
     def preload?
       @options[:preload_app]
     end
 
+    # @version 5.0.0
+    def fork_worker!
+      if (worker = worker_at 0)
+        worker.phase += 1
+      end
+      phased_restart(true)
+    end
+
     # We do this in a separate method to keep the lambda scope
     # of the signals handlers as small as possible.
     def setup_signals
+      if @options[:fork_worker]
+        Signal.trap "SIGURG" do
+          fork_worker!
+        end
+
+        # Auto-fork after the specified number of requests.
+        if (fork_requests = @options[:fork_worker].to_i) > 0
+          @events.register(:ping!) do |w|
+            fork_worker! if w.index == 0 &&
+              w.phase == 0 &&
+              w.last_status[:requests_count] >= fork_requests
+          end
+        end
+      end
+
       Signal.trap "SIGCHLD" do
         wakeup!
       end
@@ -375,10 +338,13 @@ module Puma
           log "Early termination of worker"
           exit! 0
         else
+          @launcher.close_binder_listeners
+
           stop_workers
           stop
-
-          raise SignalException, "SIGTERM"
+          @events.fire_on_stopped!
+          raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
+          exit 0 # Clean exit, workers were stopped
         end
       end
     end
@@ -388,15 +354,25 @@ module Puma
 
       output_header "cluster"
 
-      log "* Process workers: #{@options[:workers]}"
-
-      before = Thread.list
+      # This is aligned with the output from Runner, see Runner#output_header
+      log "* Workers: #{@options[:workers]}"
 
       if preload?
+        # Threads explicitly marked as fork safe will be ignored. Used in Rails,
+        # but may be used by anyone. Note that we need to explicit
+        # Process::Waiter check here because there's a bug in Ruby 2.6 and below
+        # where calling thread_variable_get on a Process::Waiter will segfault.
+        # We can drop that clause once those versions of Ruby are no longer
+        # supported.
+        fork_safe = ->(t) { !t.is_a?(Process::Waiter) && t.thread_variable_get(:fork_safe) }
+
+        before = Thread.list.reject(&fork_safe)
+
+        log "* Restarts: (\u2714) hot (\u2716) phased"
         log "* Preloading application"
         load_and_bind
 
-        after = Thread.list
+        after = Thread.list.reject(&fork_safe)
 
         if after.size > before.size
           threads = (after - before)
@@ -410,14 +386,14 @@ module Puma
           end
         end
       else
-        log "* Phased restart available"
+        log "* Restarts: (\u2714) hot (\u2714) phased"
 
-        unless @launcher.config.app_configured?
+        unless @config.app_configured?
           error "No application configured, nothing to run"
           exit 1
         end
 
-        @launcher.binder.parse @options[:binds], self
+        @launcher.binder.parse @options[:binds]
       end
 
       read, @wakeup = Puma::Util.pipe
@@ -431,12 +407,13 @@ module Puma
       #
       @check_pipe, @suicide_pipe = Puma::Util.pipe
 
-      if @options[:daemon]
-        Process.daemon(true)
-      else
-        log "Use Ctrl-C to stop"
-      end
-
+      # Separate pipe used by worker 0 to receive commands to
+      # fork new worker processes.
+      @fork_pipe, @fork_writer = Puma::Util.pipe
+
+      log "Use Ctrl-C to stop"
+
+      single_worker_warning
 
       redirect_io
 
@@ -448,7 +425,9 @@ module Puma
 
       @master_read, @worker_write = read, @wakeup
 
-      @launcher.config.run_hooks :before_fork, nil
+      @options[:worker_write] = @worker_write
+
+      @config.run_hooks(:before_fork, nil, @log_writer)
 
       spawn_workers
 
@@ -456,49 +435,89 @@ module Puma
         stop
       end
 
-      @launcher.events.fire_on_booted!
-
       begin
-        force_check = false
+        booted = false
+        in_phased_restart = false
+        workers_not_booted = @options[:workers]
 
         while @status == :run
           begin
+            if @options[:idle_timeout] && all_workers_idle_timed_out?
+              log "- All workers reached idle timeout"
+              break
+            end
+
             if @phased_restart
               start_phased_restart
               @phased_restart = false
+              in_phased_restart = true
+              workers_not_booted = @options[:workers]
             end
 
-            check_workers force_check
-
-            force_check = false
-
-            res = IO.select([read], nil, nil, WORKER_CHECK_INTERVAL)
+            check_workers
 
-            if res
+            if read.wait_readable([0, @next_check - Time.now].max)
               req = read.read_nonblock(1)
+              next unless req
 
-              next if !req || req == "!"
+              if req == PIPE_WAKEUP
+                @next_check = Time.now
+                next
+              end
 
               result = read.gets
               pid = result.to_i
 
+              if req == PIPE_BOOT || req == PIPE_FORK
+                pid, idx = result.split(':').map(&:to_i)
+                w = worker_at idx
+                w.pid = pid if w.pid.nil?
+              end
+
              if w = @workers.find { |x| x.pid == pid }
                case req
-                when "b"
+                when PIPE_BOOT
                  w.boot!
-                  log "- Worker #{w.index} (pid: #{pid}) booted, phase: #{w.phase}"
-                  force_check = true
-                when "t"
-                  w.dead!
-                  force_check = true
-                when "p"
-                  w.ping!(result.sub(/^\d+/,'').chomp)
+                  log "- Worker #{w.index} (PID: #{pid}) booted in #{w.uptime.round(2)}s, phase: #{w.phase}"
+                  @next_check = Time.now
+                  workers_not_booted -= 1
+                when PIPE_EXTERNAL_TERM
+                  # external term, see worker method, Signal.trap "SIGTERM"
+                  w.term!
+                when PIPE_TERM
+                  w.term unless w.term?
+                when PIPE_PING
+                  status = result.sub(/^\d+/,'').chomp
+                  w.ping!(status)
+                  @events.fire(:ping!, w)
+
+                  if in_phased_restart && @options[:fork_worker] && workers_not_booted.positive? && w0 = worker_at(0)
+                    w0.ping!(status)
+                    @events.fire(:ping!, w0)
+                  end
+
+                  if !booted && @workers.none? {|worker| worker.last_status.empty?}
+                    @events.fire_on_booted!
+                    debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+                    booted = true
+                  end
+                when PIPE_IDLE
+                  if idle_workers[pid]
+                    idle_workers.delete pid
+                  else
+                    idle_workers[pid] = true
+                  end
                end
              else
                log "! Out-of-sync worker list, no #{pid} worker"
              end
            end
+
+            if in_phased_restart && workers_not_booted.zero?
+              @events.fire_on_booted!
+              debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
+              in_phased_restart = false
+            end
          rescue Interrupt
            @status = :stop
          end
@@ -512,5 +531,90 @@ module Puma
        @wakeup.close
      end
    end
+
+    private
+
+    def single_worker_warning
+      return if @options[:workers] != 1 || @options[:silence_single_worker_warning]
+
+      log "! WARNING: Detected running cluster mode with 1 worker."
+      log "! Running Puma in cluster mode with a single worker is often a misconfiguration."
+      log "! Consider running Puma in single-mode (workers = 0) in order to reduce memory overhead."
+      log "! Set the `silence_single_worker_warning` option to silence this warning message."
+    end
+
+    # loops thru @workers, removing workers that exited, and calling
+    # `#term` if needed
+    def wait_workers
+      # Reap all children, known workers or otherwise.
+      # If puma has PID 1, as it's common in containerized environments,
+      # then it's responsible for reaping orphaned processes, so we must reap
+      # all our dead children, regardless of whether they are workers we spawned
+      # or some reattached processes.
+      reaped_children = {}
+      loop do
+        begin
+          pid, status = Process.wait2(-1, Process::WNOHANG)
+          break unless pid
+          reaped_children[pid] = status
+        rescue Errno::ECHILD
+          break
+        end
+      end
+
+      @workers.reject! do |w|
+        next false if w.pid.nil?
+        begin
+          # We may need to check the PID individually because:
+          # 1. From Ruby versions 2.6 to 3.2, `Process.detach` can prevent or delay
+          #    `Process.wait2(-1)` from detecting a terminated process: https://bugs.ruby-lang.org/issues/19837.
+          # 2. When `fork_worker` is enabled, some worker may not be direct children,
+          #    but grand children. Because of this they won't be reaped by `Process.wait2(-1)`.
+          if reaped_children.delete(w.pid) || Process.wait(w.pid, Process::WNOHANG)
+            true
+          else
+            w.term if w.term?
+            nil
+          end
+        rescue Errno::ECHILD
+          begin
+            Process.kill(0, w.pid)
+            # child still alive but has another parent (e.g., using fork_worker)
+            w.term if w.term?
+            false
+          rescue Errno::ESRCH, Errno::EPERM
+            true # child is already terminated
+          end
+        end
+      end
+
+      # Log unknown children
+      reaped_children.each do |pid, status|
+        log "! reaped unknown child process pid=#{pid} status=#{status}"
+      end
+    end
+
+    # @version 5.0.0
+    def timeout_workers
+      @workers.each do |w|
+        if !w.term? && w.ping_timeout <= Time.now
+          details = if w.booted?
+            "(Worker #{w.index} failed to check in within #{@options[:worker_timeout]} seconds)"
+          else
+            "(Worker #{w.index} failed to boot within #{@options[:worker_boot_timeout]} seconds)"
+          end
+          log "! Terminating timed out worker #{details}: #{w.pid}"
+          w.kill
+        end
+      end
+    end
+
+    def idle_timed_out_worker_pids
+      idle_workers.keys
+    end
+
+    def idle_workers
+      @idle_workers ||= {}
+    end
   end
 end
```
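The cluster internals above are driven by options set through the Puma config DSL (`data/lib/puma/dsl.rb` in the file list). As a quick orientation rather than part of the diff, the sketch below shows how the new knobs introduced between 3.11.1 and 6.6.0 map onto the `Cluster` code above; the worker count and request threshold are illustrative values, not recommendations.

```ruby
# config/puma.rb — illustrative values only
workers 4

# Enables the fork_worker branches in Cluster#spawn_workers and #setup_signals:
# worker 0 becomes the template process (fed via @fork_pipe), and the :ping!
# hook triggers an automatic refork once worker 0 has served ~1000 requests.
fork_worker 1000

# Consulted by Cluster#cull_start_index when the pool shrinks:
# :youngest (the default) culls the newest workers, :oldest the longest-running.
worker_culling_strategy :oldest

# Uncomment to suppress Cluster#single_worker_warning when workers == 1 is intentional.
# silence_single_worker_warning
```

A refork can also be requested on demand by sending `SIGURG` to the master process (or via `pumactl refork`), matching the `Signal.trap "SIGURG"` registered in `setup_signals` above.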