jun-puma 1.0.0-java
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +7 -0
- data/History.md +2897 -0
- data/LICENSE +29 -0
- data/README.md +475 -0
- data/bin/puma +10 -0
- data/bin/puma-wild +25 -0
- data/bin/pumactl +12 -0
- data/docs/architecture.md +74 -0
- data/docs/compile_options.md +55 -0
- data/docs/deployment.md +102 -0
- data/docs/fork_worker.md +35 -0
- data/docs/images/puma-connection-flow-no-reactor.png +0 -0
- data/docs/images/puma-connection-flow.png +0 -0
- data/docs/images/puma-general-arch.png +0 -0
- data/docs/jungle/README.md +9 -0
- data/docs/jungle/rc.d/README.md +74 -0
- data/docs/jungle/rc.d/puma +61 -0
- data/docs/jungle/rc.d/puma.conf +10 -0
- data/docs/kubernetes.md +78 -0
- data/docs/nginx.md +80 -0
- data/docs/plugins.md +38 -0
- data/docs/rails_dev_mode.md +28 -0
- data/docs/restart.md +65 -0
- data/docs/signals.md +98 -0
- data/docs/stats.md +142 -0
- data/docs/systemd.md +253 -0
- data/docs/testing_benchmarks_local_files.md +150 -0
- data/docs/testing_test_rackup_ci_files.md +36 -0
- data/ext/puma_http11/PumaHttp11Service.java +17 -0
- data/ext/puma_http11/ext_help.h +15 -0
- data/ext/puma_http11/extconf.rb +80 -0
- data/ext/puma_http11/http11_parser.c +1057 -0
- data/ext/puma_http11/http11_parser.h +65 -0
- data/ext/puma_http11/http11_parser.java.rl +145 -0
- data/ext/puma_http11/http11_parser.rl +149 -0
- data/ext/puma_http11/http11_parser_common.rl +54 -0
- data/ext/puma_http11/mini_ssl.c +842 -0
- data/ext/puma_http11/no_ssl/PumaHttp11Service.java +15 -0
- data/ext/puma_http11/org/jruby/puma/Http11.java +228 -0
- data/ext/puma_http11/org/jruby/puma/Http11Parser.java +455 -0
- data/ext/puma_http11/org/jruby/puma/MiniSSL.java +509 -0
- data/ext/puma_http11/puma_http11.c +495 -0
- data/lib/puma/app/status.rb +96 -0
- data/lib/puma/binder.rb +502 -0
- data/lib/puma/cli.rb +247 -0
- data/lib/puma/client.rb +682 -0
- data/lib/puma/cluster/worker.rb +180 -0
- data/lib/puma/cluster/worker_handle.rb +96 -0
- data/lib/puma/cluster.rb +616 -0
- data/lib/puma/commonlogger.rb +115 -0
- data/lib/puma/configuration.rb +390 -0
- data/lib/puma/const.rb +307 -0
- data/lib/puma/control_cli.rb +316 -0
- data/lib/puma/detect.rb +45 -0
- data/lib/puma/dsl.rb +1425 -0
- data/lib/puma/error_logger.rb +113 -0
- data/lib/puma/events.rb +57 -0
- data/lib/puma/io_buffer.rb +46 -0
- data/lib/puma/jruby_restart.rb +11 -0
- data/lib/puma/json_serialization.rb +96 -0
- data/lib/puma/launcher/bundle_pruner.rb +104 -0
- data/lib/puma/launcher.rb +488 -0
- data/lib/puma/log_writer.rb +147 -0
- data/lib/puma/minissl/context_builder.rb +96 -0
- data/lib/puma/minissl.rb +459 -0
- data/lib/puma/null_io.rb +84 -0
- data/lib/puma/plugin/systemd.rb +90 -0
- data/lib/puma/plugin/tmp_restart.rb +36 -0
- data/lib/puma/plugin.rb +111 -0
- data/lib/puma/puma_http11.jar +0 -0
- data/lib/puma/rack/builder.rb +297 -0
- data/lib/puma/rack/urlmap.rb +93 -0
- data/lib/puma/rack_default.rb +24 -0
- data/lib/puma/reactor.rb +125 -0
- data/lib/puma/request.rb +688 -0
- data/lib/puma/runner.rb +213 -0
- data/lib/puma/sd_notify.rb +149 -0
- data/lib/puma/server.rb +680 -0
- data/lib/puma/single.rb +69 -0
- data/lib/puma/state_file.rb +68 -0
- data/lib/puma/thread_pool.rb +434 -0
- data/lib/puma/util.rb +141 -0
- data/lib/puma.rb +78 -0
- data/lib/rack/handler/puma.rb +144 -0
- data/tools/Dockerfile +16 -0
- data/tools/trickletest.rb +44 -0
- metadata +153 -0
data/lib/puma/cluster.rb
ADDED
@@ -0,0 +1,616 @@
# frozen_string_literal: true

require_relative 'runner'
require_relative 'util'
require_relative 'plugin'
require_relative 'cluster/worker_handle'
require_relative 'cluster/worker'

module Puma
  # This class is instantiated by the `Puma::Launcher` and used
  # to boot and serve a Ruby application when puma "workers" are needed,
  # i.e. when running in multi-process mode. For example, `$ puma -w 5`.
  #
  # An instance of this class will spawn the number of processes passed in
  # via the `spawn_workers` method call. Each worker will have its own
  # instance of a `Puma::Server`.
  class Cluster < Runner
    def initialize(launcher)
      super(launcher)

      @phase = 0
      @workers = []
      @next_check = Time.now

      @phased_restart = false
    end

    # Returns the list of cluster worker handles.
    # @return [Array<Puma::Cluster::WorkerHandle>]
    attr_reader :workers
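For context, cluster mode is typically enabled from the CLI (`puma -w 5`) or from a config file, which is what causes `Puma::Launcher` to boot this class instead of `Puma::Single`. A minimal sketch of such a config; the counts are illustrative, not recommendations:

    # config/puma.rb -- illustrative values only
    workers 5        # boot 5 worker processes (cluster mode)
    threads 1, 5     # each worker runs its own Puma::Server thread pool
    preload_app!     # load the app before forking (disables phased restart)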

    def stop_workers
      log "- Gracefully shutting down workers..."
      @workers.each { |x| x.term }

      begin
        loop do
          wait_workers
          break if @workers.reject {|w| w.pid.nil?}.empty?
          sleep 0.2
        end
      rescue Interrupt
        log "! Cancelled waiting for workers"
      end
    end

    def start_phased_restart
      @events.fire_on_restart!
      @phase += 1
      log "- Starting phased worker restart, phase: #{@phase}"

      # Be sure to change the directory again before loading
      # the app. This way we can pick up new code.
      dir = @launcher.restart_dir
      log "+ Changing to #{dir}"
      Dir.chdir dir
    end

    def redirect_io
      super

      @workers.each { |x| x.hup }
    end

    def spawn_workers
      diff = @options[:workers] - @workers.size
      return if diff < 1

      master = Process.pid
      if @options[:fork_worker]
        @fork_writer << "-1\n"
      end

      diff.times do
        idx = next_worker_index

        if @options[:fork_worker] && idx != 0
          @fork_writer << "#{idx}\n"
          pid = nil
        else
          pid = spawn_worker(idx, master)
        end

        debug "Spawned worker: #{pid}"
        @workers << WorkerHandle.new(idx, pid, @phase, @options)
      end

      if @options[:fork_worker] && all_workers_in_phase?
        @fork_writer << "0\n"
      end
    end
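When `fork_worker` is enabled, `spawn_workers` above drives worker 0 through `@fork_writer` with a newline-delimited protocol: `-1`, then one worker index per line to fork, then `0` once all workers share the current phase. A rough sketch of how the other end might interpret those messages; the handler names are hypothetical, the real reader lives in `Puma::Cluster::Worker`:

    # Hypothetical illustration of the fork-pipe wire format written above.
    fork_pipe.each_line do |line|
      case (msg = line.to_i)
      when -1 then pause_spawning       # "-1\n" precedes a batch of fork requests
      when 0  then resume_spawning      # "0\n" marks all workers in-phase
      else         fork_child_for(msg)  # "#{idx}\n" asks worker 0 to fork worker idx
      end
    end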

    # @version 5.0.0
    def spawn_worker(idx, master)
      @config.run_hooks(:before_worker_fork, idx, @log_writer)

      pid = fork { worker(idx, master) }
      if !pid
        log "! Complete inability to spawn new workers detected"
        log "! Seppuku is the only choice."
        exit! 1
      end

      @config.run_hooks(:after_worker_fork, idx, @log_writer)
      pid
    end

    def cull_workers
      diff = @workers.size - @options[:workers]
      return if diff < 1
      debug "Culling #{diff} workers"

      workers = workers_to_cull(diff)
      debug "Workers to cull: #{workers.inspect}"

      workers.each do |worker|
        log "- Worker #{worker.index} (PID: #{worker.pid}) terminating"
        worker.term
      end
    end

    def workers_to_cull(diff)
      workers = @workers.sort_by(&:started_at)

      # In fork_worker mode, worker 0 acts as our master process.
      # We should avoid culling it to preserve copy-on-write memory gains.
      workers.reject! { |w| w.index == 0 } if @options[:fork_worker]

      workers[cull_start_index(diff), diff]
    end

    def cull_start_index(diff)
      case @options[:worker_culling_strategy]
      when :oldest
        0
      else # :youngest
        -diff
      end
    end

    # @!attribute [r] next_worker_index
    def next_worker_index
      occupied_positions = @workers.map(&:index)
      idx = 0
      idx += 1 until !occupied_positions.include?(idx)
      idx
    end
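The slice arithmetic in `workers_to_cull` is easy to miss: the handles are sorted by `started_at`, so a start index of `0` takes `diff` workers from the oldest end, while `-diff` takes them from the youngest (the `else` branch above). The same Array slice on illustrative data:

    # Workers sorted oldest-first, as in workers_to_cull; cull 2 of 5.
    sorted = [:w_oldest, :w2, :w3, :w4, :w_youngest]
    diff   = 2
    sorted[0, diff]      #=> [:w_oldest, :w2]       (:oldest strategy)
    sorted[-diff, diff]  #=> [:w4, :w_youngest]     (:youngest, the else branch)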

    def worker_at(idx)
      @workers.find { |w| w.index == idx }
    end

    def all_workers_booted?
      @workers.count { |w| !w.booted? } == 0
    end

    def all_workers_in_phase?
      @workers.all? { |w| w.phase == @phase }
    end

    def all_workers_idle_timed_out?
      (@workers.map(&:pid) - idle_timed_out_worker_pids).empty?
    end

    def check_workers
      return if @next_check >= Time.now

      @next_check = Time.now + @options[:worker_check_interval]

      timeout_workers
      wait_workers
      cull_workers
      spawn_workers

      if all_workers_booted?
        # If we're running at proper capacity, check to see if
        # we need to phase any workers out (which will restart
        # in the right phase).
        #
        w = @workers.find { |x| x.phase != @phase }

        if w
          log "- Stopping #{w.pid} for phased upgrade..."
          unless w.term?
            w.term
            log "- #{w.signal} sent to #{w.pid}..."
          end
        end
      end

      t = @workers.reject(&:term?)
      t.map!(&:ping_timeout)

      @next_check = [t.min, @next_check].compact.min
    end
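The last two lines of `check_workers` pull the next wake-up forward to the earliest pending ping timeout, so a hung worker is noticed before the next regular `worker_check_interval` tick. A compressed sketch of that scheduling decision, with illustrative times:

    # @next_check is whichever comes first: the periodic tick or the earliest
    # ping_timeout among workers not already being terminated.
    interval_tick = Time.now + 5                    # worker_check_interval (5s by default)
    ping_timeouts = [Time.now + 2, Time.now + 9]    # per-worker deadlines
    next_check    = [ping_timeouts.min, interval_tick].compact.min
    #=> the Time.now + 2 deadline wins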

    def worker(index, master)
      @workers = []

      @master_read.close
      @suicide_pipe.close
      @fork_writer.close

      pipes = { check_pipe: @check_pipe, worker_write: @worker_write }
      if @options[:fork_worker]
        pipes[:fork_pipe] = @fork_pipe
        pipes[:wakeup] = @wakeup
      end

      server = start_server if preload?
      new_worker = Worker.new index: index,
                              master: master,
                              launcher: @launcher,
                              pipes: pipes,
                              server: server
      new_worker.run
    end

    def restart
      @restart = true
      stop
    end

    def phased_restart(refork = false)
      return false if @options[:preload_app] && !refork

      @phased_restart = true
      wakeup!

      true
    end

    def stop
      @status = :stop
      wakeup!
    end

    def stop_blocked
      @status = :stop if @status == :run
      wakeup!
      @control&.stop true
      Process.waitall
    end

    def halt
      @status = :halt
      wakeup!
    end
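Note that `phased_restart` refuses to run when the app was preloaded (forked workers would keep serving the old preloaded code), unless the call is a refork driven by `fork_worker!`. A caller therefore has to check the return value; a minimal hypothetical sketch (`cluster` and `log` stand in for the real caller's context):

    # Hypothetical caller: fall back to a hot restart when phased restart
    # is unavailable (preload_app enabled and this is not a refork).
    unless cluster.phased_restart
      log "phased restart unavailable with preload_app; doing hot restart"
      cluster.restart
    end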

    def reload_worker_directory
      dir = @launcher.restart_dir
      log "+ Changing to #{dir}"
      Dir.chdir dir
    end

    # Inside of a child process, this will return all zeroes, as @workers is only populated in
    # the master process.
    # @!attribute [r] stats
    def stats
      old_worker_count = @workers.count { |w| w.phase != @phase }
      worker_status = @workers.map do |w|
        {
          started_at: utc_iso8601(w.started_at),
          pid: w.pid,
          index: w.index,
          phase: w.phase,
          booted: w.booted?,
          last_checkin: utc_iso8601(w.last_checkin),
          last_status: w.last_status,
        }
      end

      {
        started_at: utc_iso8601(@started_at),
        workers: @workers.size,
        phase: @phase,
        booted_workers: worker_status.count { |w| w[:booted] },
        old_workers: old_worker_count,
        worker_status: worker_status,
      }.merge(super)
    end
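For a sense of the shape `stats` produces (this is what `pumactl stats` ultimately serializes), a trimmed payload is sketched below; every value is invented for illustration:

    # Illustrative only; field values are made up.
    {
      started_at: "2024-01-01T00:00:00Z",
      workers: 2,
      phase: 0,
      booted_workers: 2,
      old_workers: 0,
      worker_status: [
        { started_at: "2024-01-01T00:00:01Z", pid: 101, index: 0,
          phase: 0, booted: true, last_checkin: "2024-01-01T00:05:00Z",
          last_status: { backlog: 0, running: 5, requests_count: 1234 } }
      ]
    }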

    def preload?
      @options[:preload_app]
    end

    # @version 5.0.0
    def fork_worker!
      if (worker = worker_at 0)
        worker.phase += 1
      end
      phased_restart(true)
    end

    # We do this in a separate method to keep the lambda scope
    # of the signal handlers as small as possible.
    def setup_signals
      if @options[:fork_worker]
        Signal.trap "SIGURG" do
          fork_worker!
        end

        # Auto-fork after the specified number of requests.
        if (fork_requests = @options[:fork_worker].to_i) > 0
          @events.register(:ping!) do |w|
            fork_worker! if w.index == 0 &&
              w.phase == 0 &&
              w.last_status[:requests_count] >= fork_requests
          end
        end
      end

      Signal.trap "SIGCHLD" do
        wakeup!
      end

      Signal.trap "TTIN" do
        @options[:workers] += 1
        wakeup!
      end

      Signal.trap "TTOU" do
        @options[:workers] -= 1 if @options[:workers] >= 2
        wakeup!
      end

      master_pid = Process.pid

      Signal.trap "SIGTERM" do
        # Each worker installs its own SIGTERM handler once booted.
        # Until then, this handler also runs in the worker, and the
        # worker should just exit if it receives the signal.
        if Process.pid != master_pid
          log "Early termination of worker"
          exit! 0
        else
          @launcher.close_binder_listeners

          stop_workers
          stop
          @events.fire_on_stopped!
          raise(SignalException, "SIGTERM") if @options[:raise_exception_on_sigterm]
          exit 0 # Clean exit, workers were stopped
        end
      end
    end
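These traps give the master a small operational surface: TTIN/TTOU resize the worker pool at runtime, and SIGURG triggers a refork when `fork_worker` is on. A sketch of driving them from an ops script; the PID source path here is hypothetical, in practice it comes from Puma's state or pid file:

    # Illustrative: resize a running cluster from another process.
    master_pid = Integer(File.read("tmp/puma.pid"))  # hypothetical pid source
    Process.kill(:TTIN, master_pid)   # add one worker
    Process.kill(:TTOU, master_pid)   # remove one worker (floor of 1 enforced above)
    Process.kill(:URG,  master_pid)   # refork from worker 0 (fork_worker mode)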

    def run
      @status = :run

      output_header "cluster"

      # This is aligned with the output from Runner, see Runner#output_header
      log "* Workers: #{@options[:workers]}"

      if preload?
        # Threads explicitly marked as fork safe will be ignored. Used in Rails,
        # but may be used by anyone. Note that we need the explicit
        # Process::Waiter check here because there's a bug in Ruby 2.6 and below
        # where calling thread_variable_get on a Process::Waiter will segfault.
        # We can drop that clause once those versions of Ruby are no longer
        # supported.
        fork_safe = ->(t) { !t.is_a?(Process::Waiter) && t.thread_variable_get(:fork_safe) }

        before = Thread.list.reject(&fork_safe)

        log "* Restarts: (\u2714) hot (\u2716) phased"
        log "* Preloading application"
        load_and_bind

        after = Thread.list.reject(&fork_safe)

        if after.size > before.size
          threads = (after - before)
          if threads.first.respond_to? :backtrace
            log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot:"
            threads.each do |t|
              log "! #{t.inspect} - #{t.backtrace ? t.backtrace.first : ''}"
            end
          else
            log "! WARNING: Detected #{after.size-before.size} Thread(s) started in app boot"
          end
        end
      else
        log "* Restarts: (\u2714) hot (\u2714) phased"

        unless @config.app_configured?
          error "No application configured, nothing to run"
          exit 1
        end

        @launcher.binder.parse @options[:binds]
      end

      read, @wakeup = Puma::Util.pipe

      setup_signals

      # Used by the workers to detect if the master process dies.
      # If select says that @check_pipe is ready, it's because the
      # master has exited and @suicide_pipe has been automatically
      # closed.
      #
      @check_pipe, @suicide_pipe = Puma::Util.pipe

      # Separate pipe used by worker 0 to receive commands to
      # fork new worker processes.
      @fork_pipe, @fork_writer = Puma::Util.pipe

      log "Use Ctrl-C to stop"

      single_worker_warning

      redirect_io

      Plugins.fire_background

      @launcher.write_state

      start_control

      @master_read, @worker_write = read, @wakeup

      @options[:worker_write] = @worker_write

      @config.run_hooks(:before_fork, nil, @log_writer)

      spawn_workers

      Signal.trap "SIGINT" do
        stop
      end

      begin
        booted = false
        in_phased_restart = false
        workers_not_booted = @options[:workers]

        while @status == :run
          begin
            if @options[:idle_timeout] && all_workers_idle_timed_out?
              log "- All workers reached idle timeout"
              break
            end

            if @phased_restart
              start_phased_restart
              @phased_restart = false
              in_phased_restart = true
              workers_not_booted = @options[:workers]
            end

            check_workers

            if read.wait_readable([0, @next_check - Time.now].max)
              req = read.read_nonblock(1)
              next unless req

              if req == Puma::Const::PipeRequest::WAKEUP
                @next_check = Time.now
                next
              end

              result = read.gets
              pid = result.to_i

              if req == Puma::Const::PipeRequest::BOOT || req == Puma::Const::PipeRequest::FORK
                pid, idx = result.split(':').map(&:to_i)
                w = worker_at idx
                w.pid = pid if w.pid.nil?
              end

              if w = @workers.find { |x| x.pid == pid }
                case req
                when Puma::Const::PipeRequest::BOOT
                  w.boot!
                  log "- Worker #{w.index} (PID: #{pid}) booted in #{w.uptime.round(2)}s, phase: #{w.phase}"
                  @next_check = Time.now
                  workers_not_booted -= 1
                when Puma::Const::PipeRequest::EXTERNAL_TERM
                  # external term, see worker method, Signal.trap "SIGTERM"
                  w.term!
                when Puma::Const::PipeRequest::TERM
                  w.term unless w.term?
                when Puma::Const::PipeRequest::PING
                  status = result.sub(/^\d+/,'').chomp
                  w.ping!(status)
                  @events.fire(:ping!, w)

                  if in_phased_restart && workers_not_booted.positive? && w0 = worker_at(0)
                    w0.ping!(status)
                    @events.fire(:ping!, w0)
                  end

                  if !booted && @workers.none? {|worker| worker.last_status.empty?}
                    @events.fire_on_booted!
                    debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
                    booted = true
                  end
                when Puma::Const::PipeRequest::IDLE
                  if idle_workers[pid]
                    idle_workers.delete pid
                  else
                    idle_workers[pid] = true
                  end
                end
              else
                log "! Out-of-sync worker list, no #{pid} worker"
              end
            end

            if in_phased_restart && workers_not_booted.zero?
              @events.fire_on_booted!
              debug_loaded_extensions("Loaded Extensions - master:") if @log_writer.debug?
              in_phased_restart = false
            end
          rescue Interrupt
            @status = :stop
          end
        end

        stop_workers unless @status == :halt
      ensure
        @check_pipe.close
        @suicide_pipe.close
        read.close
        @wakeup.close
      end
    end
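The `@check_pipe`/`@suicide_pipe` pair created in `run` is a classic parent-death trick: a pipe's read end reports EOF (becomes readable) once every write end is closed, and the kernel closes the master's write end automatically when the master process exits, however it dies. A self-contained demonstration of the mechanism, separate from Puma:

    # Minimal demonstration of the suicide-pipe trick used in #run.
    require 'io/wait'

    check_pipe, suicide_pipe = IO.pipe

    child = fork do
      suicide_pipe.close           # child keeps only the read end
      check_pipe.wait_readable     # returns at EOF once no writer remains
      puts "parent is gone, shutting down"
    end

    sleep 1             # parent does its work...
    suicide_pipe.close  # ...then exits (or crashes): the write end closes
    Process.wait(child)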

    private

    def single_worker_warning
      return if @options[:workers] != 1 || @options[:silence_single_worker_warning]

      log "! WARNING: Detected running cluster mode with 1 worker."
      log "! Running Puma in cluster mode with a single worker is often a misconfiguration."
      log "! Consider running Puma in single-mode (workers = 0) in order to reduce memory overhead."
      log "! Set the `silence_single_worker_warning` option to silence this warning message."
    end

    # Loops through @workers, removing workers that exited, and calling
    # `#term` if needed.
    def wait_workers
      # Reap all children, known workers or otherwise.
      # If puma has PID 1, as is common in containerized environments,
      # then it's responsible for reaping orphaned processes, so we must reap
      # all our dead children, regardless of whether they are workers we spawned
      # or some reattached processes.
      reaped_children = {}
      loop do
        begin
          pid, status = Process.wait2(-1, Process::WNOHANG)
          break unless pid
          reaped_children[pid] = status
        rescue Errno::ECHILD
          break
        end
      end

      @workers.reject! do |w|
        next false if w.pid.nil?
        begin
          # We may need to check the PID individually because:
          # 1. From Ruby versions 2.6 to 3.2, `Process.detach` can prevent or delay
          #    `Process.wait2(-1)` from detecting a terminated process: https://bugs.ruby-lang.org/issues/19837.
          # 2. When `fork_worker` is enabled, some workers may not be direct children,
          #    but grandchildren. Because of this they won't be reaped by `Process.wait2(-1)`.
          if reaped_children.delete(w.pid) || Process.wait(w.pid, Process::WNOHANG)
            true
          else
            w.term if w.term?
            nil
          end
        rescue Errno::ECHILD
          begin
            Process.kill(0, w.pid)
            # child still alive but has another parent (e.g., using fork_worker)
            w.term if w.term?
            false
          rescue Errno::ESRCH, Errno::EPERM
            true # child is already terminated
          end
        end
      end

      # Log unknown children
      reaped_children.each do |pid, status|
        log "! reaped unknown child process pid=#{pid} status=#{status}"
      end
    end
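The first loop in `wait_workers` is a standard non-blocking reap: `Process.wait2(-1, Process::WNOHANG)` returns a `[pid, status]` pair per exited child, returns `nil` when children exist but none have exited, and raises `Errno::ECHILD` once there are no children at all. A standalone demonstration:

    # Standalone demonstration of non-blocking child reaping.
    3.times { fork { exit 0 } }
    sleep 0.2                     # let the children exit

    reaped = {}
    loop do
      begin
        pid, status = Process.wait2(-1, Process::WNOHANG)
        break unless pid          # nil: children exist but none exited yet
        reaped[pid] = status
      rescue Errno::ECHILD
        break                     # no children left at all
      end
    end
    puts "reaped #{reaped.size} children"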

    # @version 5.0.0
    def timeout_workers
      @workers.each do |w|
        if !w.term? && w.ping_timeout <= Time.now
          details = if w.booted?
            "(Worker #{w.index} failed to check in within #{@options[:worker_timeout]} seconds)"
          else
            "(Worker #{w.index} failed to boot within #{@options[:worker_boot_timeout]} seconds)"
          end
          log "! Terminating timed out worker #{details}: #{w.pid}"
          w.kill
        end
      end
    end

    def idle_timed_out_worker_pids
      idle_workers.keys
    end

    def idle_workers
      @idle_workers ||= {}
    end
  end
end
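Finally, a note on the wire protocol the master's run loop consumes: each message is a one-byte request type (read via `read_nonblock(1)`) followed by a newline-terminated payload that starts with the worker PID. A hypothetical re-statement of the parsing, with the IDLE toggle called out; the actual request byte values are defined in `Puma::Const::PipeRequest`:

    # Hypothetical sketch of the master-pipe message handling in #run.
    req  = read.read_nonblock(1)         # request type byte (Puma::Const::PipeRequest)
    line = read.gets                     # payload, e.g. "12345:2\n" for BOOT/FORK (pid:index)
    pid  = line.to_i
    status = line.sub(/^\d+/, '').chomp  # PING appends a status document after the pid

    # IDLE toggles membership: the first message marks the worker idle; the
    # next one (presumably sent when it is no longer idle) clears it.
    idle_workers.key?(pid) ? idle_workers.delete(pid) : (idle_workers[pid] = true)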