pitchfork 0.16.0 → 0.17.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.github/workflows/ci.yml +8 -2
- data/CHANGELOG.md +10 -0
- data/Gemfile +1 -1
- data/README.md +12 -12
- data/Rakefile +14 -25
- data/docs/CONFIGURATION.md +36 -13
- data/docs/DESIGN.md +7 -7
- data/docs/FORK_SAFETY.md +3 -0
- data/docs/PHILOSOPHY.md +2 -2
- data/docs/REFORKING.md +12 -12
- data/docs/SIGNALS.md +7 -8
- data/docs/TUNING.md +3 -3
- data/examples/nginx.conf +1 -1
- data/examples/pitchfork.conf.rb +1 -1
- data/ext/pitchfork_http/c_util.h +2 -2
- data/ext/pitchfork_http/epollexclusive.h +2 -2
- data/lib/pitchfork/children.rb +1 -1
- data/lib/pitchfork/configurator.rb +13 -12
- data/lib/pitchfork/http_server.rb +64 -60
- data/lib/pitchfork/info.rb +3 -2
- data/lib/pitchfork/refork_condition.rb +1 -1
- data/lib/pitchfork/shared_memory.rb +36 -9
- data/lib/pitchfork/version.rb +1 -1
- data/lib/pitchfork/worker.rb +57 -34
- metadata +3 -3
data/lib/pitchfork/configurator.rb CHANGED

@@ -14,7 +14,7 @@ module Pitchfork
     include Pitchfork

     # :stopdoc:
-    attr_accessor :set, :config_file
+    attr_accessor :set, :config_file

     # used to stash stuff for deferred processing of cli options in
     # config.ru. Do not rely on
@@ -41,21 +41,17 @@ module Pitchfork
       :worker_processes => 1,
       :before_fork => nil,
       :after_worker_fork => lambda { |server, worker|
-        server.logger.info("
+        server.logger.info("#{worker.to_log} spawned")
       },
       :after_mold_fork => lambda { |server, worker|
-        server.logger.info("
+        server.logger.info("#{worker.to_log} spawned")
       },
       :before_worker_exit => nil,
       :after_worker_exit => lambda { |server, worker, status|
         m = if worker.nil?
-          "
-        elsif worker.mold?
-          "mold pid=#{worker.pid rescue 'unknown'} gen=#{worker.generation rescue 'unknown'} reaped (#{status.inspect})"
-        elsif worker.service?
-          "service pid=#{worker.pid rescue 'unknown'} gen=#{worker.generation rescue 'unknown'} reaped (#{status.inspect})"
+          "reaped unknown process (#{status.inspect})"
         else
-          "
+          "#{worker.to_log} reaped (#{status.inspect})"
         end
         if status.success?
           server.logger.info(m)
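Most of the log-message changes in this release replace hand-rolled `worker=... pid=... gen=...` interpolations with a single `Worker#to_log` helper (added in `data/lib/pitchfork/worker.rb`, whose diff is not shown in full here). A hypothetical sketch of such a helper, reconstructed only from the fields the old messages interpolated:

```ruby
# Hypothetical sketch of Worker#to_log -- not the actual implementation
# from data/lib/pitchfork/worker.rb, just the shape implied by the old
# log strings it replaces (role, pid, generation).
class Worker
  def to_log
    role = if mold? then "mold"
           elsif service? then "service"
           else "worker=#{nr}"
           end
    "#{role} pid=#{pid || 'unknown'} gen=#{generation}"
  end
end
```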
@@ -64,10 +60,10 @@ module Pitchfork
         end
       },
       :after_worker_ready => lambda { |server, worker|
-        server.logger.info("
+        server.logger.info("#{worker.to_log} ready")
       },
       :after_monitor_ready => lambda { |server|
-        server.logger.info("
+        server.logger.info("monitor pid=#{Process.pid} ready")
       },
       :after_worker_timeout => nil,
       :after_worker_hard_timeout => nil,
@@ -79,13 +75,14 @@ module Pitchfork
       :client_body_buffer_size => Pitchfork::Const::MAX_BODY,
       :before_service_worker_ready => nil,
       :before_service_worker_exit => nil,
+      :setpgid => false,
     }
     #:startdoc:

     def initialize(defaults = {}) #:nodoc:
       self.set = Hash.new(:unset)
       @use_defaults = defaults.delete(:use_defaults)
-      self.config_file = defaults.delete(:config_file)
+      self.config_file = defaults.delete(:config_file) { "config/pitchfork.rb" if File.exist?("config/pitchfork.rb") }

       set.merge!(DEFAULTS) if @use_defaults
       defaults.each { |key, value| self.__send__(key, value) }
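The rewritten `config_file` default leans on `Hash#delete`'s block form: the block is invoked only when the key is absent, so an explicitly passed `:config_file` still wins, and the conventional `config/pitchfork.rb` is picked up as a fallback only when it exists. In isolation:

```ruby
# Hash#delete yields to its block only when the key is missing,
# which makes the block a lazy default value.
{}.delete(:config_file) { "config/pitchfork.rb" }
# => "config/pitchfork.rb"

{ config_file: "custom.rb" }.delete(:config_file) { "config/pitchfork.rb" }
# => "custom.rb"
```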
@@ -207,6 +204,10 @@ module Pitchfork
       end
     end

+    def setpgid(bool)
+      set_bool(:setpgid, bool)
+    end
+
     def spawn_timeout(seconds)
       set_int(:spawn_timeout, seconds, 1)
     end
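`setpgid` is a new boolean option (default `false`, per the DEFAULTS hunk above) that gets threaded through to `Pitchfork.clean_fork(setpgid: ...)` in the `http_server.rb` hunks below, presumably so forked children are placed in their own process group via setpgid(2). A minimal config-file sketch, assuming the usual boolean-option semantics:

```ruby
# config/pitchfork.rb -- minimal sketch enabling the new option.
worker_processes 4
setpgid true
```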
data/lib/pitchfork/http_server.rb CHANGED

@@ -11,7 +11,7 @@ require 'pitchfork/info'
 module Pitchfork
   # This is the process manager of Pitchfork. This manages worker
   # processes which in turn handle the I/O and application process.
-  # Listener sockets are started in the
+  # Listener sockets are started in the monitor process and shared with
   # forked worker children.
   class HttpServer
     class TimeoutHandler
@@ -50,7 +50,7 @@ module Pitchfork

       def call(original_thread) # :nodoc:
         begin
-          @server.logger.error("
+          @server.logger.error("#{@worker.to_log} timed out, exiting")
           if @callback
             @callback.call(@server, @worker, Info.new(original_thread, @rack_env))
           end
@@ -80,7 +80,7 @@ module Pitchfork
     attr_accessor :app, :timeout, :timeout_signal, :soft_timeout, :cleanup_timeout, :spawn_timeout, :worker_processes,
                   :before_fork, :after_worker_fork, :after_mold_fork, :before_service_worker_ready, :before_service_worker_exit,
                   :listener_opts, :children,
-                  :orig_app, :config, :ready_pipe, :early_hints
+                  :orig_app, :config, :ready_pipe, :early_hints, :setpgid
     attr_writer :after_worker_exit, :before_worker_exit, :after_worker_ready, :after_request_complete,
                 :refork_condition, :after_worker_timeout, :after_worker_hard_timeout, :after_monitor_ready
@@ -116,11 +116,11 @@ module Pitchfork

       proc_name role: 'monitor', status: ARGV.join(' ')

-      # We use @control_socket differently in the
+      # We use @control_socket differently in the monitor and worker processes:
       #
-      # * The
-      #   initialized. Signal handlers in the
-      #   it to wake up the
+      # * The monitor process never closes or reinitializes this once
+      #   initialized. Signal handlers in the monitor process will write to
+      #   it to wake up the monitor from IO.select in exactly the same manner
       #   djb describes in https://cr.yp.to/docs/selfpipe.html
       #
       # * The workers immediately close the pipe they inherit. See the
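The comment above refers to the classic self-pipe trick: a signal handler does nothing but record the signal and write one byte to a pipe, which wakes the main loop out of `IO.select` so the signal can be handled outside handler context. A standalone sketch of the pattern (not pitchfork's actual code, which uses `@control_socket` and `awaken_monitor` as shown in later hunks):

```ruby
# Standalone sketch of djb's self-pipe trick
# (https://cr.yp.to/docs/selfpipe.html).
require "socket"

reader, writer = Socket.pair(:UNIX, :DGRAM)
sig_queue = []

trap(:USR1) do
  sig_queue << :USR1
  writer.sendmsg_nonblock(".", exception: false) # wake the select below
end

Process.kill(:USR1, Process.pid) # simulate a signal arriving

IO.select([reader])                       # wakes as soon as the handler writes
reader.recvmsg_nonblock(exception: false) # drain the wakeup byte
sig_queue.shift # => :USR1, handled outside the signal handler
```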
@@ -142,7 +142,7 @@ module Pitchfork
       # attempt to connect to the listener(s)
       config.commit!(self, :skip => [:listeners, :pid])
       @orig_app = app
-      # list of signals we care about and trap in
+      # list of signals we care about and trap in monitor.
       @queue_sigs = [
         :QUIT, :INT, :TERM, :USR2, :TTIN, :TTOU ]
@@ -157,16 +157,16 @@ module Pitchfork
       # This socketpair is used to wake us up from select(2) in #join when signals
       # are trapped. See trap_deferred.
       # It's also used by newly spawned children to send their soft_signal pipe
-      # to the
+      # to the monitor when they are spawned.
       @control_socket.replace(Pitchfork.socketpair)
       Info.keep_ios(@control_socket)
-      @
+      @monitor_pid = $$

       # setup signal handlers before writing pid file in case people get
       # trigger happy and send signals as soon as the pid file exists.
       # Note that signals don't actually get handled until the #join method
-      @queue_sigs.each { |sig| trap(sig) { @sig_queue << sig;
-      trap(:CHLD) {
+      @queue_sigs.each { |sig| trap(sig) { @sig_queue << sig; awaken_monitor } }
+      trap(:CHLD) { awaken_monitor }

       if REFORKING_AVAILABLE
         spawn_initial_mold
@@ -224,7 +224,7 @@ module Pitchfork
     # to delay between retries.
     # A negative value for +:tries+ indicates the listen will be
     # retried indefinitely, this is useful when workers belonging to
-    # different
+    # different monitors are spawned during a transparent upgrade.
     def listen(address, opt = listener_opts[address] || {})
       address = config.expand_addr(address)
       return if String === address && listener_names.include?(address)
@@ -291,7 +291,7 @@ module Pitchfork

       proc_name role: 'monitor', status: ARGV.join(' ')

-      logger.info "
+      logger.info "monitor process ready" # test_exec.rb relies on this message
       if @ready_pipe
         begin
           @ready_pipe.syswrite($$.to_s)
@@ -306,11 +306,11 @@ module Pitchfork
           break
         end
       rescue => e
-        Pitchfork.log_error(@logger, "
+        Pitchfork.log_error(@logger, "monitor loop error", e)
       end
     end
     stop # gracefully shutdown all workers on our way out
-    logger.info "
+    logger.info "monitor complete status=#{@exit_status}"
     @exit_status
   end
@@ -326,7 +326,7 @@ module Pitchfork

       case message = @sig_queue.shift
       when nil
-        # avoid murdering workers after our
+        # avoid murdering workers after our monitor process (or the
         # machine) comes out of suspend/hibernation
         if (@last_check + @timeout) >= (@last_check = Pitchfork.time_now)
           sleep_time = murder_lazy_workers
@@ -339,7 +339,7 @@ module Pitchfork
           restart_outdated_workers if REFORKING_AVAILABLE
         end

-
+        monitor_sleep(sleep_time) if sleep
       when :QUIT, :TERM # graceful shutdown
         SharedMemory.shutting_down!
         logger.info "#{message} received, starting graceful shutdown"
@@ -363,19 +363,19 @@ module Pitchfork
       when Message::WorkerSpawned
         worker = @children.update(message)
         # TODO: should we send a message to the worker to acknowledge?
-        logger.info "
+        logger.info "#{worker.to_log} registered"
       when Message::MoldSpawned
         new_mold = @children.update(message)
-        logger.info("
+        logger.info("#{new_mold.to_log} spawned")
       when Message::ServiceSpawned
         new_service = @children.update(message)
-        logger.info("
+        logger.info("#{new_service.to_log} spawned")
       when Message::MoldReady
         old_molds = @children.molds
         new_mold = @children.update(message)
-        logger.info("
+        logger.info("#{new_mold.to_log} ready")
         old_molds.each do |old_mold|
-          logger.info("Terminating old
+          logger.info("Terminating old #{old_mold.to_log}")
           old_mold.soft_kill(:TERM)
         end
       else
@@ -384,7 +384,7 @@ module Pitchfork
       end
     end

-    # Terminates all workers, but does not exit
+    # Terminates all workers, but does not exit monitor process
     def stop(graceful = true)
       proc_name role: 'monitor', status: 'shutting down'
       @respawn = false
@@ -413,7 +413,7 @@ module Pitchfork
     end

     def worker_exit(worker)
-      logger.info "
+      logger.info "#{worker.to_log} exiting"
       proc_name status: "exiting"

       if @before_worker_exit
@@ -427,7 +427,7 @@ module Pitchfork
     end

     def service_exit(service)
-      logger.info "
+      logger.info "#{service.to_log} exiting"
       proc_name status: "exiting"

       if @before_service_worker_exit
@@ -468,7 +468,7 @@ module Pitchfork
     private

     # wait for a signal handler to wake us up and then consume the pipe
-    def
+    def monitor_sleep(sec)
       @control_socket[0].wait(sec) or return
       case message = @control_socket[0].recvmsg_nonblock(exception: false)
       when :wait_readable, NOOP
@@ -478,9 +478,9 @@ module Pitchfork
       end
     end

-    def
-      return if $$ != @
-      @control_socket[1].sendmsg_nonblock(NOOP, exception: false) # wakeup
+    def awaken_monitor
+      return if $$ != @monitor_pid
+      @control_socket[1].sendmsg_nonblock(NOOP, exception: false) # wakeup monitor process from select
     end

     # reaps all unreaped workers
@@ -548,11 +548,7 @@ module Pitchfork
         end
       end

-
-        logger.error "mold pid=#{child.pid} gen=#{child.generation} timed out, killing"
-      else
-        logger.error "worker=#{child.nr} pid=#{child.pid} gen=#{child.generation} timed out, killing"
-      end
+      logger.error "#{child.to_log} timed out, killing"
       @children.hard_kill(@timeout_signal.call(child.pid), child) # take no prisoners for hard timeout violations
     end
@@ -572,7 +568,7 @@ module Pitchfork

     def after_fork_internal
       @promotion_lock.at_fork
-      @control_socket[0].close_write # this is
+      @control_socket[0].close_write # this is monitor-only, now
       @ready_pipe.close if @ready_pipe
       Pitchfork::Configurator::RACKUP.clear
       @ready_pipe = @init_listeners = nil
@@ -583,12 +579,13 @@ module Pitchfork
     end

     def spawn_worker(worker, detach:)
-      logger.info("
+      logger.info("#{worker.to_log} spawning...")

       # We set the deadline before spawning the child so that if for some
       # reason it gets stuck before reaching the worker loop,
       # the monitor process will kill it.
       worker.update_deadline(@spawn_timeout)
+
       @before_fork&.call(self)
       fork_sibling("spawn_worker") do
         worker.pid = Process.pid
@@ -646,7 +643,7 @@ module Pitchfork
     end

     def spawn_service(service, detach:)
-      logger.info("
+      logger.info("#{service.to_log} spawning...")

       # We set the deadline before spawning the child so that if for some
       # reason it gets stuck before reaching the worker loop,
@@ -667,7 +664,7 @@ module Pitchfork
     def spawn_initial_mold
       mold = Worker.new(nil)
       mold.create_socketpair!
-      mold.pid = Pitchfork.clean_fork do
+      mold.pid = Pitchfork.clean_fork(setpgid: setpgid) do
         mold.pid = Process.pid
         @promotion_lock.try_lock
         mold.after_fork_in_child
712
709
|
spawn_worker(worker, detach: false)
|
713
710
|
end
|
714
711
|
# We could directly register workers when we spawn from the
|
715
|
-
#
|
712
|
+
# monitor, like pitchfork does. However it is preferable to
|
716
713
|
# always go through the asynchronous registering process for
|
717
714
|
# consistency.
|
718
715
|
@children.register(worker)
|
@@ -724,7 +721,7 @@ module Pitchfork
|
|
724
721
|
|
725
722
|
def wait_for_pending_workers
|
726
723
|
while @children.pending_workers?
|
727
|
-
|
724
|
+
monitor_sleep(0.5)
|
728
725
|
if monitor_loop(false) == StopIteration
|
729
726
|
return StopIteration
|
730
727
|
end
|
@@ -759,9 +756,9 @@ module Pitchfork
       if service = @children.service
         if service.outdated?
           if service.soft_kill(:TERM)
-            logger.info("Sent SIGTERM to
+            logger.info("Sent SIGTERM to #{service.to_log}")
           else
-            logger.info("Failed to send SIGTERM to
+            logger.info("Failed to send SIGTERM to #{service.to_log}")
           end
         end
       end
@@ -770,10 +767,10 @@ module Pitchfork
       outdated_workers = @children.workers.select { |w| !w.exiting? && w.generation < @children.mold.generation }
       outdated_workers.each do |worker|
         if worker.soft_kill(:TERM)
-          logger.info("Sent SIGTERM to
+          logger.info("Sent SIGTERM to #{worker.to_log}")
           workers_to_restart -= 1
         else
-          logger.info("Failed to send SIGTERM to
+          logger.info("Failed to send SIGTERM to #{worker.to_log}")
         end
         break if workers_to_restart <= 0
       end
@@ -885,7 +882,6 @@ module Pitchfork
         end
       end
       timeout_handler.finished
-      env
     end

     def nuke_listeners!(readers)
@@ -895,14 +891,18 @@ module Pitchfork
       tmp.each { |io| io.close rescue nil } # break out of IO.select
     end

+    def reset_signal_handlers
+      [:QUIT, :TERM, :INT].each { |sig| trap(sig) { exit!(0) } }
+    end
+
     # gets rid of stuff the worker has no business keeping track of
     # to free some resources and drops all sig handlers.
-    # traps for USR2, and HUP may be set in the
+    # traps for USR2, and HUP may be set in the after_worker_fork/after_mold_fork Procs
     # by the user.
     def init_worker_process(worker)
       proc_name role: "(gen:#{worker.generation}) worker[#{worker.nr}]", status: "init"
       worker.reset
-      worker.
+      worker.register_to_monitor(@control_socket[1])
       # we'll re-trap :QUIT and :TERM later for graceful shutdown iff we accept clients
       exit_sigs = [ :QUIT, :TERM, :INT ]
       exit_sigs.each { |sig| trap(sig) { exit!(0) } }
@@ -927,8 +927,8 @@ module Pitchfork

     def init_service_process(service)
       proc_name role: "(gen:#{service.generation}) mold", status: "init"
-      LISTENERS.each(&:close) # Don't appear as listening to incoming requests
-      service.
+      LISTENERS.each(&:close).clear # Don't appear as listening to incoming requests
+      service.register_to_monitor(@control_socket[1])
       readers = [service]
       trap(:QUIT) { nuke_listeners!(readers) }
       trap(:TERM) { nuke_listeners!(readers) }
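The switch to `LISTENERS.each(&:close).clear` works because `Array#each` returns its receiver, so one expression both closes every listener socket and empties the shared list:

```ruby
require "stringio"

# Array#each returns the receiver, so .clear can be chained:
ios = [StringIO.new, StringIO.new]
ios.each(&:close).clear # close each IO, then empty the array
ios.empty? # => true
```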
@@ -971,6 +971,8 @@ module Pitchfork

       proc_name status: "ready"

+      worker.ready = true
+
       while readers[0]
         begin
           worker.update_deadline(@timeout)
@@ -986,7 +988,7 @@ module Pitchfork
           if Info.fork_safe?
             spawn_mold(worker)
           else
-            logger.error("
+            logger.error("#{worker.to_log} is no longer fork safe, can't refork")
           end
         when Message
           worker.update(client)
@@ -1006,7 +1008,7 @@ module Pitchfork
           if @refork_condition.met?(worker, logger)
             proc_name status: "requests: #{worker.requests_count}, spawning mold"
             if spawn_mold(worker)
-              logger.info("
+              logger.info("#{worker.to_log} refork condition met, promoting ourselves")
             end
             @refork_condition.backoff!
           end
@@ -1069,11 +1071,11 @@ module Pitchfork
       spawn_worker(Worker.new(message.nr, generation: mold.generation), detach: true)
     rescue ForkFailure
       if retries > 0
-        @logger.fatal("
+        @logger.fatal("#{mold.to_log} failed to spawn a worker, retrying")
         retries -= 1
         retry
       else
-        @logger.fatal("
+        @logger.fatal("#{mold.to_log} failed to spawn a worker twice in a row - corrupted mold process?")
         Process.exit(1)
       end
     rescue => error
@@ -1085,11 +1087,11 @@ module Pitchfork
       spawn_service(Service.new(generation: mold.generation), detach: true)
     rescue ForkFailure
       if retries > 0
-        @logger.fatal("
+        @logger.fatal("#{mold.to_log} failed to spawn a service, retrying")
         retries -= 1
         retry
       else
-        @logger.fatal("
+        @logger.fatal("#{mold.to_log} failed to spawn a service twice in a row - corrupted mold process?")
         Process.exit(1)
       end
     rescue => error
@@ -1163,12 +1165,14 @@ module Pitchfork
     FORK_TIMEOUT = 5

     def fork_sibling(role, &block)
+      reset_signal_handlers
+
       if REFORKING_AVAILABLE
         r, w = Pitchfork::Info.keep_ios(IO.pipe)
         # We double fork so that the new worker is re-attached back
-        # to the
+        # to the monitor.
         # This requires either PR_SET_CHILD_SUBREAPER which is exclusive to Linux 3.4
-        # or the
+        # or the monitor to be PID 1.
         if middle_pid = FORK_LOCK.synchronize { Process.fork } # parent
           w.close
           # We need to wait(2) so that the middle process doesn't end up a zombie.
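The comment explains why `fork_sibling` double-forks: the intermediate process exits immediately, so the grand-child is re-parented to the nearest ancestor marked with `PR_SET_CHILD_SUBREAPER` (or to PID 1), which is how the monitor can adopt workers spawned by a mold. A standalone sketch of the double fork itself, omitting the subreaper setup pitchfork performs elsewhere:

```ruby
# Minimal double-fork sketch: the middle process forks the real child and
# exits; the parent reaps only the middle process, and the grand-child is
# re-parented (to PID 1, or to the nearest "child subreaper" ancestor).
r, w = IO.pipe

middle_pid = Process.fork do
  r.close
  grand_child = Process.fork do
    w.close
    sleep 0.1 # the sibling's real work would happen here
    exit!(0)
  end
  w.syswrite(grand_child.to_s) # report the grand-child pid upward
  exit!(0)
end

w.close
sibling_pid = r.read.to_i
Process.waitpid(middle_pid) # reap the middle process so it's not a zombie
puts "sibling pid: #{sibling_pid}"
```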
@@ -1188,7 +1192,7 @@ module Pitchfork
       else # first child
         r.close
         Process.setproctitle("<pitchfork fork_sibling(#{role})>")
-        pid = Pitchfork.clean_fork do
+        pid = Pitchfork.clean_fork(setpgid: setpgid) do
           # detach into a grand child
           w.close
           yield
@@ -1203,7 +1207,7 @@ module Pitchfork
         exit!
       end
     else
-      Pitchfork.clean_fork(&block)
+      Pitchfork.clean_fork(setpgid: setpgid, &block)
     end
   end
data/lib/pitchfork/info.rb CHANGED
@@ -98,13 +98,14 @@ module Pitchfork
     def live_workers_count
       now = Pitchfork.time_now(true)
       (0...workers_count).count do |nr|
-        SharedMemory.
+        state = SharedMemory.worker_state(nr)
+        state.ready? && state.deadline > now
       end
     end

     # Returns true if the server is shutting down.
     # This can be useful to implement health check endpoints, so they
-    # can fail immediately after TERM/QUIT/INT was received by the
+    # can fail immediately after TERM/QUIT/INT was received by the monitor
     # process.
     # Otherwise they may succeed while Pitchfork is draining requests causing
     # more requests to be sent.
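`live_workers_count` now counts only workers that have both set their ready bit and still have a deadline in the future (see the `WorkerState` hunk in `shared_memory.rb` below). Combined with the shutdown check described in the comment above, a downstream app could build a health check on it; a hypothetical Rack endpoint (the endpoint itself is not part of this diff, only the two `Info` calls are pitchfork's):

```ruby
# Hypothetical Rack health-check endpoint built on Pitchfork::Info.
require "pitchfork/info"

HealthCheck = proc do |_env|
  if Pitchfork::Info.shutting_down?
    [503, { "content-type" => "text/plain" }, ["shutting down"]]
  elsif Pitchfork::Info.live_workers_count < 1
    [503, { "content-type" => "text/plain" }, ["no live workers"]]
  else
    [200, { "content-type" => "text/plain" }, ["ok"]]
  end
end
```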
data/lib/pitchfork/refork_condition.rb CHANGED

@@ -19,7 +19,7 @@ module Pitchfork
       if worker.requests_count >= limit
         return false if backoff?

-        logger.info("
+        logger.info("#{worker.to_log} processed #{worker.requests_count} requests, triggering a refork")
         return true
       end
     end
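This message is logged by the requests-count refork condition, whose thresholds come from the `refork_after` configuration option (docs/CONFIGURATION.md is also updated in this release). A typical setup:

```ruby
# config/pitchfork.rb -- refork after the 50th, 100th, and 1000th
# request, then stop promoting new molds.
refork_after [50, 100, 1000]
```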
data/lib/pitchfork/shared_memory.rb CHANGED

@@ -44,20 +44,47 @@ module Pitchfork
     end
   end

-
-
+  class WorkerState
+    def initialize(field)
+      @field = field
+    end
+
+    def ready?
+      (@field.value & 1) == 1
+    end
+
+    def ready=(bool)
+      if bool
+        @field.value |= 1
+      else
+        @field.value &= ~1
+      end
+    end
+
+    def deadline=(value)
+      # Shift the value up and preserve the current ready bit.
+      @field.value = (value << 1) | (@field.value & 1)
+    end
+
+    def deadline
+      @field.value >> 1
+    end
+  end
+
+  def mold_state
+    WorkerState.new(self[MOLD_TICK_OFFSET])
   end

-  def
-    self[MOLD_PROMOTION_TICK_OFFSET]
+  def mold_promotion_state
+    WorkerState.new(self[MOLD_PROMOTION_TICK_OFFSET])
   end

-  def
-    self[SERVICE_TICK_OFFSET]
+  def service_state
+    WorkerState.new(self[SERVICE_TICK_OFFSET])
   end

-  def
-    self[WORKER_TICK_OFFSET + worker_nr]
+  def worker_state(worker_nr)
+    WorkerState.new(self[WORKER_TICK_OFFSET + worker_nr])
   end

   def [](offset)
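`WorkerState` packs two values into each shared-memory integer: bit 0 is the ready flag and the remaining bits hold the deadline, so both can live in the single per-worker field the old tick accessors exposed. The arithmetic in isolation:

```ruby
# The WorkerState packing in isolation: bit 0 = ready flag,
# bits 1 and up = deadline timestamp.
value = 0
deadline = 1_700_000_000

value = (deadline << 1) | (value & 1) # deadline=, ready bit preserved (0)
value |= 1                            # ready = true

value >> 1       # => 1700000000 (deadline)
(value & 1) == 1 # => true (ready?)
```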
@@ -75,4 +102,4 @@ module Pitchfork
       end
     end
   end
-end
+end