unicorn 1.1.7 → 2.0.0pre1

Sign up to get free protection for your applications and to get access to all the features.
Files changed (45) hide show
  1. data/GIT-VERSION-GEN +1 -1
  2. data/GNUmakefile +14 -5
  3. data/Rakefile +3 -28
  4. data/TODO +7 -0
  5. data/bin/unicorn +9 -13
  6. data/bin/unicorn_rails +12 -14
  7. data/examples/big_app_gc.rb +33 -2
  8. data/ext/unicorn_http/global_variables.h +3 -1
  9. data/ext/unicorn_http/unicorn_http.rl +15 -6
  10. data/lib/unicorn.rb +67 -820
  11. data/lib/unicorn/app/exec_cgi.rb +3 -4
  12. data/lib/unicorn/configurator.rb +20 -25
  13. data/lib/unicorn/const.rb +26 -25
  14. data/lib/unicorn/http_request.rb +64 -57
  15. data/lib/unicorn/http_response.rb +16 -35
  16. data/lib/unicorn/http_server.rb +700 -0
  17. data/lib/unicorn/launcher.rb +4 -3
  18. data/lib/unicorn/oob_gc.rb +50 -61
  19. data/lib/unicorn/socket_helper.rb +4 -4
  20. data/lib/unicorn/tee_input.rb +18 -26
  21. data/lib/unicorn/tmpio.rb +29 -0
  22. data/lib/unicorn/util.rb +51 -85
  23. data/lib/unicorn/worker.rb +40 -0
  24. data/local.mk.sample +0 -9
  25. data/script/isolate_for_tests +43 -0
  26. data/t/GNUmakefile +8 -1
  27. data/t/t0003-working_directory.sh +0 -5
  28. data/t/t0010-reap-logging.sh +55 -0
  29. data/t/t0303-rails3-alt-working_directory_config.ru.sh +0 -5
  30. data/t/test-rails3.sh +1 -1
  31. data/test/exec/test_exec.rb +1 -1
  32. data/test/unit/test_http_parser_ng.rb +11 -0
  33. data/test/unit/test_request.rb +12 -0
  34. data/test/unit/test_response.rb +23 -21
  35. data/test/unit/test_signals.rb +1 -1
  36. data/test/unit/test_tee_input.rb +21 -19
  37. data/unicorn.gemspec +3 -2
  38. metadata +47 -25
  39. data/t/oob_gc.ru +0 -21
  40. data/t/oob_gc_path.ru +0 -21
  41. data/t/t0012-reload-empty-config.sh +0 -82
  42. data/t/t0018-write-on-close.sh +0 -23
  43. data/t/t9001-oob_gc.sh +0 -47
  44. data/t/t9002-oob_gc-path.sh +0 -75
  45. data/t/write-on-close.ru +0 -11
@@ -43,7 +43,7 @@ module Unicorn::App
43
43
 
44
44
  # Calls the app
45
45
  def call(env)
46
- out, err = Unicorn::Util.tmpio, Unicorn::Util.tmpio
46
+ out, err = Unicorn::TmpIO.new, Unicorn::TmpIO.new
47
47
  inp = force_file_input(env)
48
48
  pid = fork { run_child(inp, out, err, env) }
49
49
  inp.close
@@ -113,9 +113,8 @@ module Unicorn::App
113
113
  when /^[ \t]/ then headers[prev] << "\n#{line}" if prev
114
114
  end
115
115
  end
116
- status = headers.delete("Status") || 200
117
116
  headers['Content-Length'] = size.to_s
118
- [ status, headers, out ]
117
+ [ 200, headers, out ]
119
118
  end
120
119
 
121
120
  # ensures rack.input is a file handle that we can redirect stdin to
@@ -125,7 +124,7 @@ module Unicorn::App
125
124
  if inp.respond_to?(:size) && inp.size == 0
126
125
  ::File.open('/dev/null', 'rb')
127
126
  else
128
- tmp = Unicorn::Util.tmpio
127
+ tmp = Unicorn::TmpIO.new
129
128
 
130
129
  buf = inp.read(CHUNK_SIZE)
131
130
  begin
@@ -9,13 +9,19 @@ require 'logger'
9
9
  # nginx is also available at
10
10
  # http://unicorn.bogomips.org/examples/nginx.conf
11
11
  class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
12
+ # :stopdoc:
12
13
  # used to stash stuff for deferred processing of cli options in
13
14
  # config.ru after "working_directory" is bound. Do not rely on
14
15
  # this being around later on...
15
- RACKUP = {} # :nodoc:
16
+ RACKUP = {
17
+ :daemonize => false,
18
+ :host => Unicorn::Const::DEFAULT_HOST,
19
+ :port => Unicorn::Const::DEFAULT_PORT,
20
+ :set_listener => false,
21
+ :options => { :listeners => [] }
22
+ }
16
23
 
17
24
  # Default settings for Unicorn
18
- # :stopdoc:
19
25
  DEFAULTS = {
20
26
  :timeout => 60,
21
27
  :logger => Logger.new($stderr),
@@ -36,28 +42,28 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
36
42
 
37
43
  def initialize(defaults = {}) #:nodoc:
38
44
  self.set = Hash.new(:unset)
39
- @use_defaults = defaults.delete(:use_defaults)
45
+ use_defaults = defaults.delete(:use_defaults)
40
46
  self.config_file = defaults.delete(:config_file)
41
47
 
42
48
  # after_reload is only used by unicorn_rails, unsupported otherwise
43
49
  self.after_reload = defaults.delete(:after_reload)
44
50
 
45
- set.merge!(DEFAULTS) if @use_defaults
46
- defaults.each { |key, value| self.__send__(key, value) }
51
+ set.merge!(DEFAULTS) if use_defaults
52
+ defaults.each { |key, value| self.send(key, value) }
47
53
  Hash === set[:listener_opts] or
48
54
  set[:listener_opts] = Hash.new { |hash,key| hash[key] = {} }
49
55
  Array === set[:listeners] or set[:listeners] = []
50
- reload(false)
56
+ reload
51
57
  end
52
58
 
53
- def reload(merge_defaults = true) #:nodoc:
54
- if merge_defaults && @use_defaults
55
- set.merge!(DEFAULTS) if @use_defaults
56
- end
59
+ def reload #:nodoc:
57
60
  instance_eval(File.read(config_file), config_file) if config_file
58
61
 
59
62
  parse_rackup_file
60
63
 
64
+ RACKUP[:set_listener] and
65
+ set[:listeners] << "#{RACKUP[:host]}:#{RACKUP[:port]}"
66
+
61
67
  # unicorn_rails creates dirs here after working_directory is bound
62
68
  after_reload.call if after_reload
63
69
 
@@ -395,10 +401,7 @@ class Unicorn::Configurator < Struct.new(:set, :config_file, :after_reload)
395
401
 
396
402
  # sets the working directory for Unicorn. This ensures SIGUSR2 will
397
403
  # start a new instance of Unicorn in this directory. This may be
398
- # a symlink, a common scenario for Capistrano users. Unlike
399
- # all other Unicorn configuration directives, this binds immediately
400
- # for error checking and cannot be undone by unsetting it in the
401
- # configuration file and reloading.
404
+ # a symlink, a common scenario for Capistrano users.
402
405
  def working_directory(path)
403
406
  # just let chdir raise errors
404
407
  path = File.expand_path(path)
@@ -495,23 +498,15 @@ private
495
498
  /^#\\(.*)/ =~ File.read(ru) or return
496
499
  RACKUP[:optparse].parse!($1.split(/\s+/))
497
500
 
498
- # XXX ugly as hell, WILL FIX in 2.x (along with Rainbows!/Zbatery)
499
- host, port, set_listener, options, daemonize =
500
- eval("[ host, port, set_listener, options, daemonize ]",
501
- TOPLEVEL_BINDING)
502
-
503
- # XXX duplicate code from bin/unicorn{,_rails}
504
- set[:listeners] << "#{host}:#{port}" if set_listener
505
-
506
- if daemonize
501
+ if RACKUP[:daemonize]
507
502
  # unicorn_rails wants a default pid path, (not plain 'unicorn')
508
503
  if after_reload
509
504
  spid = set[:pid]
510
505
  pid('tmp/pids/unicorn.pid') if spid.nil? || spid == :unset
511
506
  end
512
507
  unless RACKUP[:daemonized]
513
- Unicorn::Launcher.daemonize!(options)
514
- RACKUP[:ready_pipe] = options.delete(:ready_pipe)
508
+ Unicorn::Launcher.daemonize!(RACKUP[:options])
509
+ RACKUP[:ready_pipe] = RACKUP[:options].delete(:ready_pipe)
515
510
  end
516
511
  end
517
512
  end
data/lib/unicorn/const.rb CHANGED
@@ -1,36 +1,37 @@
1
1
  # -*- encoding: binary -*-
2
2
 
3
- module Unicorn
3
+ # Frequently used constants when constructing requests or responses.
4
+ # Many times the constant just refers to a string with the same
5
+ # contents. Using these constants gave about a 3% to 10% performance
6
+ # improvement over using the strings directly. Symbols did not really
7
+ # improve things much compared to constants.
8
+ module Unicorn::Const
4
9
 
5
- # Frequently used constants when constructing requests or responses. Many times
6
- # the constant just refers to a string with the same contents. Using these constants
7
- # gave about a 3% to 10% performance improvement over using the strings directly.
8
- # Symbols did not really improve things much compared to constants.
9
- module Const
10
+ # The current version of Unicorn, currently 2.0.0pre1
11
+ UNICORN_VERSION = "2.0.0pre1"
10
12
 
11
- # The current version of Unicorn, currently 1.1.7
12
- UNICORN_VERSION="1.1.7"
13
+ # default TCP listen host address (0.0.0.0, all interfaces)
14
+ DEFAULT_HOST = "0.0.0.0"
13
15
 
14
- DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
15
- DEFAULT_PORT = 8080 # default TCP listen port
16
- DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
16
+ # default TCP listen port (8080)
17
+ DEFAULT_PORT = 8080
17
18
 
18
- # The basic max request size we'll try to read.
19
- CHUNK_SIZE=(16 * 1024)
19
+ # default TCP listen address and port (0.0.0.0:8080)
20
+ DEFAULT_LISTEN = "#{DEFAULT_HOST}:#{DEFAULT_PORT}"
20
21
 
21
- # Maximum request body size before it is moved out of memory and into a
22
- # temporary file for reading (112 kilobytes).
23
- MAX_BODY=1024 * 112
22
+ # The basic request body size we'll try to read at once (16 kilobytes).
23
+ CHUNK_SIZE = 16 * 1024
24
24
 
25
- # common errors we'll send back
26
- ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
27
- ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
28
- EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
25
+ # Maximum request body size before it is moved out of memory and into a
26
+ # temporary file for reading (112 kilobytes).
27
+ MAX_BODY = 1024 * 112
29
28
 
30
- # A frozen format for this is about 15% faster
31
- REMOTE_ADDR="REMOTE_ADDR".freeze
32
- RACK_INPUT="rack.input".freeze
33
- HTTP_EXPECT="HTTP_EXPECT"
34
- end
29
+ # :stopdoc:
30
+ # common errors we'll send back
31
+ ERROR_400_RESPONSE = "HTTP/1.1 400 Bad Request\r\n\r\n"
32
+ ERROR_500_RESPONSE = "HTTP/1.1 500 Internal Server Error\r\n\r\n"
33
+ EXPECT_100_RESPONSE = "HTTP/1.1 100 Continue\r\n\r\n"
35
34
 
35
+ HTTP_EXPECT = "HTTP_EXPECT"
36
+ # :startdoc:
36
37
  end
@@ -2,70 +2,77 @@
2
2
 
3
3
  require 'unicorn_http'
4
4
 
5
- module Unicorn
6
- class HttpRequest
5
+ class Unicorn::HttpRequest
7
6
 
8
- # default parameters we merge into the request env for Rack handlers
9
- DEFAULTS = {
10
- "rack.errors" => $stderr,
11
- "rack.multiprocess" => true,
12
- "rack.multithread" => false,
13
- "rack.run_once" => false,
14
- "rack.version" => [1, 1],
15
- "SCRIPT_NAME" => "",
7
+ # default parameters we merge into the request env for Rack handlers
8
+ DEFAULTS = {
9
+ "rack.errors" => $stderr,
10
+ "rack.multiprocess" => true,
11
+ "rack.multithread" => false,
12
+ "rack.run_once" => false,
13
+ "rack.version" => [1, 1],
14
+ "SCRIPT_NAME" => "",
16
15
 
17
- # this is not in the Rack spec, but some apps may rely on it
18
- "SERVER_SOFTWARE" => "Unicorn #{Const::UNICORN_VERSION}"
19
- }
16
+ # this is not in the Rack spec, but some apps may rely on it
17
+ "SERVER_SOFTWARE" => "Unicorn #{Unicorn::Const::UNICORN_VERSION}"
18
+ }
20
19
 
21
- NULL_IO = StringIO.new("")
22
- LOCALHOST = '127.0.0.1'
20
+ NULL_IO = StringIO.new("")
23
21
 
24
- # Being explicitly single-threaded, we have certain advantages in
25
- # not having to worry about variables being clobbered :)
26
- BUF = ""
27
- PARSER = HttpParser.new
28
- REQ = {}
22
+ # :stopdoc:
23
+ # A frozen format for this is about 15% faster
24
+ REMOTE_ADDR = 'REMOTE_ADDR'.freeze
25
+ RACK_INPUT = 'rack.input'.freeze
26
+ # :startdoc:
29
27
 
30
- # Does the majority of the IO processing. It has been written in
31
- # Ruby using about 8 different IO processing strategies.
32
- #
33
- # It is currently carefully constructed to make sure that it gets
34
- # the best possible performance for the common case: GET requests
35
- # that are fully complete after a single read(2)
36
- #
37
- # Anyone who thinks they can make it faster is more than welcome to
38
- # take a crack at it.
39
- #
40
- # returns an environment hash suitable for Rack if successful
41
- # This does minimal exception trapping and it is up to the caller
42
- # to handle any socket errors (e.g. user aborted upload).
43
- def read(socket)
44
- REQ.clear
45
- PARSER.reset
28
+ attr_reader :env, :parser, :buf
46
29
 
47
- # From http://www.ietf.org/rfc/rfc3875:
48
- # "Script authors should be aware that the REMOTE_ADDR and
49
- # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
50
- # may not identify the ultimate source of the request. They
51
- # identify the client for the immediate request to the server;
52
- # that client may be a proxy, gateway, or other intermediary
53
- # acting on behalf of the actual source client."
54
- REQ[Const::REMOTE_ADDR] =
55
- TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
30
+ def initialize
31
+ @parser = Unicorn::HttpParser.new
32
+ @buf = ""
33
+ @env = {}
34
+ end
56
35
 
57
- # short circuit the common case with small GET requests first
58
- if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
59
- # Parser is not done, queue up more data to read and continue parsing
60
- # an Exception thrown from the PARSER will throw us out of the loop
61
- begin
62
- BUF << socket.readpartial(Const::CHUNK_SIZE)
63
- end while PARSER.headers(REQ, BUF).nil?
64
- end
65
- REQ[Const::RACK_INPUT] = 0 == PARSER.content_length ?
66
- NULL_IO : Unicorn::TeeInput.new(socket, REQ, PARSER, BUF)
67
- REQ.update(DEFAULTS)
68
- end
36
+ def response_headers?
37
+ @parser.headers?
38
+ end
69
39
 
40
+ # Does the majority of the IO processing. It has been written in
41
+ # Ruby using about 8 different IO processing strategies.
42
+ #
43
+ # It is currently carefully constructed to make sure that it gets
44
+ # the best possible performance for the common case: GET requests
45
+ # that are fully complete after a single read(2)
46
+ #
47
+ # Anyone who thinks they can make it faster is more than welcome to
48
+ # take a crack at it.
49
+ #
50
+ # returns an environment hash suitable for Rack if successful
51
+ # This does minimal exception trapping and it is up to the caller
52
+ # to handle any socket errors (e.g. user aborted upload).
53
+ def read(socket)
54
+ @env.clear
55
+ @parser.reset
56
+
57
+ # From http://www.ietf.org/rfc/rfc3875:
58
+ # "Script authors should be aware that the REMOTE_ADDR and
59
+ # REMOTE_HOST meta-variables (see sections 4.1.8 and 4.1.9)
60
+ # may not identify the ultimate source of the request. They
61
+ # identify the client for the immediate request to the server;
62
+ # that client may be a proxy, gateway, or other intermediary
63
+ # acting on behalf of the actual source client."
64
+ @env[REMOTE_ADDR] = socket.kgio_addr
65
+
66
+ # short circuit the common case with small GET requests first
67
+ if @parser.headers(@env, socket.kgio_read!(16384, @buf)).nil?
68
+ # Parser is not done, queue up more data to read and continue parsing
69
+ # an Exception thrown from the PARSER will throw us out of the loop
70
+ begin
71
+ @buf << socket.kgio_read!(16384)
72
+ end while @parser.headers(@env, @buf).nil?
73
+ end
74
+ @env[RACK_INPUT] = 0 == @parser.content_length ?
75
+ NULL_IO : Unicorn::TeeInput.new(socket, self)
76
+ @env.merge!(DEFAULTS)
70
77
  end
71
78
  end
@@ -5,19 +5,12 @@ require 'time'
5
5
  # You use it by simply doing:
6
6
  #
7
7
  # status, headers, body = rack_app.call(env)
8
- # HttpResponse.write(socket, [ status, headers, body ])
8
+ # http_response_write(socket, [ status, headers, body ])
9
9
  #
10
10
  # Most header correctness (including Content-Length and Content-Type)
11
- # is the job of Rack, with the exception of the "Connection: close"
12
- # and "Date" headers.
11
+ # is the job of Rack, with the exception of the "Date" and "Status" header.
13
12
  #
14
- # A design decision was made to force the client to not pipeline or
15
- # keepalive requests. HTTP/1.1 pipelining really kills the
16
- # performance due to how it has to be handled and how unclear the
17
- # standard is. To fix this the HttpResponse always gives a
18
- # "Connection: close" header which forces the client to close right
19
- # away. The bonus for this is that it gives a pretty nice speed boost
20
- # to most clients since they can close their connection immediately.
13
+ # TODO: allow keepalive
21
14
  module Unicorn::HttpResponse
22
15
 
23
16
  # Every standard HTTP code mapped to the appropriate message.
@@ -25,44 +18,32 @@ module Unicorn::HttpResponse
25
18
  hash[code] = "#{code} #{msg}"
26
19
  hash
27
20
  }
28
-
29
- # Rack does not set/require a Date: header. We always override the
30
- # Connection: and Date: headers no matter what (if anything) our
31
- # Rack application sent us.
32
- SKIP = { 'connection' => true, 'date' => true, 'status' => true }
21
+ CRLF = "\r\n"
33
22
 
34
23
  # writes the rack_response to socket as an HTTP response
35
- def self.write(socket, rack_response, have_header = true)
24
+ def http_response_write(socket, rack_response)
36
25
  status, headers, body = rack_response
26
+ status = CODES[status.to_i] || status
37
27
 
38
- if have_header
39
- status = CODES[status.to_i] || status
40
- out = []
41
-
42
- # Don't bother enforcing duplicate supression, it's a Hash most of
43
- # the time anyways so just hope our app knows what it's doing
28
+ if headers
29
+ buf = "HTTP/1.1 #{status}\r\n" \
30
+ "Date: #{Time.now.httpdate}\r\n" \
31
+ "Status: #{status}\r\n" \
32
+ "Connection: close\r\n"
44
33
  headers.each do |key, value|
45
- next if SKIP.include?(key.downcase)
34
+ next if %r{\A(?:Date\z|Status\z|Connection\z)}i =~ key
46
35
  if value =~ /\n/
47
36
  # avoiding blank, key-only cookies with /\n+/
48
- out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
37
+ buf << value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" }.join('')
49
38
  else
50
- out << "#{key}: #{value}\r\n"
39
+ buf << "#{key}: #{value}\r\n"
51
40
  end
52
41
  end
53
-
54
- # Rack should enforce Content-Length or chunked transfer encoding,
55
- # so don't worry or care about them.
56
- # Date is required by HTTP/1.1 as long as our clock can be trusted.
57
- # Some broken clients require a "Status" header so we accomodate them
58
- socket.write("HTTP/1.1 #{status}\r\n" \
59
- "Date: #{Time.now.httpdate}\r\n" \
60
- "Status: #{status}\r\n" \
61
- "Connection: close\r\n" \
62
- "#{out.join('')}\r\n")
42
+ socket.write(buf << CRLF)
63
43
  end
64
44
 
65
45
  body.each { |chunk| socket.write(chunk) }
46
+ socket.close # flushes and uncorks the socket immediately
66
47
  ensure
67
48
  body.respond_to?(:close) and body.close
68
49
  end
@@ -0,0 +1,700 @@
1
+ # -*- encoding: binary -*-
2
+
3
+ # This is the process manager of Unicorn. This manages worker
4
+ # processes which in turn handle the I/O and application process.
5
+ # Listener sockets are started in the master process and shared with
6
+ # forked worker children.
7
+ class Unicorn::HttpServer
8
+ attr_accessor :app, :request, :timeout, :worker_processes,
9
+ :before_fork, :after_fork, :before_exec,
10
+ :logger, :pid, :listener_opts, :preload_app,
11
+ :reexec_pid, :orig_app, :init_listeners,
12
+ :master_pid, :config, :ready_pipe, :user
13
+
14
+ # :stopdoc:
15
+ include Unicorn::SocketHelper
16
+ include Unicorn::HttpResponse
17
+
18
+ # backwards compatibility with 1.x
19
+ Worker = Unicorn::Worker
20
+
21
+ # prevents IO objects in here from being GC-ed
22
+ IO_PURGATORY = []
23
+
24
+ # all bound listener sockets
25
+ LISTENERS = []
26
+
27
+ # This hash maps PIDs to Workers
28
+ WORKERS = {}
29
+
30
+ # We use SELF_PIPE differently in the master and worker processes:
31
+ #
32
+ # * The master process never closes or reinitializes this once
33
+ # initialized. Signal handlers in the master process will write to
34
+ # it to wake up the master from IO.select in exactly the same manner
35
+ # djb describes in http://cr.yp.to/docs/selfpipe.html
36
+ #
37
+ # * The workers immediately close the pipe they inherit from the
38
+ # master and replace it with a new pipe after forking. This new
39
+ # pipe is also used to wakeup from IO.select from inside (worker)
40
+ # signal handlers. However, workers *close* the pipe descriptors in
41
+ # the signal handlers to raise EBADF in IO.select instead of writing
42
+ # like we do in the master. We cannot easily use the reader set for
43
+ # IO.select because LISTENERS is already that set, and it's extra
44
+ # work (and cycles) to distinguish the pipe FD from the reader set
45
+ # once IO.select returns. So we're lazy and just close the pipe when
46
+ # a (rare) signal arrives in the worker and reinitialize the pipe later.
47
+ SELF_PIPE = []
48
+
49
+ # signal queue used for self-piping
50
+ SIG_QUEUE = []
51
+
52
+ # We populate this at startup so we can figure out how to reexecute
53
+ # and upgrade the currently running instance of Unicorn
54
+ # This Hash is considered a stable interface and changing its contents
55
+ # will allow you to switch between different installations of Unicorn
56
+ # or even different installations of the same applications without
57
+ # downtime. Keys of this constant Hash are described as follows:
58
+ #
59
+ # * 0 - the path to the unicorn/unicorn_rails executable
60
+ # * :argv - a deep copy of the ARGV array the executable originally saw
61
+ # * :cwd - the working directory of the application, this is where
62
+ # you originally started Unicorn.
63
+ #
64
+ # To change your unicorn executable to a different path without downtime,
65
+ # you can set the following in your Unicorn config file, HUP and then
66
+ # continue with the traditional USR2 + QUIT upgrade steps:
67
+ #
68
+ # Unicorn::HttpServer::START_CTX[0] = "/home/bofh/1.9.2/bin/unicorn"
69
+ START_CTX = {
70
+ :argv => ARGV.map { |arg| arg.dup },
71
+ :cwd => lambda {
72
+ # favor ENV['PWD'] since it is (usually) symlink aware for
73
+ # Capistrano and like systems
74
+ begin
75
+ a = File.stat(pwd = ENV['PWD'])
76
+ b = File.stat(Dir.pwd)
77
+ a.ino == b.ino && a.dev == b.dev ? pwd : Dir.pwd
78
+ rescue
79
+ Dir.pwd
80
+ end
81
+ }.call,
82
+ 0 => $0.dup,
83
+ }
84
+ # :startdoc:
85
+
86
+ # Creates a working server on host:port (strange things happen if
87
+ # port isn't a Number). Use HttpServer::run to start the server and
88
+ # HttpServer.run.join to join the thread that's processing
89
+ # incoming requests on the socket.
90
+ def initialize(app, options = {})
91
+ @app = app
92
+ @request = Unicorn::HttpRequest.new
93
+ self.reexec_pid = 0
94
+ options = options.dup
95
+ self.ready_pipe = options.delete(:ready_pipe)
96
+ self.init_listeners = options[:listeners] ? options[:listeners].dup : []
97
+ options[:use_defaults] = true
98
+ self.config = Unicorn::Configurator.new(options)
99
+ self.listener_opts = {}
100
+
101
+ # we try inheriting listeners first, so we bind them later.
102
+ # we don't write the pid file until we've bound listeners in case
103
+ # unicorn was started twice by mistake. Even though our #pid= method
104
+ # checks for stale/existing pid files, race conditions are still
105
+ # possible (and difficult/non-portable to avoid) and can be likely
106
+ # to clobber the pid if the second start was in quick succession
107
+ # after the first, so we rely on the listener binding to fail in
108
+ # that case. Some tests (in and outside of this source tree) and
109
+ # monitoring tools may also rely on pid files existing before we
110
+ # attempt to connect to the listener(s)
111
+ config.commit!(self, :skip => [:listeners, :pid])
112
+ self.orig_app = app
113
+ end
114
+
115
+ # Runs the thing. Returns self so you can run join on it
116
+ def start
117
+ BasicSocket.do_not_reverse_lookup = true
118
+
119
+ # inherit sockets from parents, they need to be plain Socket objects
120
+ # before they become Kgio::UNIXServer or Kgio::TCPServer
121
+ inherited = ENV['UNICORN_FD'].to_s.split(/,/).map do |fd|
122
+ io = Socket.for_fd(fd.to_i)
123
+ set_server_sockopt(io, listener_opts[sock_name(io)])
124
+ IO_PURGATORY << io
125
+ logger.info "inherited addr=#{sock_name(io)} fd=#{fd}"
126
+ server_cast(io)
127
+ end
128
+
129
+ config_listeners = config[:listeners].dup
130
+ LISTENERS.replace(inherited)
131
+
132
+ # we start out with generic Socket objects that get cast to either
133
+ # Kgio::TCPServer or Kgio::UNIXServer objects; but since the Socket
134
+ # objects share the same OS-level file descriptor as the higher-level
135
+ # *Server objects; we need to prevent Socket objects from being
136
+ # garbage-collected
137
+ config_listeners -= listener_names
138
+ if config_listeners.empty? && LISTENERS.empty?
139
+ config_listeners << Unicorn::Const::DEFAULT_LISTEN
140
+ init_listeners << Unicorn::Const::DEFAULT_LISTEN
141
+ START_CTX[:argv] << "-l#{Unicorn::Const::DEFAULT_LISTEN}"
142
+ end
143
+ config_listeners.each { |addr| listen(addr) }
144
+ raise ArgumentError, "no listeners" if LISTENERS.empty?
145
+
146
+ # this pipe is used to wake us up from select(2) in #join when signals
147
+ # are trapped. See trap_deferred.
148
+ init_self_pipe!
149
+
150
+ # setup signal handlers before writing pid file in case people get
151
+ # trigger happy and send signals as soon as the pid file exists.
152
+ # Note that signals don't actually get handled until the #join method
153
+ QUEUE_SIGS.each { |sig| trap_deferred(sig) }
154
+ trap(:CHLD) { |_| awaken_master }
155
+ self.pid = config[:pid]
156
+
157
+ self.master_pid = $$
158
+ build_app! if preload_app
159
+ maintain_worker_count
160
+ self
161
+ end
162
+
163
+ # replaces current listener set with +listeners+. This will
164
+ # close the socket if it will not exist in the new listener set
165
+ def listeners=(listeners)
166
+ cur_names, dead_names = [], []
167
+ listener_names.each do |name|
168
+ if ?/ == name[0]
169
+ # mark unlinked sockets as dead so we can rebind them
170
+ (File.socket?(name) ? cur_names : dead_names) << name
171
+ else
172
+ cur_names << name
173
+ end
174
+ end
175
+ set_names = listener_names(listeners)
176
+ dead_names.concat(cur_names - set_names).uniq!
177
+
178
+ LISTENERS.delete_if do |io|
179
+ if dead_names.include?(sock_name(io))
180
+ IO_PURGATORY.delete_if do |pio|
181
+ pio.fileno == io.fileno && (pio.close rescue nil).nil? # true
182
+ end
183
+ (io.close rescue nil).nil? # true
184
+ else
185
+ set_server_sockopt(io, listener_opts[sock_name(io)])
186
+ false
187
+ end
188
+ end
189
+
190
+ (set_names - cur_names).each { |addr| listen(addr) }
191
+ end
192
+
193
+ def stdout_path=(path); redirect_io($stdout, path); end
194
+ def stderr_path=(path); redirect_io($stderr, path); end
195
+
196
+ def logger=(obj)
197
+ Unicorn::HttpRequest::DEFAULTS["rack.logger"] = @logger = obj
198
+ end
199
+
200
+ # sets the path for the PID file of the master process
201
+ def pid=(path)
202
+ if path
203
+ if x = valid_pid?(path)
204
+ return path if pid && path == pid && x == $$
205
+ if x == reexec_pid && pid =~ /\.oldbin\z/
206
+ logger.warn("will not set pid=#{path} while reexec-ed "\
207
+ "child is running PID:#{x}")
208
+ return
209
+ end
210
+ raise ArgumentError, "Already running on PID:#{x} " \
211
+ "(or pid=#{path} is stale)"
212
+ end
213
+ end
214
+ unlink_pid_safe(pid) if pid
215
+
216
+ if path
217
+ fp = begin
218
+ tmp = "#{File.dirname(path)}/#{rand}.#$$"
219
+ File.open(tmp, File::RDWR|File::CREAT|File::EXCL, 0644)
220
+ rescue Errno::EEXIST
221
+ retry
222
+ end
223
+ fp.syswrite("#$$\n")
224
+ File.rename(fp.path, path)
225
+ fp.close
226
+ end
227
+ @pid = path
228
+ end
229
+
230
+ # add a given address to the +listeners+ set, idempotently
231
+ # Allows workers to add a private, per-process listener via the
232
+ # after_fork hook. Very useful for debugging and testing.
233
+ # +:tries+ may be specified as an option for the number of times
234
+ # to retry, and +:delay+ may be specified as the time in seconds
235
+ # to delay between retries.
236
+ # A negative value for +:tries+ indicates the listen will be
237
+ # retried indefinitely, this is useful when workers belonging to
238
+ # different masters are spawned during a transparent upgrade.
239
+ def listen(address, opt = {}.merge(listener_opts[address] || {}))
240
+ address = config.expand_addr(address)
241
+ return if String === address && listener_names.include?(address)
242
+
243
+ delay = opt[:delay] || 0.5
244
+ tries = opt[:tries] || 5
245
+ begin
246
+ io = bind_listen(address, opt)
247
+ unless Kgio::TCPServer === io || Kgio::UNIXServer === io
248
+ IO_PURGATORY << io
249
+ io = server_cast(io)
250
+ end
251
+ logger.info "listening on addr=#{sock_name(io)} fd=#{io.fileno}"
252
+ LISTENERS << io
253
+ io
254
+ rescue Errno::EADDRINUSE => err
255
+ logger.error "adding listener failed addr=#{address} (in use)"
256
+ raise err if tries == 0
257
+ tries -= 1
258
+ logger.error "retrying in #{delay} seconds " \
259
+ "(#{tries < 0 ? 'infinite' : tries} tries left)"
260
+ sleep(delay)
261
+ retry
262
+ rescue => err
263
+ logger.fatal "error adding listener addr=#{address}"
264
+ raise err
265
+ end
266
+ end
267
+
268
+ # monitors children and receives signals forever
269
+ # (or until a termination signal is sent). This handles signals
270
+ # one-at-a-time time and we'll happily drop signals in case somebody
271
+ # is signalling us too often.
272
+ def join
273
+ respawn = true
274
+ last_check = Time.now
275
+
276
+ proc_name 'master'
277
+ logger.info "master process ready" # test_exec.rb relies on this message
278
+ if ready_pipe
279
+ ready_pipe.syswrite($$.to_s)
280
+ ready_pipe.close rescue nil
281
+ self.ready_pipe = nil
282
+ end
283
+ begin
284
+ reap_all_workers
285
+ case SIG_QUEUE.shift
286
+ when nil
287
+ # avoid murdering workers after our master process (or the
288
+ # machine) comes out of suspend/hibernation
289
+ if (last_check + timeout) >= (last_check = Time.now)
290
+ murder_lazy_workers
291
+ else
292
+ # wait for workers to wakeup on suspend
293
+ master_sleep(timeout/2.0 + 1)
294
+ end
295
+ maintain_worker_count if respawn
296
+ master_sleep(1)
297
+ when :QUIT # graceful shutdown
298
+ break
299
+ when :TERM, :INT # immediate shutdown
300
+ stop(false)
301
+ break
302
+ when :USR1 # rotate logs
303
+ logger.info "master reopening logs..."
304
+ Unicorn::Util.reopen_logs
305
+ logger.info "master done reopening logs"
306
+ kill_each_worker(:USR1)
307
+ when :USR2 # exec binary, stay alive in case something went wrong
308
+ reexec
309
+ when :WINCH
310
+ if Process.ppid == 1 || Process.getpgrp != $$
311
+ respawn = false
312
+ logger.info "gracefully stopping all workers"
313
+ kill_each_worker(:QUIT)
314
+ self.worker_processes = 0
315
+ else
316
+ logger.info "SIGWINCH ignored because we're not daemonized"
317
+ end
318
+ when :TTIN
319
+ respawn = true
320
+ self.worker_processes += 1
321
+ when :TTOU
322
+ self.worker_processes -= 1 if self.worker_processes > 0
323
+ when :HUP
324
+ respawn = true
325
+ if config.config_file
326
+ load_config!
327
+ else # exec binary and exit if there's no config file
328
+ logger.info "config_file not present, reexecuting binary"
329
+ reexec
330
+ end
331
+ end
332
+ rescue Errno::EINTR
333
+ rescue => e
334
+ logger.error "Unhandled master loop exception #{e.inspect}."
335
+ logger.error e.backtrace.join("\n")
336
+ end while true
337
+ stop # gracefully shutdown all workers on our way out
338
+ logger.info "master complete"
339
+ unlink_pid_safe(pid) if pid
340
+ end
341
+
342
+ # Terminates all workers, but does not exit master process
343
+ def stop(graceful = true)
344
+ self.listeners = []
345
+ limit = Time.now + timeout
346
+ until WORKERS.empty? || Time.now > limit
347
+ kill_each_worker(graceful ? :QUIT : :TERM)
348
+ sleep(0.1)
349
+ reap_all_workers
350
+ end
351
+ kill_each_worker(:KILL)
352
+ end
353
+
354
+ private
355
+
356
+ # list of signals we care about and trap in master.
357
+ QUEUE_SIGS = [ :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP,
358
+ :TTIN, :TTOU ]
359
+
360
+ # defer a signal for later processing in #join (master process)
361
+ def trap_deferred(signal)
362
+ trap(signal) do |sig_nr|
363
+ if SIG_QUEUE.size < 5
364
+ SIG_QUEUE << signal
365
+ awaken_master
366
+ else
367
+ logger.error "ignoring SIG#{signal}, queue=#{SIG_QUEUE.inspect}"
368
+ end
369
+ end
370
+ end
371
+
372
# wait for a signal handler to wake us up and then consume the pipe
# Wake up every second anyways to run murder_lazy_workers
def master_sleep(sec)
  readable = IO.select([ SELF_PIPE[0] ], nil, nil, sec)
  # drain the wakeup bytes written by awaken_master
  SELF_PIPE[0].kgio_tryread(11) if readable
end
378
+
379
# wakes the master process out of IO.select via the self-pipe
def awaken_master
  SELF_PIPE[1].kgio_trywrite('.') # wakeup master process from select
end
382
+
383
# reaps all unreaped workers
def reap_all_workers
  loop do
    begin
      wpid, status = Process.waitpid2(-1, Process::WNOHANG)
      wpid or return # nothing left to reap right now
      if reexec_pid == wpid
        # the re-exec'ed master died; reclaim the original pid file name
        logger.error "reaped #{status.inspect} exec()-ed"
        self.reexec_pid = 0
        self.pid = pid.chomp('.oldbin') if pid
        proc_name 'master'
      else
        worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
        m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
        status.success? ? logger.info(m) : logger.error(m)
      end
    rescue Errno::ECHILD
      return # no children at all
    end
  end
end
402
+
403
# reexecutes the START_CTX with a new binary
# (fixes: removed unused local +prev_pid+; corrected "rexec" typo
# in the refusal error message)
def reexec
  if reexec_pid > 0
    begin
      # signal 0 only checks existence; raises ESRCH if gone
      Process.kill(0, reexec_pid)
      logger.error "reexec-ed child already running PID:#{reexec_pid}"
      return
    rescue Errno::ESRCH
      self.reexec_pid = 0
    end
  end

  if pid
    # rename our pid file to *.oldbin so the incoming master can
    # write its own pid file at the original path
    old_pid = "#{pid}.oldbin"
    begin
      self.pid = old_pid # clear the path for a new pid file
    rescue ArgumentError
      logger.error "old PID:#{valid_pid?(old_pid)} running with " \
                   "existing pid=#{old_pid}, refusing reexec"
      return
    rescue => e
      logger.error "error writing pid=#{old_pid} #{e.class} #{e.message}"
      return
    end
  end

  self.reexec_pid = fork do
    listener_fds = LISTENERS.map { |sock| sock.fileno }
    ENV['UNICORN_FD'] = listener_fds.join(',')
    Dir.chdir(START_CTX[:cwd])
    cmd = [ START_CTX[0] ].concat(START_CTX[:argv])

    # avoid leaking FDs we don't know about, but let before_exec
    # unset FD_CLOEXEC, if anything else in the app eventually
    # relies on FD inheritence.
    (3..1024).each do |io|
      next if listener_fds.include?(io)
      io = IO.for_fd(io) rescue next
      IO_PURGATORY << io
      io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
    end
    logger.info "executing #{cmd.inspect} (in #{Dir.pwd})"
    before_exec.call(self)
    exec(*cmd)
  end
  proc_name 'master (old)'
end
451
+
452
# forcibly terminate all workers that haven't checked in in timeout
# seconds.  The timeout is implemented using an unlinked File
# shared between the parent process and each worker: the worker
# bumps the file's ctime via File#chmod, so a ctime stale for more
# than +timeout+ seconds means the worker is wedged and is killed.
def murder_lazy_workers
  WORKERS.dup.each_pair do |wpid, worker|
    stat = worker.tmp.stat
    # skip workers that disable fchmod or have never fchmod-ed
    next if stat.mode == 0100600
    diff = Time.now - stat.ctime
    next if diff <= timeout
    logger.error "worker=#{worker.nr} PID:#{wpid} timeout " \
                 "(#{diff}s > #{timeout}s), killing"
    kill_worker(:KILL, wpid) # take no prisoners for timeout violations
  end
end
469
+
470
# forks workers until +worker_processes+ distinct worker numbers
# are accounted for in WORKERS
def spawn_missing_workers
  (0...worker_processes).each do |worker_nr|
    next if WORKERS.values.include?(worker_nr)
    worker = Worker.new(worker_nr, Unicorn::TmpIO.new)
    before_fork.call(self, worker)
    wpid = fork do
      # the child has no use for the master's readiness pipe
      ready_pipe.close if ready_pipe
      self.ready_pipe = nil
      worker_loop(worker)
    end
    WORKERS[wpid] = worker
  end
end
482
+
483
# brings the number of running workers in line with +worker_processes+
def maintain_worker_count
  off = WORKERS.size - worker_processes
  return if off == 0
  return spawn_missing_workers if off < 0
  # too many: gracefully retire the highest-numbered workers
  WORKERS.dup.each_pair { |wpid, w|
    w.nr >= worker_processes and kill_worker(:QUIT, wpid) rescue nil
  }
end
490
+
491
# if we get any error, try to write something back to the client
# assuming we haven't closed the socket, but don't get hung up
# if the socket is already closed or broken.  We'll always ensure
# the socket is closed at the end of this function
def handle_error(client, e)
  msg =
    case e
    when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
      # client disconnected mid-request; nothing worth logging
      Unicorn::Const::ERROR_500_RESPONSE
    when HttpParserError # try to tell the client they're bad
      Unicorn::Const::ERROR_400_RESPONSE
    else
      logger.error "Read error: #{e.inspect}"
      logger.error e.backtrace.join("\n")
      Unicorn::Const::ERROR_500_RESPONSE
    end
  client.kgio_trywrite(msg)
  client.close
rescue
  nil
end
511
+
512
# once a client is accepted, it is processed in its entirety here
# in 3 easy steps: read request, call app, write app response
def process_client(client)
  env = @request.read(client)
  response = @app.call(env)

  # honor "Expect: 100-continue" by replying and re-dispatching
  if 100 == response[0].to_i
    client.write(Unicorn::Const::EXPECT_100_RESPONSE)
    env.delete(Unicorn::Const::HTTP_EXPECT)
    response = @app.call(env)
  end
  # response may be frozen or const, so don't modify it
  @request.response_headers? or response = [ response[0], nil, response[2] ]
  http_response_write(client, response)
rescue => e
  handle_error(client, e)
end
528
+
529
# gets rid of stuff the worker has no business keeping track of
# to free some resources and drops all sig handlers.
# traps for USR1, USR2, and HUP may be set in the after_fork Proc
# by the user.
def init_worker_process(worker)
  # drop master-installed signal handlers and any queued signals
  QUEUE_SIGS.each { |sig| trap(sig, nil) }
  trap(:CHLD, 'DEFAULT')
  SIG_QUEUE.clear
  proc_name "worker[#{worker.nr}]"
  START_CTX.clear
  init_self_pipe!
  # close heartbeat files belonging to sibling workers
  WORKERS.values.each { |other| other.tmp.close rescue nil }
  WORKERS.clear
  LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
  worker.tmp.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
  after_fork.call(self, worker) # can drop perms
  worker.user(*user) if user.kind_of?(Array) && ! worker.switched
  self.timeout /= 2.0 # halve it for select()
  build_app! unless preload_app
end
549
+
550
# reopens log files in the worker (triggered by SIGUSR1)
def reopen_worker_logs(worker_nr)
  logger.info "worker=#{worker_nr} reopening logs..."
  Unicorn::Util.reopen_logs
  logger.info "worker=#{worker_nr} done reopening logs"
  init_self_pipe!
end
556
+
557
# runs inside each forked worker, this sits around and waits
# for connections and doesn't die until the parent dies (or is
# given a INT, QUIT, or TERM signal)
def worker_loop(worker)
  ppid = master_pid
  init_worker_process(worker)
  nr = 0 # this becomes negative if we need to reopen logs
  alive = worker.tmp # tmp is our lifeline to the master process
  ready = LISTENERS

  # closing anything we IO.select on will raise EBADF
  trap(:USR1) { nr = -65536; SELF_PIPE[0].close rescue nil }
  trap(:QUIT) { alive = nil; LISTENERS.each { |s| s.close rescue nil } }
  [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
  logger.info "worker=#{worker.nr} ready"
  m = 0

  begin
    nr < 0 and reopen_worker_logs(worker.nr)
    nr = 0

    # we're a goner in timeout seconds anyways if alive.chmod
    # breaks, so don't trap the exception.  Using fchmod() since
    # futimes() is not available in base Ruby and I very strongly
    # prefer temporary files to be unlinked for security,
    # performance and reliability reasons, so utime is out.  No-op
    # changes with chmod doesn't update ctime on all filesystems; so
    # we change our counter each and every time (after process_client
    # and before IO.select).
    alive.chmod(m = 0 == m ? 1 : 0)

    ready.each do |sock|
      if client = sock.kgio_tryaccept
        process_client(client)
        nr += 1
        alive.chmod(m = 0 == m ? 1 : 0)
      end
      break if nr < 0 # SIGUSR1 arrived mid-round: go reopen logs
    end

    # make the following bet: if we accepted clients this round,
    # we're probably reasonably busy, so avoid calling select()
    # and do a speculative non-blocking accept() on ready listeners
    # before we sleep again in select().
    redo unless nr == 0 # (nr < 0) => reopen logs

    ppid == Process.ppid or return # master died; die with it
    alive.chmod(m = 0 == m ? 1 : 0)

    # timeout used so we can detect parent death:
    ret = IO.select(LISTENERS, nil, SELF_PIPE, timeout) and ready = ret[0]
  rescue Errno::EINTR
    ready = LISTENERS
  rescue Errno::EBADF
    nr < 0 or return # EBADF is expected only after the USR1 trap
  rescue => e
    if alive
      logger.error "Unhandled listen loop exception #{e.inspect}."
      logger.error e.backtrace.join("\n")
    end
  end while alive
end
619
+
620
# delivers a signal to a worker and fails gracefully if the worker
# is no longer running.
def kill_worker(signal, wpid)
  Process.kill(signal, wpid)
rescue Errno::ESRCH
  # already dead: drop our bookkeeping for it
  worker = WORKERS.delete(wpid) and worker.tmp.close rescue nil
end
627
+
628
# delivers a signal to every known worker
def kill_each_worker(signal)
  WORKERS.keys.each do |wpid|
    kill_worker(signal, wpid)
  end
end
632
+
633
# unlinks a PID file at given +path+ if it contains the current PID
# still potentially racy without locking the directory (which is
# non-portable and may interact badly with other programs), but the
# window for hitting the race condition is small
def unlink_pid_safe(path)
  File.unlink(path) if File.read(path).to_i == $$
rescue
  nil
end
640
+
641
# returns a PID if a given path contains a non-stale PID file,
# nil otherwise.
def valid_pid?(path)
  wpid = File.read(path).to_i
  return if wpid <= 0
  Process.kill(0, wpid) # raises ESRCH if the process is gone
  wpid
rescue Errno::ESRCH, Errno::ENOENT
  # don't unlink stale pid files, racy without non-portable locking...
end
651
+
652
# reloads the config file in the master and gracefully restarts
# workers; restores the previously-loaded app on any reload error
def load_config!
  loaded_app = app
  logger.info "reloading config_file=#{config.config_file}"
  config[:listeners].replace(init_listeners)
  config.reload
  config.commit!(self)
  kill_each_worker(:QUIT)
  Unicorn::Util.reopen_logs
  self.app = orig_app
  build_app! if preload_app
  logger.info "done reloading config_file=#{config.config_file}"
rescue StandardError, LoadError, SyntaxError => e
  logger.error "error reloading config_file=#{config.config_file}: " \
               "#{e.class} #{e.message} #{e.backtrace}"
  self.app = loaded_app
end
668
+
669
# returns an array of string names for the given listener array
def listener_names(listeners = LISTENERS)
  listeners.map { |sock| sock_name(sock) }
end
673
+
674
# resolves a lazily-built app (a zero-arity callable) into the
# real Rack app, refreshing the Gem list first when possible
def build_app!
  return unless app.respond_to?(:arity) && app.arity == 0
  if defined?(Gem) && Gem.respond_to?(:refresh)
    logger.info "Refreshing Gem list"
    Gem.refresh
  end
  self.app = app.call
end
683
+
684
# sets the process title shown by ps(1)/top(1)
def proc_name(tag)
  $0 = [ File.basename(START_CTX[0]), tag, *START_CTX[:argv] ].join(' ')
end
688
+
689
# reopens +io+ to append to +path+ (when path is given) and
# enables sync so writes are not lost on crash
def redirect_io(io, path)
  if path
    File.open(path, 'ab') { |fp| io.reopen(fp) }
  end
  io.sync = true
end
693
+
694
# replaces SELF_PIPE with a fresh pipe pair, closing any previous
# ends and marking the new descriptors close-on-exec
def init_self_pipe!
  SELF_PIPE.each { |old_io| old_io.close rescue nil }
  SELF_PIPE.replace(Kgio::Pipe.new)
  SELF_PIPE.each { |new_io| new_io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
end
699
+ end
700
+