unicorn 5.2.0 → 5.3.1

@@ -15,6 +15,7 @@ class Unicorn::HttpServer
  :before_fork, :after_fork, :before_exec,
  :listener_opts, :preload_app,
  :orig_app, :config, :ready_pipe, :user
+ attr_writer :after_worker_exit, :after_worker_ready, :worker_exec
 
  attr_reader :pid, :logger
  include Unicorn::SocketHelper
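
These writers back the new configuration hooks in unicorn 5.3. A minimal config sketch, assuming Unicorn::Configurator exposes directives with the same names; the hook bodies simply reproduce the default logging this diff removes from reap_all_workers and worker_loop:

    # config/unicorn.rb
    worker_exec true   # spawn workers via exec instead of a plain fork

    after_worker_ready do |server, worker|
      server.logger.info("worker=#{worker.nr} ready")
    end

    after_worker_exit do |server, worker, status|
      server.logger.info("reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}")
    end
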
@@ -88,6 +89,7 @@ def initialize(app, options = {})
  @self_pipe = []
  @workers = {} # hash maps PIDs to Workers
  @sig_queue = [] # signal queue used for self-piping
+ @pid = nil
 
  # we try inheriting listeners first, so we bind them later.
  # we don't write the pid file until we've bound listeners in case
@@ -104,6 +106,14 @@ def initialize(app, options = {})
  # list of signals we care about and trap in master.
  @queue_sigs = [
    :WINCH, :QUIT, :INT, :TERM, :USR1, :USR2, :HUP, :TTIN, :TTOU ]
+
+ @worker_data = if worker_data = ENV['UNICORN_WORKER']
+   worker_data = worker_data.split(',').map!(&:to_i)
+   worker_data[1] = worker_data.slice!(1..2).map do |i|
+     Kgio::Pipe.for_fd(i)
+   end
+   worker_data
+ end
  end
 
  # Runs the thing. Returns self so you can run join on it
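
When worker_exec is in use, worker_spawn (added further down) sets UNICORN_WORKER to "nr,to_io_fd,master_fd" in the exec'd worker's environment, and the branch above rebuilds the worker's control pipe from it. A standalone sketch of the array surgery, with made-up descriptor numbers and Kgio left out:

    data = '2,7,8'.split(',').map!(&:to_i)   # => [2, 7, 8]
    fds  = data.slice!(1..2)                 # => [7, 8]; data is now [2]
    data[1] = fds                            # data == [2, [7, 8]]
    # the real code wraps each fd with Kgio::Pipe.for_fd before storing it,
    # so Unicorn::Worker.new(*data) later receives (nr, [to_io, master])
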
@@ -112,7 +122,7 @@ def start
  # this pipe is used to wake us up from select(2) in #join when signals
  # are trapped. See trap_deferred.
  @self_pipe.replace(Unicorn.pipe)
- @master_pid = $$
+ @master_pid = @worker_data ? Process.ppid : $$
 
  # setup signal handlers before writing pid file in case people get
  # trigger happy and send signals as soon as the pid file exists.
@@ -395,8 +405,7 @@ def reap_all_workers
    proc_name 'master'
  else
    worker = @workers.delete(wpid) and worker.close rescue nil
-   m = "reaped #{status.inspect} worker=#{worker.nr rescue 'unknown'}"
-   status.success? ? logger.info(m) : logger.error(m)
+   @after_worker_exit.call(self, worker, status)
  end
  rescue Errno::ECHILD
    break
@@ -430,11 +439,7 @@ def reexec
  end
 
  @reexec_pid = fork do
-   listener_fds = {}
-   LISTENERS.each do |sock|
-     sock.close_on_exec = false
-     listener_fds[sock.fileno] = sock
-   end
+   listener_fds = listener_sockets
    ENV['UNICORN_FD'] = listener_fds.keys.join(',')
    Dir.chdir(START_CTX[:cwd])
    cmd = [ START_CTX[0] ].concat(START_CTX[:argv])
@@ -442,12 +447,7 @@ def reexec
    # avoid leaking FDs we don't know about, but let before_exec
    # unset FD_CLOEXEC, if anything else in the app eventually
    # relies on FD inheritence.
-   (3..1024).each do |io|
-     next if listener_fds.include?(io)
-     io = IO.for_fd(io) rescue next
-     io.autoclose = false
-     io.close_on_exec = true
-   end
+   close_sockets_on_exec(listener_fds)
 
    # exec(command, hash) works in at least 1.9.1+, but will only be
    # required in 1.9.4/2.0.0 at earliest.
@@ -459,6 +459,40 @@ def reexec
  proc_name 'master (old)'
  end
 
+ def worker_spawn(worker)
+   listener_fds = listener_sockets
+   env = {}
+   env['UNICORN_FD'] = listener_fds.keys.join(',')
+
+   listener_fds[worker.to_io.fileno] = worker.to_io
+   listener_fds[worker.master.fileno] = worker.master
+
+   worker_info = [worker.nr, worker.to_io.fileno, worker.master.fileno]
+   env['UNICORN_WORKER'] = worker_info.join(',')
+
+   close_sockets_on_exec(listener_fds)
+
+   Process.spawn(env, START_CTX[0], *START_CTX[:argv], listener_fds)
+ end
+
+ def listener_sockets
+   listener_fds = {}
+   LISTENERS.each do |sock|
+     sock.close_on_exec = false
+     listener_fds[sock.fileno] = sock
+   end
+   listener_fds
+ end
+
+ def close_sockets_on_exec(sockets)
+   (3..1024).each do |io|
+     next if sockets.include?(io)
+     io = IO.for_fd(io) rescue next
+     io.autoclose = false
+     io.close_on_exec = true
+   end
+ end
+
  # forcibly terminate all workers that haven't checked in in timeout seconds. The timeout is implemented using an unlinked File
  def murder_lazy_workers
    next_sleep = @timeout - 1
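
worker_spawn relies on Process.spawn's descriptor-redirection options: mapping each fd to its own IO in the trailing hash keeps it open, at the same number, across the exec. A minimal standalone illustration of that redirection style (the socket and child command here are placeholders, not unicorn code):

    require 'socket'

    srv = TCPServer.new('127.0.0.1', 0)
    env = { 'UNICORN_FD' => srv.fileno.to_s }

    # fd => IO entries in the options hash are inherited by the child process
    pid = Process.spawn(env, 'ruby', '-e', 'p ENV["UNICORN_FD"]', srv.fileno => srv)
    Process.wait(pid)
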
@@ -495,19 +529,29 @@ def after_fork_internal
  end
 
  def spawn_missing_workers
+   if @worker_data
+     worker = Unicorn::Worker.new(*@worker_data)
+     after_fork_internal
+     worker_loop(worker)
+     exit
+   end
+
    worker_nr = -1
    until (worker_nr += 1) == @worker_processes
      @workers.value?(worker_nr) and next
      worker = Unicorn::Worker.new(worker_nr)
      before_fork.call(self, worker)
-     if pid = fork
-       @workers[pid] = worker
-       worker.atfork_parent
-     else
+
+     pid = @worker_exec ? worker_spawn(worker) : fork
+
+     unless pid
        after_fork_internal
        worker_loop(worker)
        exit
      end
+
+     @workers[pid] = worker
+     worker.atfork_parent
    end
  rescue => e
    @logger.error(e) rescue nil
@@ -644,7 +688,7 @@ def worker_loop(worker)
  trap(:USR1) { nr = -65536 }
 
  ready = readers.dup
- @logger.info "worker=#{worker.nr} ready"
+ @after_worker_ready.call(self, worker)
 
  begin
    nr < 0 and reopen_worker_logs(worker.nr)
@@ -66,10 +66,9 @@ def self.new(app, interval = 5, path = %r{\A/})
  end
 
  #:stopdoc:
- PATH_INFO = "PATH_INFO"
  def process_client(client)
    super(client) # Unicorn::HttpServer#process_client
-   if OOBGC_PATH =~ OOBGC_ENV[PATH_INFO] && ((@@nr -= 1) <= 0)
+   if OOBGC_PATH =~ OOBGC_ENV['PATH_INFO'] && ((@@nr -= 1) <= 0)
      @@nr = OOBGC_INTERVAL
      OOBGC_ENV.clear
      disabled = GC.enable
@@ -3,6 +3,18 @@
  require 'socket'
 
  module Unicorn
+
+   # Instead of using a generic Kgio::Socket for everything,
+   # tag TCP sockets so we can use TCP_INFO under Linux without
+   # incurring extra syscalls for Unix domain sockets.
+   # TODO: remove these when we remove kgio
+   TCPClient = Class.new(Kgio::Socket) # :nodoc:
+   class TCPSrv < Kgio::TCPServer # :nodoc:
+     def kgio_tryaccept # :nodoc:
+       super(TCPClient)
+     end
+   end
+
    module SocketHelper
 
    # internal interface
@@ -63,12 +75,15 @@ def set_tcp_sockopt(sock, opt)
  elsif respond_to?(:accf_arg)
    name = opt[:accept_filter]
    name = DEFAULTS[:accept_filter] if name.nil?
+   sock.listen(opt[:backlog])
+   got = (sock.getsockopt(:SOL_SOCKET, :SO_ACCEPTFILTER) rescue nil).to_s
+   arg = accf_arg(name)
    begin
-     sock.setsockopt(:SOL_SOCKET, :SO_ACCEPTFILTER, accf_arg(name))
+     sock.setsockopt(:SOL_SOCKET, :SO_ACCEPTFILTER, arg)
    rescue => e
      logger.error("#{sock_name(sock)} " \
        "failed to set accept_filter=#{name} (#{e.inspect})")
-   end
+   end if arg != got
  end
  end
 
@@ -148,7 +163,7 @@ def new_tcp_server(addr, port, opt)
  end
  sock.bind(Socket.pack_sockaddr_in(port, addr))
  sock.autoclose = false
- Kgio::TCPServer.for_fd(sock.fileno)
+ TCPSrv.for_fd(sock.fileno)
  end
 
  # returns rfc2732-style (e.g. "[::1]:666") addresses for IPv6
@@ -185,7 +200,7 @@ def sock_name(sock)
  def server_cast(sock)
    begin
      Socket.unpack_sockaddr_in(sock.getsockname)
-     Kgio::TCPServer.for_fd(sock.fileno)
+     TCPSrv.for_fd(sock.fileno)
    rescue ArgumentError
      Kgio::UNIXServer.for_fd(sock.fileno)
    end
@@ -1,16 +1,17 @@
  # -*- encoding: binary -*-
 
- # When processing uploads, Unicorn may expose a StreamInput object under
- # "rack.input" of the (future) Rack (2.x) environment.
+ # When processing uploads, unicorn may expose a StreamInput object under
+ # "rack.input" of the Rack environment when
+ # Unicorn::Configurator#rewindable_input is set to +false+
  class Unicorn::StreamInput
    # The I/O chunk size (in +bytes+) for I/O operations where
    # the size cannot be user-specified when a method is called.
    # The default is 16 kilobytes.
-   @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+   @@io_chunk_size = Unicorn::Const::CHUNK_SIZE # :nodoc:
 
    # Initializes a new StreamInput object. You normally do not have to call
    # this unless you are writing an HTTP server.
-   def initialize(socket, request)
+   def initialize(socket, request) # :nodoc:
      @chunked = request.content_length.nil?
      @socket = socket
      @parser = request
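
The revised comment ties StreamInput to the rewindable_input setting; a minimal config sketch, assuming the standard Unicorn::Configurator directive of that name:

    # config/unicorn.rb
    # skip TeeInput's rewindable buffering so env['rack.input'] is a
    # read-once Unicorn::StreamInput
    rewindable_input false
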
@@ -1,6 +1,6 @@
  # -*- encoding: binary -*-
 
- # acts like tee(1) on an input input to provide a input-like stream
+ # Acts like tee(1) on an input input to provide a input-like stream
  # while providing rewindable semantics through a File/StringIO backing
  # store. On the first pass, the input is only read on demand so your
  # Rack application can use input notification (upload progress and
@@ -9,22 +9,22 @@
  # strict interpretation of Rack::Lint::InputWrapper functionality and
  # will not support any deviations from it.
  #
- # When processing uploads, Unicorn exposes a TeeInput object under
- # "rack.input" of the Rack environment.
+ # When processing uploads, unicorn exposes a TeeInput object under
+ # "rack.input" of the Rack environment by default.
  class Unicorn::TeeInput < Unicorn::StreamInput
    # The maximum size (in +bytes+) to buffer in memory before
    # resorting to a temporary file. Default is 112 kilobytes.
-   @@client_body_buffer_size = Unicorn::Const::MAX_BODY
+   @@client_body_buffer_size = Unicorn::Const::MAX_BODY # :nodoc:
 
    # sets the maximum size of request bodies to buffer in memory,
    # amounts larger than this are buffered to the filesystem
-   def self.client_body_buffer_size=(bytes)
+   def self.client_body_buffer_size=(bytes) # :nodoc:
      @@client_body_buffer_size = bytes
    end
 
    # returns the maximum size of request bodies to buffer in memory,
    # amounts larger than this are buffered to the filesystem
-   def self.client_body_buffer_size
+   def self.client_body_buffer_size # :nodoc:
      @@client_body_buffer_size
    end
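
The client_body_buffer_size accessors are hidden from RDoc because the value is normally set from the config file; a minimal sketch, assuming the usual configurator directive of the same name:

    # config/unicorn.rb
    # keep bodies up to 64 kB in memory; larger uploads spill to a tempfile
    client_body_buffer_size 64 * 1024
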
@@ -37,7 +37,7 @@ def new_tmpio # :nodoc:
 
  # Initializes a new TeeInput object. You normally do not have to call
  # this unless you are writing an HTTP server.
- def initialize(socket, request)
+ def initialize(socket, request) # :nodoc:
    @len = request.content_length
    super
    @tmp = @len && @len <= @@client_body_buffer_size ?
@@ -125,9 +125,7 @@ def consume!
  end
 
  def tee(buffer)
-   if buffer && buffer.size > 0
-     @tmp.write(buffer)
-   end
+   @tmp.write(buffer) if buffer
    buffer
  end
  end
@@ -1 +1 @@
- Unicorn::Const::UNICORN_VERSION = '5.2.0'
+ Unicorn::Const::UNICORN_VERSION = '5.3.1'
@@ -12,18 +12,19 @@ class Unicorn::Worker
  # :stopdoc:
  attr_accessor :nr, :switched
  attr_reader :to_io # IO.select-compatible
+ attr_reader :master
 
  PER_DROP = Raindrops::PAGE_SIZE / Raindrops::SIZE
  DROPS = []
 
- def initialize(nr)
+ def initialize(nr, pipe=nil)
    drop_index = nr / PER_DROP
    @raindrop = DROPS[drop_index] ||= Raindrops.new(PER_DROP)
    @offset = nr % PER_DROP
    @raindrop[@offset] = 0
    @nr = nr
    @switched = false
-   @to_io, @master = Unicorn.pipe
+   @to_io, @master = pipe || Unicorn.pipe
  end
 
  def atfork_child # :nodoc:
@@ -111,9 +112,11 @@ def close # :nodoc:
  # In most cases, you should be using the Unicorn::Configurator#user
  # directive instead. This method should only be used if you need
  # fine-grained control of exactly when you want to change permissions
- # in your after_fork hooks.
+ # in your after_fork or after_worker_ready hooks, or if you want to
+ # use the chroot support.
  #
- # Changes the worker process to the specified +user+ and +group+
+ # Changes the worker process to the specified +user+ and +group+,
+ # and chroots to the current working directory if +chroot+ is set.
  # This is only intended to be called from within the worker
  # process from the +after_fork+ hook. This should be called in
  # the +after_fork+ hook after any privileged functions need to be
@@ -122,8 +125,11 @@ def close # :nodoc:
  # Any and all errors raised within this method will be propagated
  # directly back to the caller (usually the +after_fork+ hook.
  # These errors commonly include ArgumentError for specifying an
- # invalid user/group and Errno::EPERM for insufficient privileges
- def user(user, group = nil)
+ # invalid user/group and Errno::EPERM for insufficient privileges.
+ #
+ # chroot support is only available in unicorn 5.3.0+
+ # user and group switching appeared in unicorn 0.94.0 (2009-11-05)
+ def user(user, group = nil, chroot = false)
  # we do not protect the caller, checking Process.euid == 0 is
  # insufficient because modern systems have fine-grained
  # capabilities. Let the caller handle any and all errors.
@@ -134,6 +140,11 @@ def user(user, group = nil)
    Process.initgroups(user, gid)
    Process::GID.change_privilege(gid)
  end
+ if chroot
+   chroot = Dir.pwd if chroot == true
+   Dir.chroot(chroot)
+   Dir.chdir('/')
+ end
  Process.euid != uid and Process::UID.change_privilege(uid)
  @switched = true
  end
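
With the new chroot argument, one hook call can drop privileges and jail the worker; a minimal sketch (the user and group names are placeholders):

    # config/unicorn.rb
    after_fork do |server, worker|
      # change gid, chroot to the current working directory, then change uid
      worker.user('nobody', 'nogroup', true)
    end
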
@@ -52,7 +52,7 @@ t_begin "worker pid unchanged (again)" && {
  }
 
  t_begin "nuking the existing Unicorn succeeds" && {
-   kill -9 $unicorn_pid $worker_pid
+   kill -9 $unicorn_pid
    while kill -0 $unicorn_pid
    do
      sleep 1
@@ -18,7 +18,8 @@ after_fork { |s,w| }
  next if key =~ %r{\Astd(?:err|out)_path\z}
  key = key.to_sym
  def_value = defaults[key]
- srv_value = srv.__send__(key)
+ srv_value = srv.respond_to?(key) ? srv.__send__(key)
+   : srv.instance_variable_get("@#{key}")
  fp << "#{key}|#{srv_value}|#{def_value}\\n"
  end
  }
data/t/test-lib.sh CHANGED
@@ -106,8 +106,8 @@ check_stderr () {
  # unicorn_setup
  unicorn_setup () {
    eval $(unused_listen)
-   port=$(expr $listen : '[^:]*:\([0-9]\+\)')
-   host=$(expr $listen : '\([^:]*\):[0-9]\+')
+   port=$(expr $listen : '[^:]*:\([0-9]*\)')
+   host=$(expr $listen : '\([^:][^:]*\):[0-9][0-9]*')
 
    rtmpfiles unicorn_config pid r_err r_out fifo tmp ok
    cat > $unicorn_config <<EOF
@@ -97,6 +97,9 @@ def teardown
  end
 
  def test_sd_listen_fds_emulation
+   # [ruby-core:69895] [Bug #11336] fixed by r51576
+   return if RUBY_VERSION.to_f < 2.3
+
    File.open("config.ru", "wb") { |fp| fp.write(HI) }
    sock = TCPServer.new(@addr, @port)
 
@@ -119,14 +122,12 @@ def test_sd_listen_fds_emulation
    res = hit(["http://#@addr:#@port/"])
    assert_equal [ "HI\n" ], res
    assert_shutdown(pid)
-   assert_equal 1, sock.getsockopt(:SOL_SOCKET, :SO_KEEPALIVE).int,
+   assert sock.getsockopt(:SOL_SOCKET, :SO_KEEPALIVE).bool,
      'unicorn should always set SO_KEEPALIVE on inherited sockets'
  end
  ensure
    sock.close if sock
-   # disabled test on old Rubies: https://bugs.ruby-lang.org/issues/11336
-   # [ruby-core:69895] [Bug #11336] fixed by r51576
- end if RUBY_VERSION.to_f >= 2.3
+ end
 
  def test_inherit_listener_unspecified
    File.open("config.ru", "wb") { |fp| fp.write(HI) }
@@ -142,7 +143,7 @@ def test_inherit_listener_unspecified
    res = hit(["http://#@addr:#@port/"])
    assert_equal [ "HI\n" ], res
    assert_shutdown(pid)
-   assert_equal 1, sock.getsockopt(:SOL_SOCKET, :SO_KEEPALIVE).int,
+   assert sock.getsockopt(:SOL_SOCKET, :SO_KEEPALIVE).bool,
      'unicorn should always set SO_KEEPALIVE on inherited sockets'
  ensure
    sock.close if sock
@@ -0,0 +1,90 @@
+ require 'socket'
+ require 'unicorn'
+ require 'io/wait'
+ require 'tempfile'
+ require 'test/unit'
+
+ class TestCccTCPI < Test::Unit::TestCase
+   def test_ccc_tcpi
+     start_pid = $$
+     host = '127.0.0.1'
+     srv = TCPServer.new(host, 0)
+     port = srv.addr[1]
+     err = Tempfile.new('unicorn_ccc')
+     rd, wr = IO.pipe
+     sleep_pipe = IO.pipe
+     pid = fork do
+       sleep_pipe[1].close
+       reqs = 0
+       rd.close
+       worker_pid = nil
+       app = lambda do |env|
+         worker_pid ||= begin
+           at_exit { wr.write(reqs.to_s) if worker_pid == $$ }
+           $$
+         end
+         reqs += 1
+
+         # will wake up when writer closes
+         sleep_pipe[0].read if env['PATH_INFO'] == '/sleep'
+
+         [ 200, [ %w(Content-Length 0), %w(Content-Type text/plain) ], [] ]
+       end
+       ENV['UNICORN_FD'] = srv.fileno.to_s
+       opts = {
+         listeners: [ "#{host}:#{port}" ],
+         stderr_path: err.path,
+         check_client_connection: true,
+       }
+       uni = Unicorn::HttpServer.new(app, opts)
+       uni.start.join
+     end
+     wr.close
+
+     # make sure the server is running, at least
+     client = TCPSocket.new(host, port)
+     client.write("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
+     assert client.wait_readable(10), 'never got response from server'
+     res = client.read
+     assert_match %r{\AHTTP/1\.1 200}, res, 'got part of first response'
+     assert_match %r{\r\n\r\n\z}, res, 'got end of response, server is ready'
+     client.close
+
+     # start a slow request...
+     sleeper = TCPSocket.new(host, port)
+     sleeper.write("GET /sleep HTTP/1.1\r\nHost: example.com\r\n\r\n")
+
+     # and a bunch of aborted ones
+     nr = 100
+     nr.times do |i|
+       client = TCPSocket.new(host, port)
+       client.write("GET /collections/#{rand(10000)} HTTP/1.1\r\n" \
+                    "Host: example.com\r\n\r\n")
+       client.close
+     end
+     sleep_pipe[1].close # wake up the reader in the worker
+     res = sleeper.read
+     assert_match %r{\AHTTP/1\.1 200}, res, 'got part of first sleeper response'
+     assert_match %r{\r\n\r\n\z}, res, 'got end of sleeper response'
+     sleeper.close
+     kpid = pid
+     pid = nil
+     Process.kill(:QUIT, kpid)
+     _, status = Process.waitpid2(kpid)
+     assert status.success?
+     reqs = rd.read.to_i
+     warn "server got #{reqs} requests with #{nr} CCC aborted\n" if $DEBUG
+     assert_operator reqs, :<, nr
+     assert_operator reqs, :>=, 2, 'first 2 requests got through, at least'
+   ensure
+     return if start_pid != $$
+     srv.close if srv
+     if pid
+       Process.kill(:QUIT, pid)
+       _, status = Process.waitpid2(pid)
+       assert status.success?
+     end
+     err.close! if err
+     rd.close if rd
+   end
+ end