unicorn 5.5.0 → 6.0.0

This diff shows the content of publicly available package versions as released to a supported public registry. It is provided for informational purposes only and reflects the changes between the two versions as published.
Files changed (52)
  1. checksums.yaml +4 -4
  2. data/.olddoc.yml +12 -7
  3. data/Documentation/.gitignore +1 -3
  4. data/Documentation/unicorn.1 +222 -0
  5. data/Documentation/unicorn_rails.1 +207 -0
  6. data/FAQ +1 -1
  7. data/GIT-VERSION-GEN +1 -1
  8. data/GNUmakefile +112 -57
  9. data/HACKING +1 -1
  10. data/ISSUES +16 -21
  11. data/KNOWN_ISSUES +2 -2
  12. data/Links +5 -5
  13. data/README +13 -6
  14. data/SIGNALS +1 -1
  15. data/Sandbox +2 -2
  16. data/archive/slrnpull.conf +1 -1
  17. data/bin/unicorn_rails +2 -2
  18. data/examples/big_app_gc.rb +1 -1
  19. data/examples/logrotate.conf +2 -2
  20. data/examples/nginx.conf +1 -1
  21. data/examples/unicorn.conf.minimal.rb +2 -2
  22. data/examples/unicorn.conf.rb +2 -2
  23. data/examples/unicorn@.service +7 -0
  24. data/ext/unicorn_http/extconf.rb +5 -0
  25. data/ext/unicorn_http/unicorn_http.rl +43 -5
  26. data/lib/unicorn.rb +4 -1
  27. data/lib/unicorn/configurator.rb +13 -3
  28. data/lib/unicorn/http_request.rb +11 -1
  29. data/lib/unicorn/http_server.rb +37 -5
  30. data/lib/unicorn/oob_gc.rb +5 -5
  31. data/lib/unicorn/tmpio.rb +8 -2
  32. data/t/GNUmakefile +3 -72
  33. data/test/benchmark/README +14 -4
  34. data/test/benchmark/ddstream.ru +50 -0
  35. data/test/benchmark/readinput.ru +40 -0
  36. data/test/benchmark/uconnect.perl +66 -0
  37. data/test/exec/test_exec.rb +14 -12
  38. data/test/test_helper.rb +38 -30
  39. data/test/unit/test_ccc.rb +4 -3
  40. data/test/unit/test_http_parser_ng.rb +81 -0
  41. data/test/unit/test_server.rb +81 -7
  42. data/test/unit/test_signals.rb +6 -6
  43. data/test/unit/test_socket_helper.rb +1 -1
  44. data/test/unit/test_upload.rb +9 -14
  45. data/test/unit/test_util.rb +1 -1
  46. data/unicorn.gemspec +8 -7
  47. metadata +12 -11
  48. data/Documentation/GNUmakefile +0 -30
  49. data/Documentation/unicorn.1.txt +0 -187
  50. data/Documentation/unicorn_rails.1.txt +0 -175
  51. data/t/hijack.ru +0 -55
  52. data/t/t0200-rack-hijack.sh +0 -51
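
To verify or explore these changes beyond the excerpts shown below, the comparison can be reproduced locally. The following Ruby sketch is illustrative only and not part of the published diff; it assumes RubyGems, git, and network access to the public registry, and simply fetches and unpacks both versions before diffing the extracted trees.

    # illustrative sketch: fetch and unpack unicorn 5.5.0 and 6.0.0,
    # then produce a file-level stat similar to the list above
    require 'rubygems/package'
    require 'tmpdir'

    Dir.mktmpdir do |dir|
      %w[5.5.0 6.0.0].each do |ver|
        # downloads unicorn-<ver>.gem into `dir`
        system('gem', 'fetch', 'unicorn', '-v', ver, chdir: dir) or abort 'gem fetch failed'
        Gem::Package.new(File.join(dir, "unicorn-#{ver}.gem")).
          extract_files(File.join(dir, ver))
      end
      system('git', 'diff', '--no-index', '--stat',
             File.join(dir, '5.5.0'), File.join(dir, '6.0.0'))
    end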
data/t/GNUmakefile CHANGED
@@ -1,74 +1,5 @@
- # we can run tests in parallel with GNU make
+ # there used to be more, here, but we stopped relying on recursive make
  all::
+ $(MAKE) -C .. test-integration
 
- pid := $(shell echo $$PPID)
-
- RUBY = ruby
- RAKE = rake
- -include ../local.mk
- ifeq ($(RUBY_VERSION),)
- RUBY_VERSION := $(shell $(RUBY) -e 'puts RUBY_VERSION')
- endif
-
- ifeq ($(RUBY_VERSION),)
- $(error unable to detect RUBY_VERSION)
- endif
-
- RUBY_ENGINE := $(shell $(RUBY) -e 'puts((RUBY_ENGINE rescue "ruby"))')
- export RUBY_ENGINE
-
- MYLIBS := $(RUBYLIB)
-
- T = $(wildcard t[0-9][0-9][0-9][0-9]-*.sh)
-
- all:: $(T)
-
- # can't rely on "set -o pipefail" since we don't require bash or ksh93 :<
- t_pfx = trash/$@-$(RUBY_ENGINE)-$(RUBY_VERSION)
- TEST_OPTS =
- # TRACER = strace -f -o $(t_pfx).strace -s 100000
- # TRACER = /usr/bin/time -o $(t_pfx).time
-
- ifdef V
- ifeq ($(V),2)
- TEST_OPTS += --trace
- else
- TEST_OPTS += --verbose
- endif
- endif
-
- random_blob:
- dd if=/dev/urandom bs=1M count=30 of=$@.$(pid)
- mv $@.$(pid) $@
-
- $(T): random_blob
-
- dependencies := socat curl
- deps := $(addprefix .dep+,$(dependencies))
- $(deps): dep_bin = $(lastword $(subst +, ,$@))
- $(deps):
- @which $(dep_bin) > $@.$(pid) 2>/dev/null || :
- @test -s $@.$(pid) || \
- { echo >&2 "E '$(dep_bin)' not found in PATH=$(PATH)"; exit 1; }
- @mv $@.$(pid) $@
- dep: $(deps)
-
- test_prefix := $(CURDIR)/../test/$(RUBY_ENGINE)-$(RUBY_VERSION)
- $(test_prefix)/.stamp:
- $(MAKE) -C .. test-install
-
- $(T): export RUBY := $(RUBY)
- $(T): export RAKE := $(RAKE)
- $(T): export PATH := $(test_prefix)/bin:$(PATH)
- $(T): export RUBYLIB := $(test_prefix)/lib:$(MYLIBS)
- $(T): dep $(test_prefix)/.stamp trash/.gitignore
- $(TRACER) $(SHELL) $(SH_TEST_OPTS) $@ $(TEST_OPTS)
-
- trash/.gitignore:
- mkdir -p $(@D)
- echo '*' > $@
-
- clean:
- $(RM) -r trash/*
-
- .PHONY: $(T) clean
+ .PHONY: all
data/test/benchmark/README CHANGED
@@ -42,9 +42,19 @@ The benchmark client is usually httperf.
  Another gentle reminder: performance with slow networks/clients
  is NOT our problem. That is the job of nginx (or similar).
 
+ == ddstream.ru
+
+ Standalone Rack app intended to show how BAD we are at slow clients.
+ See usage in comments.
+
+ == readinput.ru
+
+ Standalone Rack app intended to show how bad we are with slow uploaders.
+ See usage in comments.
+
  == Contributors
 
- This directory is maintained independently in the "benchmark" branch
- based against v0.1.0. Only changes to this directory (test/benchmarks)
- are committed to this branch although the master branch may merge this
- branch occassionaly.
+ This directory is intended to remain stable. Do not make changes
+ to benchmarking code which can change performance and invalidate
+ results across revisions. Instead, write new benchmarks and update
+ coments/documentation as necessary.
data/test/benchmark/ddstream.ru ADDED
@@ -0,0 +1,50 @@
+ # This app is intended to test large HTTP responses with or without
+ # a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+ # reverse proxy, unicorn will be unresponsive when client count exceeds
+ # worker_processes.
+ #
+ # To demonstrate how bad unicorn is at slowly reading clients:
+ #
+ # # in one terminal, start unicorn with one worker:
+ # unicorn -E none -l 127.0.0.1:8080 test/benchmark/ddstream.ru
+ #
+ # # in a different terminal, start more slow curl processes than
+ # # unicorn workers and watch time outputs
+ # curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+ # curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+ # wait
+ #
+ # The last client won't see a response until the first one is done reading
+ #
+ # nginx note: do not change the default "proxy_buffering" behavior.
+ # Setting "proxy_buffering off" prevents nginx from protecting unicorn.
+
+ # totally standalone rack app to stream a giant response
+ class BigResponse
+ def initialize(bs, count)
+ @buf = "#{bs.to_s(16)}\r\n#{' ' * bs}\r\n"
+ @count = count
+ @res = [ 200,
+ { 'Transfer-Encoding' => -'chunked', 'Content-Type' => 'text/plain' },
+ self
+ ]
+ end
+
+ # rack response body iterator
+ def each
+ (1..@count).each { yield @buf }
+ yield -"0\r\n\r\n"
+ end
+
+ # rack app entry endpoint
+ def call(_env)
+ @res
+ end
+ end
+
+ # default to a giant (128M) response because kernel socket buffers
+ # can be ridiculously large on some systems
+ bs = ENV['bs'] ? ENV['bs'].to_i : 65536
+ count = ENV['count'] ? ENV['count'].to_i : 2048
+ warn "serving response with bs=#{bs} count=#{count} (#{bs*count} bytes)"
+ run BigResponse.new(bs, count)
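
Aside (not part of the gem): each string BigResponse#each yields above is one HTTP/1.1 chunk, hex payload size, CRLF, payload, CRLF, and the trailing "0\r\n\r\n" is the zero-length chunk that terminates the body. A tiny sketch of that framing, using a small bs purely for readability:

    # illustrative only: the chunked transfer-coding framing BigResponse emits
    bs = 16
    chunk = "#{bs.to_s(16)}\r\n#{' ' * bs}\r\n"
    p chunk        # => "10\r\n                \r\n" (0x10 == 16 payload bytes)
    p "0\r\n\r\n"  # zero-length chunk marking the end of the response body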
data/test/benchmark/readinput.ru ADDED
@@ -0,0 +1,40 @@
+ # This app is intended to test large HTTP requests with or without
+ # a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+ # reverse proxy, unicorn will be unresponsive when client count exceeds
+ # worker_processes.
+
+ DOC = <<DOC
+ To demonstrate how bad unicorn is at slowly uploading clients:
+
+ # in one terminal, start unicorn with one worker:
+ unicorn -E none -l 127.0.0.1:8080 test/benchmark/readinput.ru
+
+ # in a different terminal, upload 45M from multiple curl processes:
+ dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+ --trace-time -v http://127.0.0.1:8080/ &
+ dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+ --trace-time -v http://127.0.0.1:8080/ &
+ wait
+
+ # The last client won't see a response until the first one is done uploading
+ # You also won't be able to make GET requests to view this documentation
+ # while clients are uploading. You can also view the stderr debug output
+ # of unicorn (see logging code in #{__FILE__}).
+ DOC
+
+ run(lambda do |env|
+ input = env['rack.input']
+ buf = ''.b
+
+ # default logger contains timestamps, rely on that so users can
+ # see what the server is doing
+ l = env['rack.logger']
+
+ l.debug('BEGIN reading input ...') if l
+ :nop while input.read(16384, buf)
+ l.debug('DONE reading input ...') if l
+
+ buf.clear
+ [ 200, [ %W(Content-Length #{DOC.size}), %w(Content-Type text/plain) ],
+ [ DOC ] ]
+ end)
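
Aside (not part of the gem): the ":nop while input.read(16384, buf)" line above drains rack.input through one reused buffer, since IO#read with a length and buffer argument fills the buffer in place and returns nil at EOF. A standalone sketch of the same idiom, with StringIO standing in for env['rack.input'] purely for illustration:

    # illustrative only: drain an IO-like object into a single reused buffer
    require 'stringio'

    input = StringIO.new('x' * 100_000)  # stand-in for a slow client upload
    buf = ''.b
    bytes = 0
    bytes += buf.size while input.read(16_384, buf)
    p bytes  # => 100000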
data/test/benchmark/uconnect.perl ADDED
@@ -0,0 +1,66 @@
+ #!/usr/bin/perl -w
+ # Benchmark script to spawn some processes and hammer a local unicorn
+ # to test accept loop performance. This only does Unix sockets.
+ # There's plenty of TCP benchmarking tools out there, and TCP port reuse
+ # has predictability problems since unicorn can't do persistent connections.
+ # Written in Perl for the same reason: predictability.
+ # Ruby GC is not as predictable as Perl refcounting.
+ use strict;
+ use Socket qw(AF_UNIX SOCK_STREAM sockaddr_un);
+ use POSIX qw(:sys_wait_h);
+ use Getopt::Std;
+ # -c / -n switches stolen from ab(1)
+ my $usage = "$0 [-c CONCURRENCY] [-n NUM_REQUESTS] SOCKET_PATH\n";
+ our $opt_c = 2;
+ our $opt_n = 1000;
+ getopts('c:n:') or die $usage;
+ my $unix_path = shift or die $usage;
+ use constant REQ => "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
+ use constant REQ_LEN => length(REQ);
+ use constant BUFSIZ => 8192;
+ $^F = 99; # don't waste syscall time with FD_CLOEXEC
+
+ my %workers; # pid => worker num
+ die "-n $opt_n not evenly divisible by -c $opt_c\n" if $opt_n % $opt_c;
+ my $n_per_worker = $opt_n / $opt_c;
+ my $addr = sockaddr_un($unix_path);
+
+ for my $num (1..$opt_c) {
+ defined(my $pid = fork) or die "fork failed: $!\n";
+ if ($pid) {
+ $workers{$pid} = $num;
+ } else {
+ work($n_per_worker);
+ }
+ }
+
+ reap_worker(0) while scalar keys %workers;
+ exit;
+
+ sub work {
+ my ($n) = @_;
+ my ($buf, $x);
+ for (1..$n) {
+ socket(S, AF_UNIX, SOCK_STREAM, 0) or die "socket: $!";
+ connect(S, $addr) or die "connect: $!";
+ defined($x = syswrite(S, REQ)) or die "write: $!";
+ $x == REQ_LEN or die "short write: $x != ".REQ_LEN."\n";
+ do {
+ $x = sysread(S, $buf, BUFSIZ);
+ unless (defined $x) {
+ next if $!{EINTR};
+ die "sysread: $!\n";
+ }
+ } until ($x == 0);
+ }
+ exit 0;
+ }
+
+ sub reap_worker {
+ my ($flags) = @_;
+ my $pid = waitpid(-1, $flags);
+ return if !defined $pid || $pid <= 0;
+ my $p = delete $workers{$pid} || '(unknown)';
+ warn("$pid [$p] exited with $?\n") if $?;
+ $p;
+ }
data/test/exec/test_exec.rb CHANGED
@@ -45,8 +45,9 @@ def call(env)
 
  COMMON_TMP = Tempfile.new('unicorn_tmp') unless defined?(COMMON_TMP)
 
+ HEAVY_WORKERS = 2
  HEAVY_CFG = <<-EOS
- worker_processes 4
+ worker_processes #{HEAVY_WORKERS}
  timeout 30
  logger Logger.new('#{COMMON_TMP.path}')
  before_fork do |server, worker|
@@ -573,7 +574,7 @@ def test_unicorn_config_per_worker_listen
  assert_equal String, results[0].class
  worker_pid = results[0].to_i
  assert_not_equal pid, worker_pid
- s = UNIXSocket.new(tmp.path)
+ s = unix_socket(tmp.path)
  s.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = ''
  loop { results << s.sysread(4096) } rescue nil
@@ -606,6 +607,7 @@ def test_unicorn_config_listen_augments_cli
  def test_weird_config_settings
  File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
  ucfg = Tempfile.new('unicorn_test_config')
+ proc_total = HEAVY_WORKERS + 1 # + 1 for master
  ucfg.syswrite(HEAVY_CFG)
  pid = xfork do
  redirect_test_io do
@@ -616,9 +618,9 @@ def test_weird_config_settings
  results = retry_hit(["http://#{@addr}:#{@port}/"])
  assert_equal String, results[0].class
  wait_master_ready(COMMON_TMP.path)
- wait_workers_ready(COMMON_TMP.path, 4)
+ wait_workers_ready(COMMON_TMP.path, HEAVY_WORKERS)
  bf = File.readlines(COMMON_TMP.path).grep(/\bbefore_fork: worker=/)
- assert_equal 4, bf.size
+ assert_equal HEAVY_WORKERS, bf.size
  rotate = Tempfile.new('unicorn_rotate')
 
  File.rename(COMMON_TMP.path, rotate.path)
@@ -630,20 +632,20 @@ def test_weird_config_settings
  tries = DEFAULT_TRIES
  log = File.readlines(rotate.path)
  while (tries -= 1) > 0 &&
- log.grep(/reopening logs\.\.\./).size < 5
+ log.grep(/reopening logs\.\.\./).size < proc_total
  sleep DEFAULT_RES
  log = File.readlines(rotate.path)
  end
- assert_equal 5, log.grep(/reopening logs\.\.\./).size
+ assert_equal proc_total, log.grep(/reopening logs\.\.\./).size
  assert_equal 0, log.grep(/done reopening logs/).size
 
  tries = DEFAULT_TRIES
  log = File.readlines(COMMON_TMP.path)
- while (tries -= 1) > 0 && log.grep(/done reopening logs/).size < 5
+ while (tries -= 1) > 0 && log.grep(/done reopening logs/).size < proc_total
  sleep DEFAULT_RES
  log = File.readlines(COMMON_TMP.path)
  end
- assert_equal 5, log.grep(/done reopening logs/).size
+ assert_equal proc_total, log.grep(/done reopening logs/).size
  assert_equal 0, log.grep(/reopening logs\.\.\./).size
 
  Process.kill(:QUIT, pid)
@@ -730,7 +732,7 @@ def test_socket_unlinked_restore
  wait_for_file(sock_path)
  assert File.socket?(sock_path)
 
- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)
 
@@ -740,7 +742,7 @@ def test_socket_unlinked_restore
  wait_for_file(sock_path)
  assert File.socket?(sock_path)
 
- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)
 
@@ -775,7 +777,7 @@ def test_unicorn_config_file
  assert_equal pid, File.read(pid_file).to_i
  assert File.socket?(sock_path), "socket created"
 
- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)
 
@@ -801,7 +803,7 @@ def test_unicorn_config_file
  wait_for_file(new_sock_path)
  assert File.socket?(new_sock_path), "socket exists"
  @sockets.each do |path|
- sock = UNIXSocket.new(path)
+ sock = unix_socket(path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)
  assert_equal String, results.class
data/test/test_helper.rb CHANGED
@@ -28,22 +28,43 @@
  require 'fileutils'
  require 'logger'
  require 'unicorn'
+ require 'io/nonblock'
 
  if ENV['DEBUG']
  require 'ruby-debug'
  Debugger.start
  end
 
+ unless RUBY_VERSION < '3.1'
+ warn "Unicorn was only tested against MRI up to 3.0.\n" \
+ "It might not properly work with #{RUBY_VERSION}"
+ end
+
  def redirect_test_io
  orig_err = STDERR.dup
  orig_out = STDOUT.dup
- STDERR.reopen("test_stderr.#{$$}.log", "a")
- STDOUT.reopen("test_stdout.#{$$}.log", "a")
+ rdr_pid = $$
+ new_out = File.open("test_stdout.#$$.log", "a")
+ new_err = File.open("test_stderr.#$$.log", "a")
+ new_out.sync = new_err.sync = true
+
+ if tail = ENV['TAIL'] # "tail -F" if GNU, "tail -f" otherwise
+ require 'shellwords'
+ cmd = tail.shellsplit
+ cmd << new_out.path
+ cmd << new_err.path
+ pid = Process.spawn(*cmd, { 1 => 2, :pgroup => true })
+ sleep 0.1 # wait for tail(1) to startup
+ end
+ STDERR.reopen(new_err)
+ STDOUT.reopen(new_out)
  STDERR.sync = STDOUT.sync = true
 
  at_exit do
- File.unlink("test_stderr.#{$$}.log") rescue nil
- File.unlink("test_stdout.#{$$}.log") rescue nil
+ if rdr_pid == $$
+ File.unlink(new_out.path) rescue nil
+ File.unlink(new_err.path) rescue nil
+ end
  end
 
  begin
@@ -51,6 +72,7 @@ def redirect_test_io
  ensure
  STDERR.reopen(orig_err)
  STDOUT.reopen(orig_out)
+ Process.kill(:TERM, pid) if pid
  end
  end
 
@@ -265,34 +287,20 @@ def wait_for_death(pid)
  raise "PID:#{pid} never died!"
  end
 
- # executes +cmd+ and chunks its STDOUT
- def chunked_spawn(stdout, *cmd)
- fork {
- crd, cwr = IO.pipe
- crd.binmode
- cwr.binmode
- crd.sync = cwr.sync = true
-
- pid = fork {
- STDOUT.reopen(cwr)
- crd.close
- cwr.close
- exec(*cmd)
- }
- cwr.close
- begin
- buf = crd.readpartial(16384)
- stdout.write("#{'%x' % buf.size}\r\n#{buf}")
- rescue EOFError
- stdout.write("0\r\n")
- pid, status = Process.waitpid(pid)
- exit status.exitstatus
- end while true
- }
- end
-
  def reset_sig_handlers
  %w(WINCH QUIT INT TERM USR1 USR2 HUP TTIN TTOU CHLD).each do |sig|
  trap(sig, "DEFAULT")
  end
  end
+
+ def tcp_socket(*args)
+ sock = TCPSocket.new(*args)
+ sock.nonblock = false
+ sock
+ end
+
+ def unix_socket(*args)
+ sock = UNIXSocket.new(*args)
+ sock.nonblock = false
+ sock
+ end
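
Aside (not part of the gem): the new tcp_socket/unix_socket helpers, together with the require 'io/nonblock' added above, force test sockets back into blocking mode so sysread waits for data instead of raising IO::EAGAINWaitReadable on Rubies or platforms where sockets may start out non-blocking. A minimal illustration of the behavior the helpers rely on, using a socketpair as a stand-in for a real unicorn listener:

    # illustrative only: force blocking mode, as tcp_socket/unix_socket do
    require 'socket'
    require 'io/nonblock'

    rd, wr = UNIXSocket.pair
    rd.nonblock = false            # same call the helpers make on new sockets
    wr.syswrite("GET / HTTP/1.0\r\n\r\n")
    p rd.sysread(4096)             # blocks until data arrives, then returns it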