unicorn 5.4.1 → 6.1.0

Files changed (81)
  1. checksums.yaml +4 -4
  2. data/.manifest +10 -5
  3. data/.olddoc.yml +15 -7
  4. data/Application_Timeouts +4 -4
  5. data/CONTRIBUTORS +6 -2
  6. data/Documentation/.gitignore +1 -3
  7. data/Documentation/unicorn.1 +222 -0
  8. data/Documentation/unicorn_rails.1 +207 -0
  9. data/FAQ +1 -1
  10. data/GIT-VERSION-FILE +1 -1
  11. data/GIT-VERSION-GEN +1 -1
  12. data/GNUmakefile +112 -57
  13. data/HACKING +2 -9
  14. data/ISSUES +29 -33
  15. data/KNOWN_ISSUES +2 -2
  16. data/LATEST +16 -21
  17. data/LICENSE +2 -2
  18. data/Links +13 -11
  19. data/NEWS +197 -0
  20. data/README +27 -14
  21. data/SIGNALS +1 -1
  22. data/Sandbox +5 -5
  23. data/archive/slrnpull.conf +1 -1
  24. data/bin/unicorn +3 -1
  25. data/bin/unicorn_rails +2 -2
  26. data/examples/big_app_gc.rb +1 -1
  27. data/examples/logrotate.conf +3 -3
  28. data/examples/nginx.conf +4 -3
  29. data/examples/unicorn.conf.minimal.rb +2 -2
  30. data/examples/unicorn.conf.rb +2 -2
  31. data/examples/unicorn@.service +7 -0
  32. data/ext/unicorn_http/c_util.h +5 -13
  33. data/ext/unicorn_http/common_field_optimization.h +23 -6
  34. data/ext/unicorn_http/epollexclusive.h +124 -0
  35. data/ext/unicorn_http/ext_help.h +0 -24
  36. data/ext/unicorn_http/extconf.rb +32 -6
  37. data/ext/unicorn_http/global_variables.h +3 -3
  38. data/ext/unicorn_http/httpdate.c +3 -2
  39. data/ext/unicorn_http/unicorn_http.c +262 -237
  40. data/ext/unicorn_http/unicorn_http.rl +52 -27
  41. data/lib/unicorn/configurator.rb +25 -4
  42. data/lib/unicorn/http_request.rb +12 -3
  43. data/lib/unicorn/http_server.rb +62 -36
  44. data/lib/unicorn/launcher.rb +1 -1
  45. data/lib/unicorn/oob_gc.rb +5 -5
  46. data/lib/unicorn/select_waiter.rb +6 -0
  47. data/lib/unicorn/socket_helper.rb +1 -0
  48. data/lib/unicorn/tmpio.rb +8 -2
  49. data/lib/unicorn/util.rb +1 -1
  50. data/lib/unicorn/version.rb +1 -1
  51. data/lib/unicorn/worker.rb +16 -2
  52. data/lib/unicorn.rb +21 -9
  53. data/man/man1/unicorn.1 +89 -88
  54. data/man/man1/unicorn_rails.1 +78 -83
  55. data/t/GNUmakefile +3 -72
  56. data/t/README +4 -4
  57. data/t/t0301-no-default-middleware-ignored-in-config.sh +25 -0
  58. data/t/t0301.ru +13 -0
  59. data/t/test-lib.sh +2 -1
  60. data/test/benchmark/README +14 -4
  61. data/test/benchmark/ddstream.ru +50 -0
  62. data/test/benchmark/readinput.ru +40 -0
  63. data/test/benchmark/uconnect.perl +66 -0
  64. data/test/exec/test_exec.rb +14 -12
  65. data/test/test_helper.rb +38 -30
  66. data/test/unit/test_ccc.rb +4 -3
  67. data/test/unit/test_http_parser.rb +16 -0
  68. data/test/unit/test_http_parser_ng.rb +81 -0
  69. data/test/unit/test_server.rb +81 -7
  70. data/test/unit/test_signals.rb +6 -6
  71. data/test/unit/test_socket_helper.rb +1 -1
  72. data/test/unit/test_upload.rb +9 -14
  73. data/test/unit/test_util.rb +29 -3
  74. data/test/unit/test_waiter.rb +34 -0
  75. data/unicorn.gemspec +8 -7
  76. metadata +19 -13
  77. data/Documentation/GNUmakefile +0 -30
  78. data/Documentation/unicorn.1.txt +0 -187
  79. data/Documentation/unicorn_rails.1.txt +0 -175
  80. data/t/hijack.ru +0 -55
  81. data/t/t0200-rack-hijack.sh +0 -51
data/t/t0301-no-default-middleware-ignored-in-config.sh ADDED
@@ -0,0 +1,25 @@
+ #!/bin/sh
+ . ./test-lib.sh
+ t_plan 3 "-N / --no-default-middleware option not supported in config.ru"
+
+ t_begin "setup and start" && {
+ unicorn_setup
+ RACK_ENV=development unicorn -D -c $unicorn_config t0301.ru
+ unicorn_wait_start
+ }
+
+ t_begin "check switches parsed as expected and -N ignored for Rack::Lint" && {
+ debug=false
+ lint=
+ eval "$(curl -sf http://$listen/vars)"
+ test x"$debug" = xtrue
+ test x"$lint" != x
+ test -f "$lint"
+ }
+
+ t_begin "killing succeeds" && {
+ kill $unicorn_pid
+ check_stderr
+ }
+
+ t_done
data/t/t0301.ru ADDED
@@ -0,0 +1,13 @@
+ #\-N --debug
+ run(lambda do |env|
+ case env['PATH_INFO']
+ when '/vars'
+ b = "debug=#{$DEBUG.inspect}\n" \
+ "lint=#{caller.grep(%r{rack/lint\.rb})[0].split(':')[0]}\n"
+ end
+ h = {
+ 'Content-Length' => b.size.to_s,
+ 'Content-Type' => 'text/plain',
+ }
+ [ 200, h, [ b ] ]
+ end)
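
Side note: the `#\-N --debug` line at the top of t0301.ru uses the historical rackup convention of embedding extra command-line switches in the first line of a config.ru; the test above confirms unicorn honors `--debug` from that line while ignoring `-N`. A hand-rolled sketch of that parsing convention (not unicorn's actual option handling; the helper name here is invented):

    require 'shellwords'

    # Sketch only: read the first line of a rackup file and, if it begins with
    # "#\", treat the remainder as embedded command-line switches.
    def embedded_switches(ru_path)
      first = File.open(ru_path, &:gets).to_s
      first.start_with?('#\\') ? first[2..-1].shellsplit : []
    end

    # embedded_switches('t0301.ru') #=> ["-N", "--debug"]
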
data/t/test-lib.sh CHANGED
@@ -94,7 +94,8 @@ check_stderr () {
  set +u
  _r_err=${1-${r_err}}
  set -u
- if grep -v $T $_r_err | grep -i Error
+ if grep -v $T $_r_err | grep -i Error | \
+ grep -v NameError.*Unicorn::Waiter
  then
  die "Errors found in $_r_err"
  elif grep SIGKILL $_r_err
data/test/benchmark/README CHANGED
@@ -42,9 +42,19 @@ The benchmark client is usually httperf.
  Another gentle reminder: performance with slow networks/clients
  is NOT our problem. That is the job of nginx (or similar).

+ == ddstream.ru
+
+ Standalone Rack app intended to show how BAD we are at slow clients.
+ See usage in comments.
+
+ == readinput.ru
+
+ Standalone Rack app intended to show how bad we are with slow uploaders.
+ See usage in comments.
+
  == Contributors

- This directory is maintained independently in the "benchmark" branch
- based against v0.1.0. Only changes to this directory (test/benchmarks)
- are committed to this branch although the master branch may merge this
- branch occassionaly.
+ This directory is intended to remain stable. Do not make changes
+ to benchmarking code which can change performance and invalidate
+ results across revisions. Instead, write new benchmarks and update
+ coments/documentation as necessary.
data/test/benchmark/ddstream.ru ADDED
@@ -0,0 +1,50 @@
+ # This app is intended to test large HTTP responses with or without
+ # a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+ # reverse proxy, unicorn will be unresponsive when client count exceeds
+ # worker_processes.
+ #
+ # To demonstrate how bad unicorn is at slowly reading clients:
+ #
+ # # in one terminal, start unicorn with one worker:
+ # unicorn -E none -l 127.0.0.1:8080 test/benchmark/ddstream.ru
+ #
+ # # in a different terminal, start more slow curl processes than
+ # # unicorn workers and watch time outputs
+ # curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+ # curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+ # wait
+ #
+ # The last client won't see a response until the first one is done reading
+ #
+ # nginx note: do not change the default "proxy_buffering" behavior.
+ # Setting "proxy_buffering off" prevents nginx from protecting unicorn.
+
+ # totally standalone rack app to stream a giant response
+ class BigResponse
+ def initialize(bs, count)
+ @buf = "#{bs.to_s(16)}\r\n#{' ' * bs}\r\n"
+ @count = count
+ @res = [ 200,
+ { 'Transfer-Encoding' => -'chunked', 'Content-Type' => 'text/plain' },
+ self
+ ]
+ end
+
+ # rack response body iterator
+ def each
+ (1..@count).each { yield @buf }
+ yield -"0\r\n\r\n"
+ end
+
+ # rack app entry endpoint
+ def call(_env)
+ @res
+ end
+ end
+
+ # default to a giant (128M) response because kernel socket buffers
+ # can be ridiculously large on some systems
+ bs = ENV['bs'] ? ENV['bs'].to_i : 65536
+ count = ENV['count'] ? ENV['count'].to_i : 2048
+ warn "serving response with bs=#{bs} count=#{count} (#{bs*count} bytes)"
+ run BigResponse.new(bs, count)
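
Side note on the framing BigResponse hand-rolls above: each chunk of a "Transfer-Encoding: chunked" body is the chunk size in hex, CRLF, the data, CRLF, and a zero-length chunk terminates the stream. A throwaway Ruby encoder for comparison (illustrative only):

    # Illustrative chunked-body framing, mirroring BigResponse's @buf layout.
    def chunk(data)
      "#{data.bytesize.to_s(16)}\r\n#{data}\r\n"
    end

    body = chunk('hello') + chunk('world') + "0\r\n\r\n"
    p body # => "5\r\nhello\r\n5\r\nworld\r\n0\r\n\r\n"
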
data/test/benchmark/readinput.ru ADDED
@@ -0,0 +1,40 @@
+ # This app is intended to test large HTTP requests with or without
+ # a fully-buffering reverse proxy such as nginx. Without a fully-buffering
+ # reverse proxy, unicorn will be unresponsive when client count exceeds
+ # worker_processes.
+
+ DOC = <<DOC
+ To demonstrate how bad unicorn is at slowly uploading clients:
+
+ # in one terminal, start unicorn with one worker:
+ unicorn -E none -l 127.0.0.1:8080 test/benchmark/readinput.ru
+
+ # in a different terminal, upload 45M from multiple curl processes:
+ dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+ --trace-time -v http://127.0.0.1:8080/ &
+ dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+ --trace-time -v http://127.0.0.1:8080/ &
+ wait
+
+ # The last client won't see a response until the first one is done uploading
+ # You also won't be able to make GET requests to view this documentation
+ # while clients are uploading. You can also view the stderr debug output
+ # of unicorn (see logging code in #{__FILE__}).
+ DOC
+
+ run(lambda do |env|
+ input = env['rack.input']
+ buf = ''.b
+
+ # default logger contains timestamps, rely on that so users can
+ # see what the server is doing
+ l = env['rack.logger']
+
+ l.debug('BEGIN reading input ...') if l
+ :nop while input.read(16384, buf)
+ l.debug('DONE reading input ...') if l
+
+ buf.clear
+ [ 200, [ %W(Content-Length #{DOC.size}), %w(Content-Type text/plain) ],
+ [ DOC ] ]
+ end)
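
Side note: the `:nop while input.read(16384, buf)` loop drains rack.input by reusing a single binary buffer; IO#read(maxlen, outbuf) fills outbuf in place and returns nil at EOF, so no new string is allocated per 16 kB chunk. A standalone illustration with StringIO:

    require 'stringio'

    # Reuse one buffer while draining an IO-like object, as readinput.ru does.
    input = StringIO.new('x' * 100_000)
    buf = ''.b
    bytes = 0
    bytes += buf.bytesize while input.read(16_384, buf)
    p bytes # => 100000
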
data/test/benchmark/uconnect.perl ADDED
@@ -0,0 +1,66 @@
+ #!/usr/bin/perl -w
+ # Benchmark script to spawn some processes and hammer a local unicorn
+ # to test accept loop performance. This only does Unix sockets.
+ # There's plenty of TCP benchmarking tools out there, and TCP port reuse
+ # has predictability problems since unicorn can't do persistent connections.
+ # Written in Perl for the same reason: predictability.
+ # Ruby GC is not as predictable as Perl refcounting.
+ use strict;
+ use Socket qw(AF_UNIX SOCK_STREAM sockaddr_un);
+ use POSIX qw(:sys_wait_h);
+ use Getopt::Std;
+ # -c / -n switches stolen from ab(1)
+ my $usage = "$0 [-c CONCURRENCY] [-n NUM_REQUESTS] SOCKET_PATH\n";
+ our $opt_c = 2;
+ our $opt_n = 1000;
+ getopts('c:n:') or die $usage;
+ my $unix_path = shift or die $usage;
+ use constant REQ => "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
+ use constant REQ_LEN => length(REQ);
+ use constant BUFSIZ => 8192;
+ $^F = 99; # don't waste syscall time with FD_CLOEXEC
+
+ my %workers; # pid => worker num
+ die "-n $opt_n not evenly divisible by -c $opt_c\n" if $opt_n % $opt_c;
+ my $n_per_worker = $opt_n / $opt_c;
+ my $addr = sockaddr_un($unix_path);
+
+ for my $num (1..$opt_c) {
+ defined(my $pid = fork) or die "fork failed: $!\n";
+ if ($pid) {
+ $workers{$pid} = $num;
+ } else {
+ work($n_per_worker);
+ }
+ }
+
+ reap_worker(0) while scalar keys %workers;
+ exit;
+
+ sub work {
+ my ($n) = @_;
+ my ($buf, $x);
+ for (1..$n) {
+ socket(S, AF_UNIX, SOCK_STREAM, 0) or die "socket: $!";
+ connect(S, $addr) or die "connect: $!";
+ defined($x = syswrite(S, REQ)) or die "write: $!";
+ $x == REQ_LEN or die "short write: $x != ".REQ_LEN."\n";
+ do {
+ $x = sysread(S, $buf, BUFSIZ);
+ unless (defined $x) {
+ next if $!{EINTR};
+ die "sysread: $!\n";
+ }
+ } until ($x == 0);
+ }
+ exit 0;
+ }
+
+ sub reap_worker {
+ my ($flags) = @_;
+ my $pid = waitpid(-1, $flags);
+ return if !defined $pid || $pid <= 0;
+ my $p = delete $workers{$pid} || '(unknown)';
+ warn("$pid [$p] exited with $?\n") if $?;
+ $p;
+ }
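
For readers more at home in Ruby, a rough analogue of uconnect.perl is sketched below. It is purely illustrative (names and defaults invented here); the diff deliberately uses Perl because refcounting gives more predictable timing than Ruby's GC:

    #!/usr/bin/env ruby
    # Illustrative Unix-socket hammer, loosely mirroring uconnect.perl.
    require 'socket'

    REQ = "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n"
    path        = ARGV[0] or abort "usage: #$0 SOCKET_PATH [CONCURRENCY] [REQUESTS]"
    concurrency = (ARGV[1] || 2).to_i
    requests    = (ARGV[2] || 1000).to_i

    pids = concurrency.times.map do
      fork do
        (requests / concurrency).times do
          s = UNIXSocket.new(path)
          s.write(REQ)
          s.read  # drain until EOF, like the sysread loop in the Perl script
          s.close
        end
      end
    end
    pids.each { |pid| Process.waitpid(pid) }
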
data/test/exec/test_exec.rb CHANGED
@@ -45,8 +45,9 @@ end

  COMMON_TMP = Tempfile.new('unicorn_tmp') unless defined?(COMMON_TMP)

+ HEAVY_WORKERS = 2
  HEAVY_CFG = <<-EOS
- worker_processes 4
+ worker_processes #{HEAVY_WORKERS}
  timeout 30
  logger Logger.new('#{COMMON_TMP.path}')
  before_fork do |server, worker|
@@ -573,7 +574,7 @@ EOF
  assert_equal String, results[0].class
  worker_pid = results[0].to_i
  assert_not_equal pid, worker_pid
- s = UNIXSocket.new(tmp.path)
+ s = unix_socket(tmp.path)
  s.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = ''
  loop { results << s.sysread(4096) } rescue nil
@@ -606,6 +607,7 @@ EOF
  def test_weird_config_settings
  File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
  ucfg = Tempfile.new('unicorn_test_config')
+ proc_total = HEAVY_WORKERS + 1 # + 1 for master
  ucfg.syswrite(HEAVY_CFG)
  pid = xfork do
  redirect_test_io do
@@ -616,9 +618,9 @@ EOF
  results = retry_hit(["http://#{@addr}:#{@port}/"])
  assert_equal String, results[0].class
  wait_master_ready(COMMON_TMP.path)
- wait_workers_ready(COMMON_TMP.path, 4)
+ wait_workers_ready(COMMON_TMP.path, HEAVY_WORKERS)
  bf = File.readlines(COMMON_TMP.path).grep(/\bbefore_fork: worker=/)
- assert_equal 4, bf.size
+ assert_equal HEAVY_WORKERS, bf.size
  rotate = Tempfile.new('unicorn_rotate')

  File.rename(COMMON_TMP.path, rotate.path)
@@ -630,20 +632,20 @@ EOF
  tries = DEFAULT_TRIES
  log = File.readlines(rotate.path)
  while (tries -= 1) > 0 &&
- log.grep(/reopening logs\.\.\./).size < 5
+ log.grep(/reopening logs\.\.\./).size < proc_total
  sleep DEFAULT_RES
  log = File.readlines(rotate.path)
  end
- assert_equal 5, log.grep(/reopening logs\.\.\./).size
+ assert_equal proc_total, log.grep(/reopening logs\.\.\./).size
  assert_equal 0, log.grep(/done reopening logs/).size

  tries = DEFAULT_TRIES
  log = File.readlines(COMMON_TMP.path)
- while (tries -= 1) > 0 && log.grep(/done reopening logs/).size < 5
+ while (tries -= 1) > 0 && log.grep(/done reopening logs/).size < proc_total
  sleep DEFAULT_RES
  log = File.readlines(COMMON_TMP.path)
  end
- assert_equal 5, log.grep(/done reopening logs/).size
+ assert_equal proc_total, log.grep(/done reopening logs/).size
  assert_equal 0, log.grep(/reopening logs\.\.\./).size

  Process.kill(:QUIT, pid)
@@ -730,7 +732,7 @@ EOF
  wait_for_file(sock_path)
  assert File.socket?(sock_path)

- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)

@@ -740,7 +742,7 @@ EOF
  wait_for_file(sock_path)
  assert File.socket?(sock_path)

- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)

@@ -775,7 +777,7 @@ EOF
  assert_equal pid, File.read(pid_file).to_i
  assert File.socket?(sock_path), "socket created"

- sock = UNIXSocket.new(sock_path)
+ sock = unix_socket(sock_path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)

@@ -801,7 +803,7 @@ EOF
  wait_for_file(new_sock_path)
  assert File.socket?(new_sock_path), "socket exists"
  @sockets.each do |path|
- sock = UNIXSocket.new(path)
+ sock = unix_socket(path)
  sock.syswrite("GET / HTTP/1.0\r\n\r\n")
  results = sock.sysread(4096)
  assert_equal String, results.class
data/test/test_helper.rb CHANGED
@@ -28,22 +28,43 @@ require 'tempfile'
  require 'fileutils'
  require 'logger'
  require 'unicorn'
+ require 'io/nonblock'

  if ENV['DEBUG']
  require 'ruby-debug'
  Debugger.start
  end

+ unless RUBY_VERSION < '3.1'
+ warn "Unicorn was only tested against MRI up to 3.0.\n" \
+ "It might not properly work with #{RUBY_VERSION}"
+ end
+
  def redirect_test_io
  orig_err = STDERR.dup
  orig_out = STDOUT.dup
- STDERR.reopen("test_stderr.#{$$}.log", "a")
- STDOUT.reopen("test_stdout.#{$$}.log", "a")
+ rdr_pid = $$
+ new_out = File.open("test_stdout.#$$.log", "a")
+ new_err = File.open("test_stderr.#$$.log", "a")
+ new_out.sync = new_err.sync = true
+
+ if tail = ENV['TAIL'] # "tail -F" if GNU, "tail -f" otherwise
+ require 'shellwords'
+ cmd = tail.shellsplit
+ cmd << new_out.path
+ cmd << new_err.path
+ pid = Process.spawn(*cmd, { 1 => 2, :pgroup => true })
+ sleep 0.1 # wait for tail(1) to startup
+ end
+ STDERR.reopen(new_err)
+ STDOUT.reopen(new_out)
  STDERR.sync = STDOUT.sync = true

  at_exit do
- File.unlink("test_stderr.#{$$}.log") rescue nil
- File.unlink("test_stdout.#{$$}.log") rescue nil
+ if rdr_pid == $$
+ File.unlink(new_out.path) rescue nil
+ File.unlink(new_err.path) rescue nil
+ end
  end

  begin
@@ -51,6 +72,7 @@ def redirect_test_io
  ensure
  STDERR.reopen(orig_err)
  STDOUT.reopen(orig_out)
+ Process.kill(:TERM, pid) if pid
  end
  end

@@ -265,34 +287,20 @@ def wait_for_death(pid)
  raise "PID:#{pid} never died!"
  end

- # executes +cmd+ and chunks its STDOUT
- def chunked_spawn(stdout, *cmd)
- fork {
- crd, cwr = IO.pipe
- crd.binmode
- cwr.binmode
- crd.sync = cwr.sync = true
-
- pid = fork {
- STDOUT.reopen(cwr)
- crd.close
- cwr.close
- exec(*cmd)
- }
- cwr.close
- begin
- buf = crd.readpartial(16384)
- stdout.write("#{'%x' % buf.size}\r\n#{buf}")
- rescue EOFError
- stdout.write("0\r\n")
- pid, status = Process.waitpid(pid)
- exit status.exitstatus
- end while true
- }
- end
-
  def reset_sig_handlers
  %w(WINCH QUIT INT TERM USR1 USR2 HUP TTIN TTOU CHLD).each do |sig|
  trap(sig, "DEFAULT")
  end
  end
+
+ def tcp_socket(*args)
+ sock = TCPSocket.new(*args)
+ sock.nonblock = false
+ sock
+ end
+
+ def unix_socket(*args)
+ sock = UNIXSocket.new(*args)
+ sock.nonblock = false
+ sock
+ end
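
Side note on the new tcp_socket/unix_socket helpers: they wrap socket creation and force blocking mode via io/nonblock, presumably so plain sysread/syswrite calls in the tests behave deterministically regardless of whether the Ruby in use hands sockets out in non-blocking mode. A minimal standalone illustration of the mechanism:

    require 'socket'
    require 'io/nonblock' # adds IO#nonblock= / IO#nonblock?

    srv  = TCPServer.new('127.0.0.1', 0)           # throwaway listener, random port
    sock = TCPSocket.new('127.0.0.1', srv.addr[1])
    sock.nonblock = false                          # what the helpers enforce
    p sock.nonblock?                               # => false
    sock.close
    srv.close
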
data/test/unit/test_ccc.rb CHANGED
@@ -3,6 +3,7 @@ require 'unicorn'
  require 'io/wait'
  require 'tempfile'
  require 'test/unit'
+ require './test/test_helper'

  class TestCccTCPI < Test::Unit::TestCase
  def test_ccc_tcpi
@@ -42,7 +43,7 @@ class TestCccTCPI < Test::Unit::TestCase
  wr.close

  # make sure the server is running, at least
- client = TCPSocket.new(host, port)
+ client = tcp_socket(host, port)
  client.write("GET / HTTP/1.1\r\nHost: example.com\r\n\r\n")
  assert client.wait(10), 'never got response from server'
  res = client.read
@@ -51,13 +52,13 @@ class TestCccTCPI < Test::Unit::TestCase
  client.close

  # start a slow request...
- sleeper = TCPSocket.new(host, port)
+ sleeper = tcp_socket(host, port)
  sleeper.write("GET /sleep HTTP/1.1\r\nHost: example.com\r\n\r\n")

  # and a bunch of aborted ones
  nr = 100
  nr.times do |i|
- client = TCPSocket.new(host, port)
+ client = tcp_socket(host, port)
  client.write("GET /collections/#{rand(10000)} HTTP/1.1\r\n" \
  "Host: example.com\r\n\r\n")
  client.close
data/test/unit/test_http_parser.rb CHANGED
@@ -865,4 +865,20 @@ class HttpParserTest < Test::Unit::TestCase
  rescue LoadError
  # not all Ruby implementations have objspace
  end
+
+ def test_dedupe
+ parser = HttpParser.new
+ # n.b. String#freeze optimization doesn't work under modern test-unit
+ exp = -'HTTP_HOST'
+ get = "GET / HTTP/1.1\r\nHost: example.com\r\nHavpbea-fhpxf: true\r\n\r\n"
+ assert parser.add_parse(get)
+ key = parser.env.keys.detect { |k| k == exp }
+ assert_same exp, key
+
+ if RUBY_VERSION.to_r >= 2.6 # 2.6.0-rc1+
+ exp = -'HTTP_HAVPBEA_FHPXF'
+ key = parser.env.keys.detect { |k| k == exp }
+ assert_same exp, key
+ end
+ end if RUBY_VERSION.to_r >= 2.5 && RUBY_ENGINE == 'ruby'
  end
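
Side note: the assert_same calls above pass because the parser hands back interned copies of common header keys; String#-@ (Ruby 2.5+) is the Ruby-level face of that interning, returning a frozen, deduplicated string. A quick demonstration on CRuby:

    # Two separately-built strings with the same content share one object
    # after String#-@ dedup, which is what assert_same checks in test_dedupe.
    a = -('HTTP_' + 'HOST')
    b = -('HTTP_HO' + 'ST')
    p a.equal?(b) # => true on CRuby
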
data/test/unit/test_http_parser_ng.rb CHANGED
@@ -11,6 +11,20 @@ class HttpParserNgTest < Test::Unit::TestCase
  @parser = HttpParser.new
  end

+ # RFC 7230 allows gzip/deflate/compress Transfer-Encoding,
+ # but "chunked" must be last if used
+ def test_is_chunked
+ [ 'chunked,chunked', 'chunked,gzip', 'chunked,gzip,chunked' ].each do |x|
+ assert_raise(HttpParserError) { HttpParser.is_chunked?(x) }
+ end
+ [ 'gzip, chunked', 'gzip,chunked', 'gzip ,chunked' ].each do |x|
+ assert HttpParser.is_chunked?(x)
+ end
+ [ 'gzip', 'xhunked', 'xchunked' ].each do |x|
+ assert !HttpParser.is_chunked?(x)
+ end
+ end
+
  def test_parser_max_len
  assert_raises(RangeError) do
  HttpParser.max_header_len = 0xffffffff + 1
@@ -566,6 +580,73 @@ class HttpParserNgTest < Test::Unit::TestCase
  end
  end

+ def test_duplicate_content_length
+ str = "PUT / HTTP/1.1\r\n" \
+ "Content-Length: 1\r\n" \
+ "Content-Length: 9\r\n" \
+ "\r\n"
+ assert_raises(HttpParserError) { @parser.headers({}, str) }
+ end
+
+ def test_chunked_overrides_content_length
+ order = [ 'Transfer-Encoding: chunked', 'Content-Length: 666' ]
+ %w(a b).each do |x|
+ str = "PUT /#{x} HTTP/1.1\r\n" \
+ "#{order.join("\r\n")}" \
+ "\r\n\r\na\r\nhelloworld\r\n0\r\n\r\n"
+ order.reverse!
+ env = @parser.headers({}, str)
+ assert_nil @parser.content_length
+ assert_equal 'chunked', env['HTTP_TRANSFER_ENCODING']
+ assert_equal '666', env['CONTENT_LENGTH'],
+ 'Content-Length logged so the app can log a possible client bug/attack'
+ @parser.filter_body(dst = '', str)
+ assert_equal 'helloworld', dst
+ @parser.parse # handle the non-existent trailer
+ assert @parser.next?
+ end
+ end
+
+ def test_chunked_order_good
+ str = "PUT /x HTTP/1.1\r\n" \
+ "Transfer-Encoding: gzip\r\n" \
+ "Transfer-Encoding: chunked\r\n" \
+ "\r\n"
+ env = @parser.headers({}, str)
+ assert_equal 'gzip,chunked', env['HTTP_TRANSFER_ENCODING']
+ assert_nil @parser.content_length
+
+ @parser.clear
+ str = "PUT /x HTTP/1.1\r\n" \
+ "Transfer-Encoding: gzip, chunked\r\n" \
+ "\r\n"
+ env = @parser.headers({}, str)
+ assert_equal 'gzip, chunked', env['HTTP_TRANSFER_ENCODING']
+ assert_nil @parser.content_length
+ end
+
+ def test_chunked_order_bad
+ str = "PUT /x HTTP/1.1\r\n" \
+ "Transfer-Encoding: chunked\r\n" \
+ "Transfer-Encoding: gzip\r\n" \
+ "\r\n"
+ assert_raise(HttpParserError) { @parser.headers({}, str) }
+ end
+
+ def test_double_chunked
+ str = "PUT /x HTTP/1.1\r\n" \
+ "Transfer-Encoding: chunked\r\n" \
+ "Transfer-Encoding: chunked\r\n" \
+ "\r\n"
+ assert_raise(HttpParserError) { @parser.headers({}, str) }
+
+ @parser.clear
+ str = "PUT /x HTTP/1.1\r\n" \
+ "Transfer-Encoding: chunked,chunked\r\n" \
+ "\r\n"
+ assert_raise(HttpParserError) { @parser.headers({}, str) }
+ end
+
  def test_backtrace_is_empty
  begin
  @parser.headers({}, "AAADFSFDSFD\r\n\r\n")