unicorn 5.5.1 → 5.6.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- checksums.yaml +4 -4
- data/.olddoc.yml +12 -7
- data/Documentation/.gitignore +1 -3
- data/Documentation/unicorn.1 +222 -0
- data/Documentation/unicorn_rails.1 +207 -0
- data/FAQ +1 -1
- data/GIT-VERSION-GEN +1 -1
- data/GNUmakefile +15 -5
- data/HACKING +1 -1
- data/ISSUES +12 -12
- data/KNOWN_ISSUES +2 -2
- data/Links +5 -5
- data/README +6 -6
- data/SIGNALS +1 -1
- data/Sandbox +2 -2
- data/archive/slrnpull.conf +1 -1
- data/examples/big_app_gc.rb +1 -1
- data/examples/logrotate.conf +2 -2
- data/examples/nginx.conf +1 -1
- data/examples/unicorn.conf.minimal.rb +2 -2
- data/examples/unicorn.conf.rb +2 -2
- data/examples/unicorn@.service +7 -0
- data/ext/unicorn_http/unicorn_http.rl +43 -5
- data/lib/unicorn.rb +1 -1
- data/lib/unicorn/configurator.rb +13 -3
- data/lib/unicorn/http_request.rb +11 -0
- data/lib/unicorn/http_server.rb +32 -4
- data/lib/unicorn/oob_gc.rb +2 -2
- data/lib/unicorn/tmpio.rb +8 -2
- data/test/benchmark/README +14 -4
- data/test/benchmark/ddstream.ru +50 -0
- data/test/benchmark/readinput.ru +40 -0
- data/test/benchmark/uconnect.perl +66 -0
- data/test/exec/test_exec.rb +9 -7
- data/test/test_helper.rb +0 -26
- data/test/unit/test_http_parser_ng.rb +81 -0
- data/test/unit/test_server.rb +30 -0
- data/test/unit/test_upload.rb +4 -9
- data/test/unit/test_util.rb +1 -1
- data/unicorn.gemspec +3 -3
- metadata +10 -7
- data/Documentation/GNUmakefile +0 -30
- data/Documentation/unicorn.1.txt +0 -187
- data/Documentation/unicorn_rails.1.txt +0 -173
data/lib/unicorn/http_request.rb
CHANGED
@@ -188,4 +188,15 @@ def write_http_header(socket) # :nodoc:
       HTTP_RESPONSE_START.each { |c| socket.write(c) }
     end
   end
+
+  # called by ext/unicorn_http/unicorn_http.rl via rb_funcall
+  def self.is_chunked?(v) # :nodoc:
+    vals = v.split(/[ \t]*,[ \t]*/).map!(&:downcase)
+    if vals.pop == 'chunked'.freeze
+      return true unless vals.include?('chunked'.freeze)
+      raise Unicorn::HttpParserError, 'double chunked', []
+    end
+    return false unless vals.include?('chunked'.freeze)
+    raise Unicorn::HttpParserError, 'chunked not last', []
+  end
 end
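The new tests in data/test/unit/test_http_parser_ng.rb further below exercise this helper as HttpParser.is_chunked?. A rough sketch of the ordering rule it enforces (illustrative values only; the require line assumes loading 'unicorn' makes Unicorn::HttpParser available):

  require 'unicorn' # assumption: this loads Unicorn::HttpParser

  Unicorn::HttpParser.is_chunked?('chunked')        # => true
  Unicorn::HttpParser.is_chunked?('gzip, chunked')  # => true  ("chunked" is last)
  Unicorn::HttpParser.is_chunked?('gzip')           # => false (no "chunked" at all)

  # "chunked" repeated, or present but not last, raises Unicorn::HttpParserError:
  # Unicorn::HttpParser.is_chunked?('chunked, chunked')  # 'double chunked'
  # Unicorn::HttpParser.is_chunked?('chunked, gzip')     # 'chunked not last'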
data/lib/unicorn/http_server.rb
CHANGED
@@ -6,7 +6,7 @@
 # forked worker children.
 #
 # Users do not need to know the internals of this class, but reading the
-# {source}[https://
+# {source}[https://yhbt.net/unicorn.git/tree/lib/unicorn/http_server.rb]
 # is education for programmers wishing to learn how unicorn works.
 # See Unicorn::Configurator for information on how to configure unicorn.
 class Unicorn::HttpServer
@@ -15,7 +15,7 @@ class Unicorn::HttpServer
                 :before_fork, :after_fork, :before_exec,
                 :listener_opts, :preload_app,
                 :orig_app, :config, :ready_pipe, :user,
-                :default_middleware
+                :default_middleware, :early_hints
   attr_writer   :after_worker_exit, :after_worker_ready, :worker_exec

   attr_reader :pid, :logger
@@ -588,6 +588,25 @@ def handle_error(client, e)
   rescue
   end

+  def e103_response_write(client, headers)
+    response = if @request.response_start_sent
+      "103 Early Hints\r\n"
+    else
+      "HTTP/1.1 103 Early Hints\r\n"
+    end
+
+    headers.each_pair do |k, vs|
+      next if !vs || vs.empty?
+      values = vs.to_s.split("\n".freeze)
+      values.each do |v|
+        response << "#{k}: #{v}\r\n"
+      end
+    end
+    response << "\r\n".freeze
+    response << "HTTP/1.1 ".freeze if @request.response_start_sent
+    client.write(response)
+  end
+
   def e100_response_write(client, env)
     # We use String#freeze to avoid allocations under Ruby 2.1+
     # Not many users hit this code path, so it's better to reduce the
@@ -602,7 +621,15 @@ def e100_response_write(client, env)
   # once a client is accepted, it is processed in its entirety here
   # in 3 easy steps: read request, call app, write app response
   def process_client(client)
-
+    env = @request.read(client)
+
+    if early_hints
+      env["rack.early_hints"] = lambda do |headers|
+        e103_response_write(client, headers)
+      end
+    end
+
+    status, headers, body = @app.call(env)

     begin
       return if @request.hijacked?
@@ -686,6 +713,7 @@ def worker_loop(worker)
     trap(:USR1) { nr = -65536 }

     ready = readers.dup
+    nr_listeners = readers.size
     @after_worker_ready.call(self, worker)

     begin
@@ -708,7 +736,7 @@ def worker_loop(worker)
       # we're probably reasonably busy, so avoid calling select()
       # and do a speculative non-blocking accept() on ready listeners
       # before we sleep again in select().
-
+      if nr == nr_listeners
         tmp = ready.dup
         redo
       end
data/lib/unicorn/oob_gc.rb
CHANGED
@@ -43,8 +43,8 @@
 # use Unicorn::OobGC, 2, %r{\A/(?:expensive/foo|more_expensive/foo)}
 #
 # Feedback from users of early implementations of this module:
-# * https://
-# * https://
+# * https://yhbt.net/unicorn-public/0BFC98E9-072B-47EE-9A70-05478C20141B@lukemelia.com/
+# * https://yhbt.net/unicorn-public/AANLkTilUbgdyDv9W1bi-s_W6kq9sOhWfmuYkKLoKGOLj@mail.gmail.com/

 module Unicorn::OobGC

data/lib/unicorn/tmpio.rb
CHANGED
@@ -11,12 +11,18 @@ class Unicorn::TmpIO < File
   # immediately, switched to binary mode, and userspace output
   # buffering is disabled
   def self.new
+    path = nil
+
+    # workaround File#path being tainted:
+    # https://bugs.ruby-lang.org/issues/14485
     fp = begin
-
+      path = "#{Dir::tmpdir}/#{rand}"
+      super(path, RDWR|CREAT|EXCL, 0600)
     rescue Errno::EEXIST
       retry
     end
-
+
+    unlink(path)
     fp.binmode
     fp.sync = true
     fp
data/test/benchmark/README
CHANGED
@@ -42,9 +42,19 @@ The benchmark client is usually httperf.
 Another gentle reminder: performance with slow networks/clients
 is NOT our problem.  That is the job of nginx (or similar).

+== ddstream.ru
+
+Standalone Rack app intended to show how BAD we are at slow clients.
+See usage in comments.
+
+== readinput.ru
+
+Standalone Rack app intended to show how bad we are with slow uploaders.
+See usage in comments.
+
 == Contributors

-This directory is
-
-
-
+This directory is intended to remain stable.  Do not make changes
+to benchmarking code which can change performance and invalidate
+results across revisions.  Instead, write new benchmarks and update
+coments/documentation as necessary.
data/test/benchmark/ddstream.ru
ADDED
@@ -0,0 +1,50 @@
+# This app is intended to test large HTTP responses with or without
+# a fully-buffering reverse proxy such as nginx.  Without a fully-buffering
+# reverse proxy, unicorn will be unresponsive when client count exceeds
+# worker_processes.
+#
+# To demonstrate how bad unicorn is at slowly reading clients:
+#
+#   # in one terminal, start unicorn with one worker:
+#   unicorn -E none -l 127.0.0.1:8080 test/benchmark/ddstream.ru
+#
+#   # in a different terminal, start more slow curl processes than
+#   # unicorn workers and watch time outputs
+#   curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+#   curl --limit-rate 8K --trace-time -vsN http://127.0.0.1:8080/ >/dev/null &
+#   wait
+#
+# The last client won't see a response until the first one is done reading
+#
+# nginx note: do not change the default "proxy_buffering" behavior.
+# Setting "proxy_buffering off" prevents nginx from protecting unicorn.
+
+# totally standalone rack app to stream a giant response
+class BigResponse
+  def initialize(bs, count)
+    @buf = "#{bs.to_s(16)}\r\n#{' ' * bs}\r\n"
+    @count = count
+    @res = [ 200,
+      { 'Transfer-Encoding' => -'chunked', 'Content-Type' => 'text/plain' },
+      self
+    ]
+  end
+
+  # rack response body iterator
+  def each
+    (1..@count).each { yield @buf }
+    yield -"0\r\n\r\n"
+  end
+
+  # rack app entry endpoint
+  def call(_env)
+    @res
+  end
+end
+
+# default to a giant (128M) response because kernel socket buffers
+# can be ridiculously large on some systems
+bs = ENV['bs'] ? ENV['bs'].to_i : 65536
+count = ENV['count'] ? ENV['count'].to_i : 2048
+warn "serving response with bs=#{bs} count=#{count} (#{bs*count} bytes)"
+run BigResponse.new(bs, count)
data/test/benchmark/readinput.ru
ADDED
@@ -0,0 +1,40 @@
+# This app is intended to test large HTTP requests with or without
+# a fully-buffering reverse proxy such as nginx.  Without a fully-buffering
+# reverse proxy, unicorn will be unresponsive when client count exceeds
+# worker_processes.
+
+DOC = <<DOC
+To demonstrate how bad unicorn is at slowly uploading clients:
+
+  # in one terminal, start unicorn with one worker:
+  unicorn -E none -l 127.0.0.1:8080 test/benchmark/readinput.ru
+
+  # in a different terminal, upload 45M from multiple curl processes:
+  dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+    --trace-time -v http://127.0.0.1:8080/ &
+  dd if=/dev/zero bs=45M count=1 | curl -T- -HExpect: --limit-rate 1M \
+    --trace-time -v http://127.0.0.1:8080/ &
+  wait
+
+# The last client won't see a response until the first one is done uploading
+# You also won't be able to make GET requests to view this documentation
+# while clients are uploading.  You can also view the stderr debug output
+# of unicorn (see logging code in #{__FILE__}).
+DOC
+
+run(lambda do |env|
+  input = env['rack.input']
+  buf = ''.b
+
+  # default logger contains timestamps, rely on that so users can
+  # see what the server is doing
+  l = env['rack.logger']
+
+  l.debug('BEGIN reading input ...') if l
+  :nop while input.read(16384, buf)
+  l.debug('DONE reading input ...') if l
+
+  buf.clear
+  [ 200, [ %W(Content-Length #{DOC.size}), %w(Content-Type text/plain) ],
+    [ DOC ] ]
+end)
data/test/benchmark/uconnect.perl
ADDED
@@ -0,0 +1,66 @@
+#!/usr/bin/perl -w
+# Benchmark script to spawn some processes and hammer a local unicorn
+# to test accept loop performance.  This only does Unix sockets.
+# There's plenty of TCP benchmarking tools out there, and TCP port reuse
+# has predictability problems since unicorn can't do persistent connections.
+# Written in Perl for the same reason: predictability.
+# Ruby GC is not as predictable as Perl refcounting.
+use strict;
+use Socket qw(AF_UNIX SOCK_STREAM sockaddr_un);
+use POSIX qw(:sys_wait_h);
+use Getopt::Std;
+# -c / -n switches stolen from ab(1)
+my $usage = "$0 [-c CONCURRENCY] [-n NUM_REQUESTS] SOCKET_PATH\n";
+our $opt_c = 2;
+our $opt_n = 1000;
+getopts('c:n:') or die $usage;
+my $unix_path = shift or die $usage;
+use constant REQ => "GET / HTTP/1.1\r\nHost: example.com\r\n\r\n";
+use constant REQ_LEN => length(REQ);
+use constant BUFSIZ => 8192;
+$^F = 99; # don't waste syscall time with FD_CLOEXEC
+
+my %workers; # pid => worker num
+die "-n $opt_n not evenly divisible by -c $opt_c\n" if $opt_n % $opt_c;
+my $n_per_worker = $opt_n / $opt_c;
+my $addr = sockaddr_un($unix_path);
+
+for my $num (1..$opt_c) {
+  defined(my $pid = fork) or die "fork failed: $!\n";
+  if ($pid) {
+    $workers{$pid} = $num;
+  } else {
+    work($n_per_worker);
+  }
+}
+
+reap_worker(0) while scalar keys %workers;
+exit;
+
+sub work {
+  my ($n) = @_;
+  my ($buf, $x);
+  for (1..$n) {
+    socket(S, AF_UNIX, SOCK_STREAM, 0) or die "socket: $!";
+    connect(S, $addr) or die "connect: $!";
+    defined($x = syswrite(S, REQ)) or die "write: $!";
+    $x == REQ_LEN or die "short write: $x != ".REQ_LEN."\n";
+    do {
+      $x = sysread(S, $buf, BUFSIZ);
+      unless (defined $x) {
+        next if $!{EINTR};
+        die "sysread: $!\n";
+      }
+    } until ($x == 0);
+  }
+  exit 0;
+}
+
+sub reap_worker {
+  my ($flags) = @_;
+  my $pid = waitpid(-1, $flags);
+  return if !defined $pid || $pid <= 0;
+  my $p = delete $workers{$pid} || '(unknown)';
+  warn("$pid [$p] exited with $?\n") if $?;
+  $p;
+}
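A hedged usage sketch for the new Perl client (the socket path, app file, and request count are illustrative; unicorn must already be listening on that Unix socket, and -c/-n mirror ab(1) as noted in the script):

  # any small rack app will do; config.ru here is a placeholder
  unicorn -E none -l /tmp/unicorn.sock config.ru &
  perl test/benchmark/uconnect.perl -c 4 -n 100000 /tmp/unicorn.sock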
data/test/exec/test_exec.rb
CHANGED
@@ -45,8 +45,9 @@ def call(env)

   COMMON_TMP = Tempfile.new('unicorn_tmp') unless defined?(COMMON_TMP)

+  HEAVY_WORKERS = 2
   HEAVY_CFG = <<-EOS
-worker_processes
+worker_processes #{HEAVY_WORKERS}
 timeout 30
 logger Logger.new('#{COMMON_TMP.path}')
 before_fork do |server, worker|
@@ -606,6 +607,7 @@ def test_unicorn_config_listen_augments_cli
   def test_weird_config_settings
     File.open("config.ru", "wb") { |fp| fp.syswrite(HI) }
     ucfg = Tempfile.new('unicorn_test_config')
+    proc_total = HEAVY_WORKERS + 1 # + 1 for master
     ucfg.syswrite(HEAVY_CFG)
     pid = xfork do
       redirect_test_io do
@@ -616,9 +618,9 @@ def test_weird_config_settings
     results = retry_hit(["http://#{@addr}:#{@port}/"])
     assert_equal String, results[0].class
     wait_master_ready(COMMON_TMP.path)
-    wait_workers_ready(COMMON_TMP.path,
+    wait_workers_ready(COMMON_TMP.path, HEAVY_WORKERS)
     bf = File.readlines(COMMON_TMP.path).grep(/\bbefore_fork: worker=/)
-    assert_equal
+    assert_equal HEAVY_WORKERS, bf.size
     rotate = Tempfile.new('unicorn_rotate')

     File.rename(COMMON_TMP.path, rotate.path)
@@ -630,20 +632,20 @@ def test_weird_config_settings
     tries = DEFAULT_TRIES
     log = File.readlines(rotate.path)
     while (tries -= 1) > 0 &&
-        log.grep(/reopening logs\.\.\./).size <
+        log.grep(/reopening logs\.\.\./).size < proc_total
       sleep DEFAULT_RES
       log = File.readlines(rotate.path)
     end
-    assert_equal
+    assert_equal proc_total, log.grep(/reopening logs\.\.\./).size
     assert_equal 0, log.grep(/done reopening logs/).size

     tries = DEFAULT_TRIES
     log = File.readlines(COMMON_TMP.path)
-    while (tries -= 1) > 0 && log.grep(/done reopening logs/).size <
+    while (tries -= 1) > 0 && log.grep(/done reopening logs/).size < proc_total
       sleep DEFAULT_RES
       log = File.readlines(COMMON_TMP.path)
     end
-    assert_equal
+    assert_equal proc_total, log.grep(/done reopening logs/).size
     assert_equal 0, log.grep(/reopening logs\.\.\./).size

     Process.kill(:QUIT, pid)
data/test/test_helper.rb
CHANGED
@@ -265,32 +265,6 @@ def wait_for_death(pid)
     raise "PID:#{pid} never died!"
   end

-  # executes +cmd+ and chunks its STDOUT
-  def chunked_spawn(stdout, *cmd)
-    fork {
-      crd, cwr = IO.pipe
-      crd.binmode
-      cwr.binmode
-      crd.sync = cwr.sync = true
-
-      pid = fork {
-        STDOUT.reopen(cwr)
-        crd.close
-        cwr.close
-        exec(*cmd)
-      }
-      cwr.close
-      begin
-        buf = crd.readpartial(16384)
-        stdout.write("#{'%x' % buf.size}\r\n#{buf}")
-      rescue EOFError
-        stdout.write("0\r\n")
-        pid, status = Process.waitpid(pid)
-        exit status.exitstatus
-      end while true
-    }
-  end
-
   def reset_sig_handlers
     %w(WINCH QUIT INT TERM USR1 USR2 HUP TTIN TTOU CHLD).each do |sig|
       trap(sig, "DEFAULT")
data/test/unit/test_http_parser_ng.rb
CHANGED
@@ -11,6 +11,20 @@ def setup
     @parser = HttpParser.new
   end

+  # RFC 7230 allows gzip/deflate/compress Transfer-Encoding,
+  # but "chunked" must be last if used
+  def test_is_chunked
+    [ 'chunked,chunked', 'chunked,gzip', 'chunked,gzip,chunked' ].each do |x|
+      assert_raise(HttpParserError) { HttpParser.is_chunked?(x) }
+    end
+    [ 'gzip, chunked', 'gzip,chunked', 'gzip ,chunked' ].each do |x|
+      assert HttpParser.is_chunked?(x)
+    end
+    [ 'gzip', 'xhunked', 'xchunked' ].each do |x|
+      assert !HttpParser.is_chunked?(x)
+    end
+  end
+
   def test_parser_max_len
     assert_raises(RangeError) do
       HttpParser.max_header_len = 0xffffffff + 1
@@ -566,6 +580,73 @@ def test_invalid_content_length
     end
   end

+  def test_duplicate_content_length
+    str = "PUT / HTTP/1.1\r\n" \
+          "Content-Length: 1\r\n" \
+          "Content-Length: 9\r\n" \
+          "\r\n"
+    assert_raises(HttpParserError) { @parser.headers({}, str) }
+  end
+
+  def test_chunked_overrides_content_length
+    order = [ 'Transfer-Encoding: chunked', 'Content-Length: 666' ]
+    %w(a b).each do |x|
+      str = "PUT /#{x} HTTP/1.1\r\n" \
+            "#{order.join("\r\n")}" \
+            "\r\n\r\na\r\nhelloworld\r\n0\r\n\r\n"
+      order.reverse!
+      env = @parser.headers({}, str)
+      assert_nil @parser.content_length
+      assert_equal 'chunked', env['HTTP_TRANSFER_ENCODING']
+      assert_equal '666', env['CONTENT_LENGTH'],
+        'Content-Length logged so the app can log a possible client bug/attack'
+      @parser.filter_body(dst = '', str)
+      assert_equal 'helloworld', dst
+      @parser.parse # handle the non-existent trailer
+      assert @parser.next?
+    end
+  end
+
+  def test_chunked_order_good
+    str = "PUT /x HTTP/1.1\r\n" \
+          "Transfer-Encoding: gzip\r\n" \
+          "Transfer-Encoding: chunked\r\n" \
+          "\r\n"
+    env = @parser.headers({}, str)
+    assert_equal 'gzip,chunked', env['HTTP_TRANSFER_ENCODING']
+    assert_nil @parser.content_length
+
+    @parser.clear
+    str = "PUT /x HTTP/1.1\r\n" \
+          "Transfer-Encoding: gzip, chunked\r\n" \
+          "\r\n"
+    env = @parser.headers({}, str)
+    assert_equal 'gzip, chunked', env['HTTP_TRANSFER_ENCODING']
+    assert_nil @parser.content_length
+  end
+
+  def test_chunked_order_bad
+    str = "PUT /x HTTP/1.1\r\n" \
+          "Transfer-Encoding: chunked\r\n" \
+          "Transfer-Encoding: gzip\r\n" \
+          "\r\n"
+    assert_raise(HttpParserError) { @parser.headers({}, str) }
+  end
+
+  def test_double_chunked
+    str = "PUT /x HTTP/1.1\r\n" \
+          "Transfer-Encoding: chunked\r\n" \
+          "Transfer-Encoding: chunked\r\n" \
+          "\r\n"
+    assert_raise(HttpParserError) { @parser.headers({}, str) }
+
+    @parser.clear
+    str = "PUT /x HTTP/1.1\r\n" \
+          "Transfer-Encoding: chunked,chunked\r\n" \
+          "\r\n"
+    assert_raise(HttpParserError) { @parser.headers({}, str) }
+  end
+
   def test_backtrace_is_empty
     begin
       @parser.headers({}, "AAADFSFDSFD\r\n\r\n")