unicorn 1.0.2 → 1.1.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/Documentation/unicorn.1.txt +3 -0
- data/Documentation/unicorn_rails.1.txt +5 -2
- data/GIT-VERSION-GEN +1 -1
- data/GNUmakefile +2 -2
- data/Rakefile +3 -7
- data/ext/unicorn_http/unicorn_http.rl +1 -1
- data/lib/unicorn.rb +10 -13
- data/lib/unicorn/configurator.rb +466 -443
- data/lib/unicorn/const.rb +2 -2
- data/lib/unicorn/http_request.rb +1 -2
- data/lib/unicorn/http_response.rb +55 -60
- data/lib/unicorn/launcher.rb +5 -1
- data/lib/unicorn/socket_helper.rb +42 -24
- data/lib/unicorn/tee_input.rb +203 -195
- data/test/test_helper.rb +0 -1
- data/test/unit/test_socket_helper.rb +24 -0
- data/unicorn.gemspec +2 -2
- metadata +8 -11
- data/t/pid.ru +0 -3
- data/t/t0008-back_out_of_upgrade.sh +0 -110
- data/t/t0009-winch_ttin.sh +0 -59
- data/t/t0012-reload-empty-config.sh +0 -82
data/lib/unicorn/const.rb
CHANGED
@@ -8,8 +8,8 @@ module Unicorn
   # Symbols did not really improve things much compared to constants.
   module Const
 
-    # The current version of Unicorn, currently 1.0.2
-    UNICORN_VERSION="1.0.2"
+    # The current version of Unicorn, currently 1.1.0
+    UNICORN_VERSION="1.1.0"
 
     DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
     DEFAULT_PORT = 8080 # default TCP listen port
data/lib/unicorn/http_request.rb
CHANGED
@@ -1,6 +1,5 @@
 # -*- encoding: binary -*-
 
-require 'stringio'
 require 'unicorn_http'
 
 module Unicorn
@@ -53,7 +52,7 @@ module Unicorn
       # that client may be a proxy, gateway, or other intermediary
       # acting on behalf of the actual source client."
       REQ[Const::REMOTE_ADDR] =
-        TCPSocket === socket ? socket.peeraddr
+        TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
 
       # short circuit the common case with small GET requests first
       if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
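
Note on the REMOTE_ADDR change above: Socket#peeraddr returns an array of the form [family, port, hostname, numeric_address], so peeraddr[-1] picks out the numeric IP string. A minimal illustration, not part of the diff (the host and printed values are only examples):

  require 'socket'

  TCPSocket.open('example.com', 80) do |sock|
    p sock.peeraddr      # e.g. ["AF_INET", 80, "example.com", "93.184.216.34"]
    p sock.peeraddr[-1]  # e.g. "93.184.216.34" -- the value REMOTE_ADDR carries
  end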
data/lib/unicorn/http_response.rb
CHANGED
@@ -1,75 +1,70 @@
 # -*- encoding: binary -*-
 require 'time'
 
-  class HttpResponse
+# Writes a Rack response to your client using the HTTP/1.1 specification.
+# You use it by simply doing:
+#
+#   status, headers, body = rack_app.call(env)
+#   HttpResponse.write(socket, [ status, headers, body ])
+#
+# Most header correctness (including Content-Length and Content-Type)
+# is the job of Rack, with the exception of the "Connection: close"
+# and "Date" headers.
+#
+# A design decision was made to force the client to not pipeline or
+# keepalive requests.  HTTP/1.1 pipelining really kills the
+# performance due to how it has to be handled and how unclear the
+# standard is.  To fix this the HttpResponse always gives a
+# "Connection: close" header which forces the client to close right
+# away.  The bonus for this is that it gives a pretty nice speed boost
+# to most clients since they can close their connection immediately.
+module Unicorn::HttpResponse
 
+  # Every standard HTTP code mapped to the appropriate message.
+  CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
+    hash[code] = "#{code} #{msg}"
+    hash
+  }
 
+  # Rack does not set/require a Date: header.  We always override the
+  # Connection: and Date: headers no matter what (if anything) our
+  # Rack application sent us.
+  SKIP = { 'connection' => true, 'date' => true, 'status' => true }
 
+  # writes the rack_response to socket as an HTTP response
+  def self.write(socket, rack_response, have_header = true)
+    status, headers, body = rack_response
 
+    if have_header
+      status = CODES[status.to_i] || status
+      out = []
 
-      end
+      # Don't bother enforcing duplicate supression, it's a Hash most of
+      # the time anyways so just hope our app knows what it's doing
+      headers.each do |key, value|
+        next if SKIP.include?(key.downcase)
+        if value =~ /\n/
+          # avoiding blank, key-only cookies with /\n+/
+          out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+        else
+          out << "#{key}: #{value}\r\n"
         end
-
-      # Rack should enforce Content-Length or chunked transfer encoding,
-      # so don't worry or care about them.
-      # Date is required by HTTP/1.1 as long as our clock can be trusted.
-      # Some broken clients require a "Status" header so we accomodate them
-      socket.write("HTTP/1.1 #{status}\r\n" \
-                   "Date: #{Time.now.httpdate}\r\n" \
-                   "Status: #{status}\r\n" \
-                   "Connection: close\r\n" \
-                   "#{out.join('')}\r\n")
       end
 
+      # Rack should enforce Content-Length or chunked transfer encoding,
+      # so don't worry or care about them.
+      # Date is required by HTTP/1.1 as long as our clock can be trusted.
+      # Some broken clients require a "Status" header so we accomodate them
+      socket.write("HTTP/1.1 #{status}\r\n" \
+                   "Date: #{Time.now.httpdate}\r\n" \
+                   "Status: #{status}\r\n" \
+                   "Connection: close\r\n" \
+                   "#{out.join('')}\r\n")
    end
 
+    body.each { |chunk| socket.write(chunk) }
+    socket.close # flushes and uncorks the socket immediately
+    ensure
+      body.respond_to?(:close) and body.close
  end
 end
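
The comment block introduced above spells out the calling convention. As a minimal sketch (not unicorn code: the socket handling and the empty env hash are illustrative only, and a real server builds env via Unicorn::HttpRequest#read), the rewritten module-level API can be exercised like this:

  require 'socket'
  require 'unicorn'   # defines Unicorn::HttpResponse and pulls in Rack

  rack_app = lambda { |env| [200, { 'Content-Type' => 'text/plain' }, ["hi\n"]] }

  server = TCPServer.new('127.0.0.1', 8080)
  client = server.accept                # e.g. from `curl http://127.0.0.1:8080/`
  client.readpartial(16 * 1024)         # drain the request bytes; parsing is skipped here
  response = rack_app.call({})
  Unicorn::HttpResponse.write(client, response)
  # write() adds the Date, Status and "Connection: close" headers and closes the socket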
data/lib/unicorn/launcher.rb
CHANGED
@@ -24,7 +24,11 @@ module Unicorn::Launcher
 
     # We only start a new process group if we're not being reexecuted
     # and inheriting file descriptors from our parent
+    if ENV['UNICORN_FD']
+      exit if fork
+      Process.setsid
+      exit if fork
+    else
       # grandparent - reads pipe, exits when master is ready
       #  \_ parent - exits immediately ASAP
       #    \_ unicorn master - writes to pipe when ready
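
The new UNICORN_FD branch above is the standard Unix double-fork detach, shown here as a generic illustration rather than unicorn code:

  exit if fork     # 1) the original process returns to the shell; the child carries on
  Process.setsid   # 2) the child becomes a session leader with no controlling terminal
  exit if fork     # 3) the session leader exits, so the surviving grandchild
                   #    can never reacquire a controlling terminal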
data/lib/unicorn/socket_helper.rb
CHANGED
@@ -1,11 +1,28 @@
 # -*- encoding: binary -*-
+# :enddoc:
 require 'socket'
 
 module Unicorn
   module SocketHelper
     include Socket::Constants
 
+    # :stopdoc:
+    # internal interface, only used by Rainbows!/Zbatery
+    DEFAULTS = {
+      # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+
+      # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9
+      # This change shouldn't affect Unicorn users behind nginx (a
+      # value of 1 remains an optimization), but Rainbows! users may
+      # want to use a higher value on Linux 2.6.32+ to protect against
+      # denial-of-service attacks
+      :tcp_defer_accept => 1,
+
+      # FreeBSD, we need to override this to 'dataready' when we
+      # eventually get HTTPS support
+      :accept_filter => 'httpready',
+    }
+    #:startdoc:
+
     # configure platform-specific options (only tested on Linux 2.6 so far)
     case RUBY_PLATFORM
     when /linux/
@@ -14,22 +31,13 @@ module Unicorn
 
       # do not send out partial frames (Linux)
       TCP_CORK = 3 unless defined?(TCP_CORK)
-    when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
-      # Do nothing for httpready, just closing a bug when freebsd <= 5.4
-      TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc:
     when /freebsd/
       # do not send out partial frames (FreeBSD)
       TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
 
-      unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
-        # set set the "httpready" accept filter in FreeBSD if available
-        # if other protocols are to be supported, this may be
-        # String#replace-d with "dataready" arguments instead
-        FILTER_ARG = ['httpready', nil].pack('a16a240')
-      end
+      def accf_arg(af_name)
+        [ af_name, nil ].pack('a16a240')
+      end if defined?(SO_ACCEPTFILTER)
     end
 
     def set_tcp_sockopt(sock, opt)
@@ -49,10 +57,25 @@ module Unicorn
      end
 
      # No good reason to ever have deferred accepts off
+      # (except maybe benchmarking)
      if defined?(TCP_DEFER_ACCEPT)
+        # this differs from nginx, since nginx doesn't allow us to
+        # configure the the timeout...
+        tmp = DEFAULTS.merge(opt)
+        seconds = tmp[:tcp_defer_accept]
+        seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
+        seconds = 0 unless seconds # nil/false means disable this
+        sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
+      elsif respond_to?(:accf_arg)
+        tmp = DEFAULTS.merge(opt)
+        if name = tmp[:accept_filter]
+          begin
+            sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+          rescue => e
+            logger.error("#{sock_name(sock)} " \
+                         "failed to set accept_filter=#{name} (#{e.inspect})")
+          end
+        end
      end
    end
 
@@ -69,14 +92,11 @@ module Unicorn
      end
      sock.listen(opt[:backlog] || 1024)
      rescue => e
-        logger.error e.backtrace.join("\n")
-    end
+        logger.error "error setting socket options: #{e.inspect}"
+        logger.error e.backtrace.join("\n")
    end
 
    def log_buffer_sizes(sock, pfx = '')
-      respond_to?(:logger) or return
      rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
      sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
      logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
@@ -91,9 +111,7 @@ module Unicorn
      sock = if address[0] == ?/
        if File.exist?(address)
          if File.socket?(address)
-            logger.info "unlinking existing socket=#{address}"
-          end
+            logger.info "unlinking existing socket=#{address}"
            File.unlink(address)
          else
            raise ArgumentError,
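
Both options in DEFAULTS delay waking the accepting process until a client has actually sent data: TCP_DEFER_ACCEPT on Linux and the accf_http(9) accept filter (SO_ACCEPTFILTER) on FreeBSD. A standalone, Linux-only sketch of the same setsockopt call made in set_tcp_sockopt above (the constant value 9 comes from /usr/include/linux/tcp.h; the address and port are arbitrary):

  require 'socket'

  TCP_DEFER_ACCEPT = 9  # Linux-specific socket option

  srv = TCPServer.new('0.0.0.0', 8080)
  # wake the accepting process only once request data has arrived,
  # the equivalent of the new :tcp_defer_accept => 1 default
  srv.setsockopt(Socket::IPPROTO_TCP, TCP_DEFER_ACCEPT, 1)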
data/lib/unicorn/tee_input.rb
CHANGED
@@ -1,224 +1,232 @@
 # -*- encoding: binary -*-
 
+# acts like tee(1) on an input input to provide a input-like stream
+# while providing rewindable semantics through a File/StringIO backing
+# store.  On the first pass, the input is only read on demand so your
+# Rack application can use input notification (upload progress and
+# like).  This should fully conform to the Rack::Lint::InputWrapper
+# specification on the public API.  This class is intended to be a
+# strict interpretation of Rack::Lint::InputWrapper functionality and
+# will not support any deviations from it.
+#
+# When processing uploads, Unicorn exposes a TeeInput object under
+# "rack.input" of the Rack environment.
+class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
+                                     :buf, :len, :tmp, :buf2)
+
+  # The maximum size (in +bytes+) to buffer in memory before
+  # resorting to a temporary file.  Default is 112 kilobytes.
+  @@client_body_buffer_size = Unicorn::Const::MAX_BODY
+
+  # The I/O chunk size (in +bytes+) for I/O operations where
+  # the size cannot be user-specified when a method is called.
+  # The default is 16 kilobytes.
+  @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+
+  # Initializes a new TeeInput object.  You normally do not have to call
+  # this unless you are writing an HTTP server.
+  def initialize(*args)
+    super(*args)
+    self.len = parser.content_length
+    self.tmp = len && len < @@client_body_buffer_size ?
+               StringIO.new("") : Unicorn::Util.tmpio
+    self.buf2 = ""
+    if buf.size > 0
+      parser.filter_body(buf2, buf) and finalize_input
+      tmp.write(buf2)
+      tmp.rewind
    end
+  end
 
-      tmp.seek(pos)
+  # :call-seq:
+  #   ios.size  => Integer
+  #
+  # Returns the size of the input.  For requests with a Content-Length
+  # header value, this will not read data off the socket and just return
+  # the value of the Content-Length header as an Integer.
+  #
+  # For Transfer-Encoding:chunked requests, this requires consuming
+  # all of the input stream before returning since there's no other
+  # way to determine the size of the request body beforehand.
+  #
+  # This method is no longer part of the Rack specification as of
+  # Rack 1.2, so its use is not recommended.  This method only exists
+  # for compatibility with Rack applications designed for Rack 1.1 and
+  # earlier.  Most applications should only need to call +read+ with a
+  # specified +length+ in a loop until it returns +nil+.
+  def size
+    len and return len
+
+    if socket
+      pos = tmp.pos
+      while tee(@@io_chunk_size, buf2)
      end
-      self.len = tmp.size
+      tmp.seek(pos)
    end
 
+    self.len = tmp.size
+  end
+
+  # :call-seq:
+  #   ios.read([length [, buffer ]]) => string, buffer, or nil
+  #
+  # Reads at most length bytes from the I/O stream, or to the end of
+  # file if length is omitted or is nil. length must be a non-negative
+  # integer or nil. If the optional buffer argument is present, it
+  # must reference a String, which will receive the data.
+  #
+  # At end of file, it returns nil or "" depend on length.
+  # ios.read() and ios.read(nil) returns "".
+  # ios.read(length [, buffer]) returns nil.
+  #
+  # If the Content-Length of the HTTP request is known (as is the common
+  # case for POST requests), then ios.read(length [, buffer]) will block
+  # until the specified length is read (or it is the last chunk).
+  # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+  # ios.read(length [, buffer]) will return immediately if there is
+  # any data and only block when nothing is available (providing
+  # IO#readpartial semantics).
+  def read(*args)
+    socket or return tmp.read(*args)
+
+    length = args.shift
+    if nil == length
+      rv = tmp.read || ""
+      while tee(@@io_chunk_size, buf2)
+        rv << buf2
+      end
+      rv
+    else
+      rv = args.shift || ""
+      diff = tmp.size - tmp.pos
+      if 0 == diff
+        ensure_length(tee(length, rv), length)
      else
-        diff = tmp.size - tmp.pos
-        if 0 == diff
-          ensure_length(tee(length, rv), length)
-        else
-          ensure_length(tmp.read(diff > length ? length : diff, rv), length)
-        end
+        ensure_length(tmp.read(diff > length ? length : diff, rv), length)
      end
    end
+  end
 
+  # :call-seq:
+  #   ios.gets   => string or nil
+  #
+  # Reads the next ``line'' from the I/O stream; lines are separated
+  # by the global record separator ($/, typically "\n"). A global
+  # record separator of nil reads the entire unread contents of ios.
+  # Returns nil if called at the end of file.
+  # This takes zero arguments for strict Rack::Lint compatibility,
+  # unlike IO#gets.
+  def gets
+    socket or return tmp.gets
+    sep = $/ or return read
+
+    orig_size = tmp.size
+    if tmp.pos == orig_size
+      tee(@@io_chunk_size, buf2) or return nil
+      tmp.seek(orig_size)
+    end
 
+    sep_size = Rack::Utils.bytesize(sep)
+    line = tmp.gets # cannot be nil here since size > pos
+    sep == line[-sep_size, sep_size] and return line
 
-    line
-    end
+    # unlikely, if we got here, then tmp is at EOF
+    begin
+      orig_size = tmp.pos
+      tee(@@io_chunk_size, buf2) or break
+      tmp.seek(orig_size)
+      line << tmp.gets
+      sep == line[-sep_size, sep_size] and return line
+      # tmp is at EOF again here, retry the loop
+    end while true
 
-    #
-    # Executes the block for every ``line'' in *ios*, where lines are
-    # separated by the global record separator ($/, typically "\n").
-    def each(&block)
-      while line = gets
-        yield line
-      end
+    line
+  end
 
+  # :call-seq:
+  #   ios.each { |line| block }  => ios
+  #
+  # Executes the block for every ``line'' in *ios*, where lines are
+  # separated by the global record separator ($/, typically "\n").
+  def each(&block)
+    while line = gets
+      yield line
    end
 
-    #
-    #
-    # Positions the *ios* pointer to the beginning of input, returns
-    # the offset (zero) of the +ios+ pointer.  Subsequent reads will
-    # start from the beginning of the previously-buffered input.
-    def rewind
-      tmp.rewind # Rack does not specify what the return value is here
-    end
+    self # Rack does not specify what the return value is here
+  end
 
-      socket.close if socket
-      raise ClientShutdown, "bytes_read=#{tmp.size}", []
-    when HttpParserError
-      e.set_backtrace([])
-    end
-    raise e
-  end
+  # :call-seq:
+  #   ios.rewind    => 0
+  #
+  # Positions the *ios* pointer to the beginning of input, returns
+  # the offset (zero) of the +ios+ pointer.  Subsequent reads will
+  # start from the beginning of the previously-buffered input.
+  def rewind
+    tmp.rewind # Rack does not specify what the return value is here
+  end
 
-    client_error(e)
+private
+
+  def client_error(e)
+    case e
+    when EOFError
+      # in case client only did a premature shutdown(SHUT_WR)
+      # we do support clients that shutdown(SHUT_WR) after the
+      # _entire_ request has been sent, and those will not have
+      # raised EOFError on us.
+      socket.close if socket
+      raise ClientShutdown, "bytes_read=#{tmp.size}", []
+    when HttpParserError
+      e.set_backtrace([])
    end
+    raise e
+  end
 
+  # tees off a +length+ chunk of data from the input into the IO
+  # backing store as well as returning it.  +dst+ must be specified.
+  # returns nil if reading from the input returns nil
+  def tee(length, dst)
+    unless parser.body_eof?
+      if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
+        tmp.write(dst)
+        tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+        return dst
      end
-      self.socket = nil
    end
+    finalize_input
+    rescue => e
+      client_error(e)
+  end
 
-    return dst if dst.nil? || len.nil?
-
-    while dst.size < length && tee(length - dst.size, buf2)
-      dst << buf2
-    end
+  def finalize_input
+    while parser.trailers(req, buf).nil?
+      # Don't worry about raising ClientShutdown here on EOFError, tee()
+      # will catch EOFError when app is processing it, otherwise in
+      # initialize we never get any chance to enter the app so the
+      # EOFError will just get trapped by Unicorn and not the Rack app
+      buf << socket.readpartial(@@io_chunk_size)
+    end
+    self.socket = nil
+  end
 
+  # tee()s into +dst+ until it is of +length+ bytes (or until
+  # we've reached the Content-Length of the request body).
+  # Returns +dst+ (the exact object, not a duplicate)
+  # To continue supporting applications that need near-real-time
+  # streaming input bodies, this is a no-op for
+  # "Transfer-Encoding: chunked" requests.
+  def ensure_length(dst, length)
+    # len is nil for chunked bodies, so we can't ensure length for those
+    # since they could be streaming bidirectionally and we don't want to
+    # block the caller in that case.
+    return dst if dst.nil? || len.nil?
+
+    while dst.size < length && tee(length - dst.size, buf2)
+      dst << buf2
    end
 
+    dst
  end
+
 end
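
The new comments above recommend that Rack applications read "rack.input" with an explicit length in a loop until read returns nil. A small config.ru sketch of that pattern (illustrative only; the class name and chunk size are made up for the example):

  class UploadCounter
    CHUNK = 16 * 1024

    def call(env)
      input = env['rack.input']      # a Unicorn::TeeInput during uploads under unicorn
      bytes = 0
      buf = ''
      while input.read(CHUNK, buf)   # nil signals the end of the request body
        bytes += buf.size            # a real app could report upload progress here
      end
      input.rewind                   # rewindable thanks to the File/StringIO backing store
      [200, { 'Content-Type' => 'text/plain' }, ["read #{bytes} bytes\n"]]
    end
  end

  run UploadCounter.new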