unicorn 1.0.2 → 1.1.0

@@ -8,8 +8,8 @@ module Unicorn
   # Symbols did not really improve things much compared to constants.
   module Const
 
- # The current version of Unicorn, currently 1.0.2
- UNICORN_VERSION="1.0.2"
+ # The current version of Unicorn, currently 1.1.0
+ UNICORN_VERSION="1.1.0"
 
   DEFAULT_HOST = "0.0.0.0" # default TCP listen host address
   DEFAULT_PORT = 8080 # default TCP listen port
@@ -1,6 +1,5 @@
   # -*- encoding: binary -*-
 
- require 'stringio'
   require 'unicorn_http'
 
   module Unicorn
@@ -53,7 +52,7 @@ module Unicorn
   # that client may be a proxy, gateway, or other intermediary
   # acting on behalf of the actual source client."
   REQ[Const::REMOTE_ADDR] =
- TCPSocket === socket ? socket.peeraddr.last : LOCALHOST
+ TCPSocket === socket ? socket.peeraddr[-1] : LOCALHOST
 
   # short circuit the common case with small GET requests first
   if PARSER.headers(REQ, socket.readpartial(Const::CHUNK_SIZE, BUF)).nil?
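The only functional change in this hunk is peeraddr.last becoming peeraddr[-1]: Socket#peeraddr returns an array of the form [address_family, port, hostname, numeric_address], and both expressions pick the final element used for REMOTE_ADDR. A small standalone sketch (the throwaway server below is purely illustrative, not part of the diff):

    require 'socket'

    srv    = TCPServer.new('127.0.0.1', 0)           # listen on a random free port
    client = TCPSocket.new('127.0.0.1', srv.addr[1])
    peer   = srv.accept
    p peer.peeraddr       # e.g. ["AF_INET", 52844, "127.0.0.1", "127.0.0.1"]
    p peer.peeraddr[-1]   # "127.0.0.1" -- the same element .last returned
    [client, peer, srv].each { |io| io.close }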
@@ -1,75 +1,70 @@
   # -*- encoding: binary -*-
-
   require 'time'
 
- module Unicorn
- # Writes a Rack response to your client using the HTTP/1.1 specification.
- # You use it by simply doing:
- #
- # status, headers, body = rack_app.call(env)
- # HttpResponse.write(socket, [ status, headers, body ])
- #
- # Most header correctness (including Content-Length and Content-Type)
- # is the job of Rack, with the exception of the "Connection: close"
- # and "Date" headers.
- #
- # A design decision was made to force the client to not pipeline or
- # keepalive requests. HTTP/1.1 pipelining really kills the
- # performance due to how it has to be handled and how unclear the
- # standard is. To fix this the HttpResponse always gives a
- # "Connection: close" header which forces the client to close right
- # away. The bonus for this is that it gives a pretty nice speed boost
- # to most clients since they can close their connection immediately.
-
- class HttpResponse
+ # Writes a Rack response to your client using the HTTP/1.1 specification.
+ # You use it by simply doing:
+ #
+ # status, headers, body = rack_app.call(env)
+ # HttpResponse.write(socket, [ status, headers, body ])
+ #
+ # Most header correctness (including Content-Length and Content-Type)
+ # is the job of Rack, with the exception of the "Connection: close"
+ # and "Date" headers.
+ #
+ # A design decision was made to force the client to not pipeline or
+ # keepalive requests. HTTP/1.1 pipelining really kills the
+ # performance due to how it has to be handled and how unclear the
+ # standard is. To fix this the HttpResponse always gives a
+ # "Connection: close" header which forces the client to close right
+ # away. The bonus for this is that it gives a pretty nice speed boost
+ # to most clients since they can close their connection immediately.
+ module Unicorn::HttpResponse
 
- # Every standard HTTP code mapped to the appropriate message.
- CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
- hash[code] = "#{code} #{msg}"
- hash
- }
+ # Every standard HTTP code mapped to the appropriate message.
+ CODES = Rack::Utils::HTTP_STATUS_CODES.inject({}) { |hash,(code,msg)|
+ hash[code] = "#{code} #{msg}"
+ hash
+ }
 
- # Rack does not set/require a Date: header. We always override the
- # Connection: and Date: headers no matter what (if anything) our
- # Rack application sent us.
- SKIP = { 'connection' => true, 'date' => true, 'status' => true }
+ # Rack does not set/require a Date: header. We always override the
+ # Connection: and Date: headers no matter what (if anything) our
+ # Rack application sent us.
+ SKIP = { 'connection' => true, 'date' => true, 'status' => true }
 
- # writes the rack_response to socket as an HTTP response
- def self.write(socket, rack_response, have_header = true)
- status, headers, body = rack_response
+ # writes the rack_response to socket as an HTTP response
+ def self.write(socket, rack_response, have_header = true)
+ status, headers, body = rack_response
 
- if have_header
- status = CODES[status.to_i] || status
- out = []
+ if have_header
+ status = CODES[status.to_i] || status
+ out = []
 
- # Don't bother enforcing duplicate supression, it's a Hash most of
- # the time anyways so just hope our app knows what it's doing
- headers.each do |key, value|
- next if SKIP.include?(key.downcase)
- if value =~ /\n/
- # avoiding blank, key-only cookies with /\n+/
- out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
- else
- out << "#{key}: #{value}\r\n"
- end
+ # Don't bother enforcing duplicate supression, it's a Hash most of
+ # the time anyways so just hope our app knows what it's doing
+ headers.each do |key, value|
+ next if SKIP.include?(key.downcase)
+ if value =~ /\n/
+ # avoiding blank, key-only cookies with /\n+/
+ out.concat(value.split(/\n+/).map! { |v| "#{key}: #{v}\r\n" })
+ else
+ out << "#{key}: #{value}\r\n"
   end
-
- # Rack should enforce Content-Length or chunked transfer encoding,
- # so don't worry or care about them.
- # Date is required by HTTP/1.1 as long as our clock can be trusted.
- # Some broken clients require a "Status" header so we accomodate them
- socket.write("HTTP/1.1 #{status}\r\n" \
- "Date: #{Time.now.httpdate}\r\n" \
- "Status: #{status}\r\n" \
- "Connection: close\r\n" \
- "#{out.join('')}\r\n")
   end
 
- body.each { |chunk| socket.write(chunk) }
- socket.close # flushes and uncorks the socket immediately
- ensure
- body.respond_to?(:close) and body.close
+ # Rack should enforce Content-Length or chunked transfer encoding,
+ # so don't worry or care about them.
+ # Date is required by HTTP/1.1 as long as our clock can be trusted.
+ # Some broken clients require a "Status" header so we accomodate them
+ socket.write("HTTP/1.1 #{status}\r\n" \
+ "Date: #{Time.now.httpdate}\r\n" \
+ "Status: #{status}\r\n" \
+ "Connection: close\r\n" \
+ "#{out.join('')}\r\n")
   end
 
+ body.each { |chunk| socket.write(chunk) }
+ socket.close # flushes and uncorks the socket immediately
+ ensure
+ body.respond_to?(:close) and body.close
   end
   end
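The response writer moves from a class nested inside module Unicorn to a flat module Unicorn::HttpResponse; the write interface itself is unchanged. A minimal usage sketch based on the documentation comment in this hunk (app, env and client are hypothetical placeholders):

    # Sketch only: hand a Rack response triple to the writer, as the comment
    # in the hunk above describes. `app`, `env` and `client` are assumed to exist.
    status, headers, body = app.call(env)
    Unicorn::HttpResponse.write(client, [ status, headers, body ])
    # writes the status line plus Date:, Status: and Connection: close headers,
    # streams each body chunk to the socket, closes the socket, then closes body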
@@ -24,7 +24,11 @@ module Unicorn::Launcher
 
   # We only start a new process group if we're not being reexecuted
   # and inheriting file descriptors from our parent
- unless ENV['UNICORN_FD']
+ if ENV['UNICORN_FD']
+ exit if fork
+ Process.setsid
+ exit if fork
+ else
   # grandparent - reads pipe, exits when master is ready
   # \_ parent - exits immediately ASAP
   # \_ unicorn master - writes to pipe when ready
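When UNICORN_FD is already set (a re-exec that inherited its listener descriptors), the launcher now detaches with the classic double fork: fork and exit the parent, call Process.setsid to start a new session with no controlling terminal, then fork again so the surviving process can never reacquire one. A standalone sketch of that idiom outside Unicorn (the last two housekeeping lines are illustrative, not part of this diff):

    # Double-fork + setsid daemonization sketch.
    exit if fork            # parent returns to the shell; child continues
    Process.setsid          # child becomes session leader, detached from the TTY
    exit if fork            # session leader exits; grandchild cannot regain a TTY
    Dir.chdir('/')          # typical daemon housekeeping (not in the hunk above)
    $stdin.reopen('/dev/null')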
@@ -1,11 +1,28 @@
   # -*- encoding: binary -*-
-
+ # :enddoc:
   require 'socket'
 
   module Unicorn
   module SocketHelper
   include Socket::Constants
 
+ # :stopdoc:
+ # internal interface, only used by Rainbows!/Zbatery
+ DEFAULTS = {
+ # The semantics for TCP_DEFER_ACCEPT changed in Linux 2.6.32+
+ # with commit d1b99ba41d6c5aa1ed2fc634323449dd656899e9
+ # This change shouldn't affect Unicorn users behind nginx (a
+ # value of 1 remains an optimization), but Rainbows! users may
+ # want to use a higher value on Linux 2.6.32+ to protect against
+ # denial-of-service attacks
+ :tcp_defer_accept => 1,
+
+ # FreeBSD, we need to override this to 'dataready' when we
+ # eventually get HTTPS support
+ :accept_filter => 'httpready',
+ }
+ #:startdoc:
+
   # configure platform-specific options (only tested on Linux 2.6 so far)
   case RUBY_PLATFORM
   when /linux/
@@ -14,22 +31,13 @@ module Unicorn
 
   # do not send out partial frames (Linux)
   TCP_CORK = 3 unless defined?(TCP_CORK)
- when /freebsd(([1-4]\..{1,2})|5\.[0-4])/
- # Do nothing for httpready, just closing a bug when freebsd <= 5.4
- TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH) # :nodoc:
   when /freebsd/
   # do not send out partial frames (FreeBSD)
   TCP_NOPUSH = 4 unless defined?(TCP_NOPUSH)
 
- # Use the HTTP accept filter if available.
- # The struct made by pack() is defined in /usr/include/sys/socket.h
- # as accept_filter_arg
- unless `/sbin/sysctl -nq net.inet.accf.http`.empty?
- # set set the "httpready" accept filter in FreeBSD if available
- # if other protocols are to be supported, this may be
- # String#replace-d with "dataready" arguments instead
- FILTER_ARG = ['httpready', nil].pack('a16a240')
- end
+ def accf_arg(af_name)
+ [ af_name, nil ].pack('a16a240')
+ end if defined?(SO_ACCEPTFILTER)
   end
 
   def set_tcp_sockopt(sock, opt)
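accf_arg packs the accept_filter_arg struct expected by FreeBSD's SO_ACCEPTFILTER: a 16-byte, NUL-padded filter name followed by a 240-byte argument field. A quick sketch of what the pack format produces (an empty string stands in for the nil second element; actually applying the option requires FreeBSD with the accf_http module loaded):

    # Sketch: the 256-byte accept_filter_arg struct built by pack('a16a240').
    arg = [ 'httpready', '' ].pack('a16a240')
    arg.bytesize    # => 256
    arg[0, 16]      # => "httpready" followed by seven NUL padding bytes
    # this is the value later passed as
    #   sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, arg)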
@@ -49,10 +57,25 @@ module Unicorn
   end
 
   # No good reason to ever have deferred accepts off
+ # (except maybe benchmarking)
   if defined?(TCP_DEFER_ACCEPT)
- sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, 1)
- elsif defined?(SO_ACCEPTFILTER) && defined?(FILTER_ARG)
- sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, FILTER_ARG)
+ # this differs from nginx, since nginx doesn't allow us to
+ # configure the the timeout...
+ tmp = DEFAULTS.merge(opt)
+ seconds = tmp[:tcp_defer_accept]
+ seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
+ seconds = 0 unless seconds # nil/false means disable this
+ sock.setsockopt(SOL_TCP, TCP_DEFER_ACCEPT, seconds)
+ elsif respond_to?(:accf_arg)
+ tmp = DEFAULTS.merge(opt)
+ if name = tmp[:accept_filter]
+ begin
+ sock.setsockopt(SOL_SOCKET, SO_ACCEPTFILTER, accf_arg(name))
+ rescue => e
+ logger.error("#{sock_name(sock)} " \
+ "failed to set accept_filter=#{name} (#{e.inspect})")
+ end
+ end
   end
   end
 
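On Linux, :tcp_defer_accept now flows through to TCP_DEFER_ACCEPT as a timeout in seconds rather than the hard-coded 1: true falls back to the default, nil or false disables it, and any integer passes straight through. A hedged sketch of that resolution, reusing the values from the DEFAULTS hash above (resolve_defer_accept is a hypothetical helper, not part of Unicorn):

    # Values copied from the DEFAULTS hash in the earlier hunk.
    DEFAULTS = { :tcp_defer_accept => 1, :accept_filter => 'httpready' }

    def resolve_defer_accept(opt)
      seconds = DEFAULTS.merge(opt)[:tcp_defer_accept]
      seconds = DEFAULTS[:tcp_defer_accept] if seconds == true
      seconds || 0   # nil/false disables deferred accepts entirely
    end

    resolve_defer_accept({})                          # => 1  (the default)
    resolve_defer_accept(:tcp_defer_accept => true)   # => 1
    resolve_defer_accept(:tcp_defer_accept => 60)     # => 60 (Rainbows!-style DoS guard)
    resolve_defer_accept(:tcp_defer_accept => false)  # => 0  (disabled)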
58
81
 
@@ -69,14 +92,11 @@ module Unicorn
   end
   sock.listen(opt[:backlog] || 1024)
   rescue => e
- if respond_to?(:logger)
- logger.error "error setting socket options: #{e.inspect}"
- logger.error e.backtrace.join("\n")
- end
+ logger.error "error setting socket options: #{e.inspect}"
+ logger.error e.backtrace.join("\n")
   end
 
   def log_buffer_sizes(sock, pfx = '')
- respond_to?(:logger) or return
   rcvbuf = sock.getsockopt(SOL_SOCKET, SO_RCVBUF).unpack('i')
   sndbuf = sock.getsockopt(SOL_SOCKET, SO_SNDBUF).unpack('i')
   logger.info "#{pfx}#{sock_name(sock)} rcvbuf=#{rcvbuf} sndbuf=#{sndbuf}"
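With the respond_to?(:logger) guards gone, SocketHelper now calls logger unconditionally, so whatever includes it must provide one (inside Unicorn itself the HttpServer already does). A hedged sketch of what an including class would need; MyListener is hypothetical and assumes the unicorn gem is loadable:

    require 'unicorn'
    require 'logger'

    class MyListener
      include Unicorn::SocketHelper

      # SocketHelper's error/info paths assume this method exists
      def logger
        @logger ||= Logger.new($stderr)
      end
    end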
@@ -91,9 +111,7 @@ module Unicorn
   sock = if address[0] == ?/
   if File.exist?(address)
   if File.socket?(address)
- if self.respond_to?(:logger)
- logger.info "unlinking existing socket=#{address}"
- end
+ logger.info "unlinking existing socket=#{address}"
   File.unlink(address)
   else
   raise ArgumentError,
@@ -1,224 +1,232 @@
   # -*- encoding: binary -*-
 
- module Unicorn
-
- # acts like tee(1) on an input input to provide a input-like stream
- # while providing rewindable semantics through a File/StringIO backing
- # store. On the first pass, the input is only read on demand so your
- # Rack application can use input notification (upload progress and
- # like). This should fully conform to the Rack::Lint::InputWrapper
- # specification on the public API. This class is intended to be a
- # strict interpretation of Rack::Lint::InputWrapper functionality and
- # will not support any deviations from it.
- #
- # When processing uploads, Unicorn exposes a TeeInput object under
- # "rack.input" of the Rack environment.
- class TeeInput < Struct.new(:socket, :req, :parser, :buf, :len, :tmp, :buf2)
-
- # Initializes a new TeeInput object. You normally do not have to call
- # this unless you are writing an HTTP server.
- def initialize(*args)
- super(*args)
- self.len = parser.content_length
- self.tmp = len && len < Const::MAX_BODY ? StringIO.new("") : Util.tmpio
- self.buf2 = ""
- if buf.size > 0
- parser.filter_body(buf2, buf) and finalize_input
- tmp.write(buf2)
- tmp.seek(0)
- end
+ # acts like tee(1) on an input input to provide a input-like stream
+ # while providing rewindable semantics through a File/StringIO backing
+ # store. On the first pass, the input is only read on demand so your
+ # Rack application can use input notification (upload progress and
+ # like). This should fully conform to the Rack::Lint::InputWrapper
+ # specification on the public API. This class is intended to be a
+ # strict interpretation of Rack::Lint::InputWrapper functionality and
+ # will not support any deviations from it.
+ #
+ # When processing uploads, Unicorn exposes a TeeInput object under
+ # "rack.input" of the Rack environment.
+ class Unicorn::TeeInput < Struct.new(:socket, :req, :parser,
+ :buf, :len, :tmp, :buf2)
+
+ # The maximum size (in +bytes+) to buffer in memory before
+ # resorting to a temporary file. Default is 112 kilobytes.
+ @@client_body_buffer_size = Unicorn::Const::MAX_BODY
+
+ # The I/O chunk size (in +bytes+) for I/O operations where
+ # the size cannot be user-specified when a method is called.
+ # The default is 16 kilobytes.
+ @@io_chunk_size = Unicorn::Const::CHUNK_SIZE
+
+ # Initializes a new TeeInput object. You normally do not have to call
+ # this unless you are writing an HTTP server.
+ def initialize(*args)
+ super(*args)
+ self.len = parser.content_length
+ self.tmp = len && len < @@client_body_buffer_size ?
+ StringIO.new("") : Unicorn::Util.tmpio
+ self.buf2 = ""
+ if buf.size > 0
+ parser.filter_body(buf2, buf) and finalize_input
+ tmp.write(buf2)
+ tmp.rewind
   end
+ end
 
- # :call-seq:
- # ios.size => Integer
- #
- # Returns the size of the input. For requests with a Content-Length
- # header value, this will not read data off the socket and just return
- # the value of the Content-Length header as an Integer.
- #
- # For Transfer-Encoding:chunked requests, this requires consuming
- # all of the input stream before returning since there's no other
- # way to determine the size of the request body beforehand.
- #
- # This method is no longer part of the Rack specification as of
- # Rack 1.2, so its use is not recommended. This method only exists
- # for compatibility with Rack applications designed for Rack 1.1 and
- # earlier. Most applications should only need to call +read+ with a
- # specified +length+ in a loop until it returns +nil+.
- def size
- len and return len
-
- if socket
- pos = tmp.pos
- while tee(Const::CHUNK_SIZE, buf2)
- end
- tmp.seek(pos)
+ # :call-seq:
+ # ios.size => Integer
+ #
+ # Returns the size of the input. For requests with a Content-Length
+ # header value, this will not read data off the socket and just return
+ # the value of the Content-Length header as an Integer.
+ #
+ # For Transfer-Encoding:chunked requests, this requires consuming
+ # all of the input stream before returning since there's no other
+ # way to determine the size of the request body beforehand.
+ #
+ # This method is no longer part of the Rack specification as of
+ # Rack 1.2, so its use is not recommended. This method only exists
+ # for compatibility with Rack applications designed for Rack 1.1 and
+ # earlier. Most applications should only need to call +read+ with a
+ # specified +length+ in a loop until it returns +nil+.
+ def size
+ len and return len
+
+ if socket
+ pos = tmp.pos
+ while tee(@@io_chunk_size, buf2)
   end
-
- self.len = tmp.size
+ tmp.seek(pos)
   end
 
- # :call-seq:
- # ios.read([length [, buffer ]]) => string, buffer, or nil
- #
- # Reads at most length bytes from the I/O stream, or to the end of
- # file if length is omitted or is nil. length must be a non-negative
- # integer or nil. If the optional buffer argument is present, it
- # must reference a String, which will receive the data.
- #
- # At end of file, it returns nil or "" depend on length.
- # ios.read() and ios.read(nil) returns "".
- # ios.read(length [, buffer]) returns nil.
- #
- # If the Content-Length of the HTTP request is known (as is the common
- # case for POST requests), then ios.read(length [, buffer]) will block
- # until the specified length is read (or it is the last chunk).
- # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
- # ios.read(length [, buffer]) will return immediately if there is
- # any data and only block when nothing is available (providing
- # IO#readpartial semantics).
- def read(*args)
- socket or return tmp.read(*args)
-
- length = args.shift
- if nil == length
- rv = tmp.read || ""
- while tee(Const::CHUNK_SIZE, buf2)
- rv << buf2
- end
- rv
+ self.len = tmp.size
+ end
+
+ # :call-seq:
+ # ios.read([length [, buffer ]]) => string, buffer, or nil
+ #
+ # Reads at most length bytes from the I/O stream, or to the end of
+ # file if length is omitted or is nil. length must be a non-negative
+ # integer or nil. If the optional buffer argument is present, it
+ # must reference a String, which will receive the data.
+ #
+ # At end of file, it returns nil or "" depend on length.
+ # ios.read() and ios.read(nil) returns "".
+ # ios.read(length [, buffer]) returns nil.
+ #
+ # If the Content-Length of the HTTP request is known (as is the common
+ # case for POST requests), then ios.read(length [, buffer]) will block
+ # until the specified length is read (or it is the last chunk).
+ # Otherwise, for uncommon "Transfer-Encoding: chunked" requests,
+ # ios.read(length [, buffer]) will return immediately if there is
+ # any data and only block when nothing is available (providing
+ # IO#readpartial semantics).
+ def read(*args)
+ socket or return tmp.read(*args)
+
+ length = args.shift
+ if nil == length
+ rv = tmp.read || ""
+ while tee(@@io_chunk_size, buf2)
+ rv << buf2
+ end
+ rv
+ else
+ rv = args.shift || ""
+ diff = tmp.size - tmp.pos
+ if 0 == diff
+ ensure_length(tee(length, rv), length)
   else
- rv = args.shift || ""
- diff = tmp.size - tmp.pos
- if 0 == diff
- ensure_length(tee(length, rv), length)
- else
- ensure_length(tmp.read(diff > length ? length : diff, rv), length)
- end
+ ensure_length(tmp.read(diff > length ? length : diff, rv), length)
   end
   end
+ end
 
- # :call-seq:
- # ios.gets => string or nil
- #
- # Reads the next ``line'' from the I/O stream; lines are separated
- # by the global record separator ($/, typically "\n"). A global
- # record separator of nil reads the entire unread contents of ios.
- # Returns nil if called at the end of file.
- # This takes zero arguments for strict Rack::Lint compatibility,
- # unlike IO#gets.
- def gets
- socket or return tmp.gets
- sep = $/ or return read
-
- orig_size = tmp.size
- if tmp.pos == orig_size
- tee(Const::CHUNK_SIZE, buf2) or return nil
- tmp.seek(orig_size)
- end
+ # :call-seq:
+ # ios.gets => string or nil
+ #
+ # Reads the next ``line'' from the I/O stream; lines are separated
+ # by the global record separator ($/, typically "\n"). A global
+ # record separator of nil reads the entire unread contents of ios.
+ # Returns nil if called at the end of file.
+ # This takes zero arguments for strict Rack::Lint compatibility,
+ # unlike IO#gets.
+ def gets
+ socket or return tmp.gets
+ sep = $/ or return read
+
+ orig_size = tmp.size
+ if tmp.pos == orig_size
+ tee(@@io_chunk_size, buf2) or return nil
+ tmp.seek(orig_size)
+ end
 
- sep_size = Rack::Utils.bytesize(sep)
- line = tmp.gets # cannot be nil here since size > pos
- sep == line[-sep_size, sep_size] and return line
+ sep_size = Rack::Utils.bytesize(sep)
+ line = tmp.gets # cannot be nil here since size > pos
+ sep == line[-sep_size, sep_size] and return line
 
- # unlikely, if we got here, then tmp is at EOF
- begin
- orig_size = tmp.pos
- tee(Const::CHUNK_SIZE, buf2) or break
- tmp.seek(orig_size)
- line << tmp.gets
- sep == line[-sep_size, sep_size] and return line
- # tmp is at EOF again here, retry the loop
- end while true
-
- line
- end
+ # unlikely, if we got here, then tmp is at EOF
+ begin
+ orig_size = tmp.pos
+ tee(@@io_chunk_size, buf2) or break
+ tmp.seek(orig_size)
+ line << tmp.gets
+ sep == line[-sep_size, sep_size] and return line
+ # tmp is at EOF again here, retry the loop
+ end while true
 
- # :call-seq:
- # ios.each { |line| block } => ios
- #
- # Executes the block for every ``line'' in *ios*, where lines are
- # separated by the global record separator ($/, typically "\n").
- def each(&block)
- while line = gets
- yield line
- end
+ line
+ end
 
- self # Rack does not specify what the return value is here
+ # :call-seq:
+ # ios.each { |line| block } => ios
+ #
+ # Executes the block for every ``line'' in *ios*, where lines are
+ # separated by the global record separator ($/, typically "\n").
+ def each(&block)
+ while line = gets
+ yield line
   end
 
- # :call-seq:
- # ios.rewind => 0
- #
- # Positions the *ios* pointer to the beginning of input, returns
- # the offset (zero) of the +ios+ pointer. Subsequent reads will
- # start from the beginning of the previously-buffered input.
- def rewind
- tmp.rewind # Rack does not specify what the return value is here
- end
+ self # Rack does not specify what the return value is here
+ end
 
- private
-
- def client_error(e)
- case e
- when EOFError
- # in case client only did a premature shutdown(SHUT_WR)
- # we do support clients that shutdown(SHUT_WR) after the
- # _entire_ request has been sent, and those will not have
- # raised EOFError on us.
- socket.close if socket
- raise ClientShutdown, "bytes_read=#{tmp.size}", []
- when HttpParserError
- e.set_backtrace([])
- end
- raise e
- end
+ # :call-seq:
+ # ios.rewind => 0
+ #
+ # Positions the *ios* pointer to the beginning of input, returns
+ # the offset (zero) of the +ios+ pointer. Subsequent reads will
+ # start from the beginning of the previously-buffered input.
+ def rewind
+ tmp.rewind # Rack does not specify what the return value is here
+ end
 
- # tees off a +length+ chunk of data from the input into the IO
- # backing store as well as returning it. +dst+ must be specified.
- # returns nil if reading from the input returns nil
- def tee(length, dst)
- unless parser.body_eof?
- if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
- tmp.write(dst)
- tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
- return dst
- end
- end
- finalize_input
- rescue => e
- client_error(e)
+ private
+
+ def client_error(e)
+ case e
+ when EOFError
+ # in case client only did a premature shutdown(SHUT_WR)
+ # we do support clients that shutdown(SHUT_WR) after the
+ # _entire_ request has been sent, and those will not have
+ # raised EOFError on us.
+ socket.close if socket
+ raise ClientShutdown, "bytes_read=#{tmp.size}", []
+ when HttpParserError
+ e.set_backtrace([])
   end
+ raise e
+ end
 
- def finalize_input
- while parser.trailers(req, buf).nil?
- # Don't worry about raising ClientShutdown here on EOFError, tee()
- # will catch EOFError when app is processing it, otherwise in
- # initialize we never get any chance to enter the app so the
- # EOFError will just get trapped by Unicorn and not the Rack app
- buf << socket.readpartial(Const::CHUNK_SIZE)
+ # tees off a +length+ chunk of data from the input into the IO
+ # backing store as well as returning it. +dst+ must be specified.
+ # returns nil if reading from the input returns nil
+ def tee(length, dst)
+ unless parser.body_eof?
+ if parser.filter_body(dst, socket.readpartial(length, buf)).nil?
+ tmp.write(dst)
+ tmp.seek(0, IO::SEEK_END) # workaround FreeBSD/OSX + MRI 1.8.x bug
+ return dst
   end
- self.socket = nil
   end
+ finalize_input
+ rescue => e
+ client_error(e)
+ end
 
- # tee()s into +dst+ until it is of +length+ bytes (or until
- # we've reached the Content-Length of the request body).
- # Returns +dst+ (the exact object, not a duplicate)
- # To continue supporting applications that need near-real-time
- # streaming input bodies, this is a no-op for
- # "Transfer-Encoding: chunked" requests.
- def ensure_length(dst, length)
- # len is nil for chunked bodies, so we can't ensure length for those
- # since they could be streaming bidirectionally and we don't want to
- # block the caller in that case.
- return dst if dst.nil? || len.nil?
-
- while dst.size < length && tee(length - dst.size, buf2)
- dst << buf2
- end
+ def finalize_input
+ while parser.trailers(req, buf).nil?
+ # Don't worry about raising ClientShutdown here on EOFError, tee()
+ # will catch EOFError when app is processing it, otherwise in
+ # initialize we never get any chance to enter the app so the
+ # EOFError will just get trapped by Unicorn and not the Rack app
+ buf << socket.readpartial(@@io_chunk_size)
+ end
+ self.socket = nil
+ end
 
- dst
+ # tee()s into +dst+ until it is of +length+ bytes (or until
+ # we've reached the Content-Length of the request body).
+ # Returns +dst+ (the exact object, not a duplicate)
+ # To continue supporting applications that need near-real-time
+ # streaming input bodies, this is a no-op for
+ # "Transfer-Encoding: chunked" requests.
+ def ensure_length(dst, length)
+ # len is nil for chunked bodies, so we can't ensure length for those
+ # since they could be streaming bidirectionally and we don't want to
+ # block the caller in that case.
+ return dst if dst.nil? || len.nil?
+
+ while dst.size < length && tee(length - dst.size, buf2)
+ dst << buf2
   end
 
+ dst
   end
+
   end
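TeeInput becomes a top-level Unicorn::TeeInput class, and the magic constants are replaced by the class variables @@client_body_buffer_size (bodies smaller than this stay in a StringIO, larger ones spill to a temp file) and @@io_chunk_size. The documented way for an application to consume it is a fixed-size read loop until nil. A hedged sketch of a Rack app doing exactly that (UploadCounter and the 16384 chunk size are illustrative only):

    # Sketch: reading "rack.input" (a Unicorn::TeeInput when running under
    # Unicorn) the way the comments above recommend -- read(length) in a loop.
    class UploadCounter
      def call(env)
        input = env['rack.input']        # rewindable TeeInput under Unicorn
        bytes = 0
        buf = ''
        while input.read(16384, buf)     # returns nil at end of the body
          bytes += buf.size
        end
        input.rewind                     # a later middleware/app can re-read it
        [200, { 'Content-Type' => 'text/plain' }, ["read #{bytes} bytes\n"]]
      end
    end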