spider-gazelle 0.1.6 → 0.1.7

checksums.yaml CHANGED
@@ -1,7 +1,7 @@
  ---
  SHA1:
- metadata.gz: 3ffd0e265eacee179a3ecd3ef2b55546656aaa31
- data.tar.gz: 65f4d1cc2bfb36040840492dc6013733bdd2bd99
+ metadata.gz: 523c991f5e4470fe7b6170fad1fd32a59226c12d
+ data.tar.gz: 92b58b38698befe73be3fd9aa20d54beb675bb7e
  SHA512:
- metadata.gz: 9579a2a0879d1ee81059f4ed7d19a0827630aa5d0737b37fac4c4485dc36b7ddf47f3cece95f065c3d454cb8b9ad712cd7f09b0a01b7762da8fdc2333609efa9
- data.tar.gz: 8168496effeeaaef233c9e318251dab67b345ff8136012e1242b2e4bdf2ace18dfb2828cbc840f78dfb325119a53a1285abe5634f3d79fec8dd20af747cc2260
+ metadata.gz: 7b469356bdda20fdaa8caea8769d02595002a9f943dbd08320043f1a6e57026150f0e2cfe1692d3c929f3cc68b7a3007dd6835cda4dddfe2835609d9eea31bff
+ data.tar.gz: 75b41769b7c3006dd084d978b34a66eff5ab9669dc25f8ac695f45ac0852d87a9b5576105b186470f75174b5459325f53b833530a329c9cc03b77c5d8d06e399
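
These are the SHA1 and SHA512 digests of the two archives inside the packaged gem (metadata.gz and data.tar.gz). A minimal sketch for recomputing them locally, assuming the gem has been unpacked so the two files sit in the current directory (file names and paths here are illustrative):

    require 'digest'

    # Adjust the paths to wherever the gem contents were extracted
    %w(metadata.gz data.tar.gz).each do |file|
      puts "#{file} SHA1:   #{Digest::SHA1.file(file).hexdigest}"
      puts "#{file} SHA512: #{Digest::SHA512.file(file).hexdigest}"
    end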
data/README.md CHANGED
@@ -1,5 +1,7 @@
  # spider-gazelle
 
+ [<img src="https://codeclimate.com/github/cotag/spider-gazelle.png" />](https://codeclimate.com/github/cotag/spider-gazelle)
+
 
  A fast, parallel and concurrent web server for ruby
 
@@ -28,3 +30,8 @@ Look out! Here comes the Spidergazelle!
 
  For other command line options look at [the source](/bin/sg)
 
+
+ ## Community support
+
+ * [GitHub issues](https://github.com/cotag/spider-gazelle/issues).
+ * IRC channel `#spider-gazelle` at `irc.freenode.net`.
@@ -1,39 +1,37 @@
- require 'rack/handler'
- require 'spider-gazelle'
-
+ require "rack/handler"
+ require "spider-gazelle"
+ require "spider-gazelle/const"
 
  module Rack
- module Handler
- module SpiderGazelle
- DEFAULT_OPTIONS = {
- :Host => '0.0.0.0',
- :Port => 8080,
- :Verbose => false
- }
-
- def self.run(app, options = {})
- options = DEFAULT_OPTIONS.merge(options)
-
- if options[:Verbose]
- app = Rack::CommonLogger.new(app, STDOUT)
- end
-
- if options[:environment]
- ENV['RACK_ENV'] = options[:environment].to_s
- end
-
- ::SpiderGazelle::Spider.run app, options
- end
+ module Handler
+ module SpiderGazelle
+ DEFAULT_OPTIONS = {
+ :Host => "0.0.0.0",
+ :Port => 8080,
+ :Verbose => false
+ }
+
+ def self.run(app, options = {})
+ options = DEFAULT_OPTIONS.merge(options)
+
+ if options[:Verbose]
+ app = Rack::CommonLogger.new(app, STDOUT)
+ end
 
- def self.valid_options
- {
- "Host=HOST" => "Hostname to listen on (default: 0.0.0.0)",
- "Port=PORT" => "Port to listen on (default: 8080)",
- "Quiet" => "Don't report each request"
- }
- end
+ if options[:environment]
+ ENV["RACK_ENV"] = options[:environment].to_s
  end
 
- register :'spider-gazelle', SpiderGazelle
+ ::SpiderGazelle::Spider.run app, options
+ end
+
+ def self.valid_options
+ { "Host=HOST" => "Hostname to listen on (default: 0.0.0.0)",
+ "Port=PORT" => "Port to listen on (default: 8080)",
+ "Quiet" => "Don't report each request" }
+ end
  end
+
+ register :"spider-gazelle", SpiderGazelle
+ end
  end
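
With the handler registered under :"spider-gazelle", a Rack application can be booted through the standard handler interface. A minimal usage sketch; the require path (following the usual rack/handler file convention) and the inline app are assumptions for illustration, while DEFAULT_OPTIONS supplies :Host => "0.0.0.0" and :Port => 8080 when not overridden:

    # Assumed require path following the usual rack/handler convention
    require 'rack/handler/spider-gazelle'

    # Trivial Rack app used purely for illustration
    app = lambda do |env|
      [200, { 'Content-Type' => 'text/plain' }, ['Hello from spider-gazelle']]
    end

    # :Verbose wraps the app in Rack::CommonLogger; :Port overrides the 8080 default
    Rack::Handler::SpiderGazelle.run app, :Port => 3000, :Verbose => true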
@@ -2,7 +2,6 @@ require "http-parser" # C based, fast, http parser
  require "libuv" # Ruby Libuv FFI wrapper
  require "rack" # Ruby webserver abstraction
 
- require "spider-gazelle/version"
  require "spider-gazelle/request" # Holds request information and handles request processing
  require "spider-gazelle/connection" # Holds connection information and handles request pipelining
  require "spider-gazelle/gazelle" # Processes data received from connections
@@ -13,10 +12,9 @@ require "spider-gazelle/spider" # Accepts connections and offloads them
 
  require "spider-gazelle/upgrades/websocket" # Websocket implementation
 
-
  module SpiderGazelle
- # Delegate pipe used for passing sockets to the gazelles
- # Signal pipe used to pass control signals
- DELEGATE_PIPE = "/tmp/spider-gazelle.delegate"
- SIGNAL_PIPE = "/tmp/spider-gazelle.signal"
+ # Delegate pipe used for passing sockets to the gazelles
+ DELEGATE_PIPE = "/tmp/spider-gazelle.delegate"
+ # Signal pipe used to pass control signals
+ SIGNAL_PIPE = "/tmp/spider-gazelle.signal"
  end
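
The two paths above are the IPC channels between the spider (the accepting process) and the gazelles (the workers): accepted sockets travel over the delegate pipe, control messages over the signal pipe. A rough illustration of the underlying idea, descriptor passing between endpoints, using only the standard library rather than the gem's libuv pipes:

    require 'socket'

    # Illustration only: hand an IO across a UNIX socket pair with send_io / recv_io,
    # which is the general mechanism behind the delegate pipe
    parent, child = UNIXSocket.pair

    reader, writer = IO.pipe
    parent.send_io writer             # pass the descriptor to the other endpoint
    received = child.recv_io          # arrives as a fresh IO on the same descriptor
    received.syswrite "socket passed across the pipe\n"
    puts reader.gets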
@@ -1,69 +1,64 @@
  require 'thread'
  require 'radix/base'
 
-
  module SpiderGazelle
- module AppStore
- # Basic compression using UTF (more efficient for ID's stored as strings)
- B65 = ::Radix::Base.new(::Radix::BASE::B62 + ['-', '_', '~'])
- B10 = ::Radix::Base.new(10)
+ module AppStore
+ # Basic compression using UTF (more efficient for ID's stored as strings)
+ B65 = ::Radix::Base.new(::Radix::BASE::B62 + ['-', '_', '~'])
+ B10 = ::Radix::Base.new(10)
 
- @mutex = Mutex.new
- @count = 0
- @apps = ThreadSafe::Cache.new
- @loaded = ThreadSafe::Cache.new
+ @mutex = Mutex.new
+ @apps = ThreadSafe::Cache.new
+ @loaded = ThreadSafe::Cache.new
+ @count = 0
 
- # Load an app and assign it an ID
- def self.load(app, options={})
- is_rack_app = !app.is_a?(String)
- app_key = is_rack_app ? app.class.name.to_sym : app.to_sym
- id = @loaded[app_key]
+ # Load an app and assign it an ID
+ def self.load(app, options={})
+ is_rack_app = !app.is_a?(String)
+ app_key = is_rack_app ? app.class.name.to_sym : app.to_sym
+ id = @loaded[app_key]
 
- if id.nil?
- app, options = ::Rack::Builder.parse_file(app) unless is_rack_app
+ if id.nil?
+ app, options = ::Rack::Builder.parse_file(app) unless is_rack_app
 
- count = 0
- @mutex.synchronize {
- count = @count += 1
- }
- id = Radix.convert(count, B10, B65).to_sym
- @apps[id] = app
- @loaded[app_key] = id
- end
+ count = 0
+ @mutex.synchronize { count = @count += 1 }
+ id = Radix.convert(count, B10, B65).to_sym
+ @apps[id] = app
+ @loaded[app_key] = id
+ end
 
- id
- end
+ id
+ end
 
- # Manually load an app
- def self.add(app)
- id = @loaded[app.__id__]
+ # Manually load an app
+ def self.add(app)
+ id = @loaded[app.__id__]
 
- if id.nil?
- count = 0
- @mutex.synchronize {
- count = @count += 1
- }
- id = Radix.convert(count, B10, B65).to_sym
- @apps[id] = app
- @loaded[app.__id__] = id
- end
+ if id.nil?
+ count = 0
+ @mutex.synchronize { count = @count += 1 }
+ id = Radix.convert(count, B10, B65).to_sym
+ @apps[id] = app
+ @loaded[app.__id__] = id
+ end
 
- id
- end
+ id
+ end
 
- # Lookup an application
- def self.lookup(app)
- if app.is_a?(String) || app.is_a?(Symbol)
- @apps[@loaded[app.to_sym]]
- else
- @apps[@loaded[app.__id__]]
- end
- end
+ # Lookup an application
+ def self.lookup(app)
+ if app.is_a?(String) || app.is_a?(Symbol)
+ @apps[@loaded[app.to_sym]]
+ else
+ @apps[@loaded[app.__id__]]
+ end
+ end
 
- # Get an app using the id directly
- def self.get(id)
- id = id.to_sym if id.is_a?(String)
- @apps[id]
- end
+ # Get an app using the id directly
+ def self.get(id)
+ id = id.to_sym if id.is_a?(String)
+ @apps[id]
  end
+ end
  end
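
AppStore hands each loaded application a compact ID by converting an incrementing counter from base 10 into a 65-character alphabet (the radix gem's B62 digits plus -, _ and ~). A standalone sketch of that conversion, mirroring the constants above (the counter value 12345 is arbitrary):

    require 'radix/base'

    B65 = ::Radix::Base.new(::Radix::BASE::B62 + ['-', '_', '~'])
    B10 = ::Radix::Base.new(10)

    # A counter value of 12345 collapses into a three character, symbol-friendly identifier
    id = Radix.convert(12345, B10, B65).to_sym
    puts id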
@@ -1,65 +1,59 @@
- require 'thread'
+ require 'spider-gazelle/const'
  require 'set'
-
+ require 'thread'
 
  module SpiderGazelle
- class Binding
-
-
- attr_reader :app_id
-
-
- def initialize(loop, delegate, app_id, options = {})
- @app_id = app_id
- @options = options
- @loop = loop
- @delegate = delegate
- @tls = @options[:tls] || false
- @port = @options[:Port] || (@tls ? 443 : 80)
- @optimize = @options[:optimize_for_latency] || true
-
- # Connection management functions
- @new_connection = method(:new_connection)
- @accept_connection = method(:accept_connection)
- end
-
- # Bind the application to the selected port
- def bind
- # Bind the socket
- @tcp = @loop.tcp
- @tcp.bind(@options[:Host], @port, @new_connection)
- @tcp.listen(@options[:backlog])
-
- # Delegate errors
- @tcp.catch do |e|
- @loop.log :error, 'application bind failed', e
- end
- @tcp
- end
+ class Binding
+ include Const
+
+ attr_reader :app_id
+
+ def initialize(loop, delegate, app_id, options = {})
+ @app_id = app_id
+ @options = options
+ @loop = loop
+ @delegate = delegate
+ @tls = @options[:tls] || false
+ @port = @options[:Port] || (@tls ? PORT_443 : PORT_80)
+ @optimize = @options[:optimize_for_latency] || true
+
+ # Connection management functions
+ @new_connection = method :new_connection
+ @accept_connection = method :accept_connection
+ end
 
- # Close the bindings
- def unbind
- # close unless we've never been bound
- @tcp.close unless @tcp.nil?
- @tcp
- end
+ # Bind the application to the selected port
+ def bind
+ # Bind the socket
+ @tcp = @loop.tcp
+ @tcp.bind @options[:Host], @port, @new_connection
+ @tcp.listen @options[:backlog]
 
+ # Delegate errors
+ @tcp.catch { |e| @loop.log(:error, 'application bind failed', e) }
+ @tcp
+ end
 
- protected
+ # Close the bindings
+ def unbind
+ # close unless we've never been bound
+ @tcp.close unless @tcp.nil?
+ @tcp
+ end
 
+ protected
 
- # There is a new connection pending
- # We accept it
- def new_connection(server)
- server.accept @accept_connection
- end
+ # There is a new connection pending. We accept it
+ def new_connection(server)
+ server.accept @accept_connection
+ end
 
- # Once the connection is accepted we disable Nagles Algorithm
- # This improves performance as we are using vectored or scatter/gather IO
- # Then the spider delegates to the gazelle loops
- def accept_connection(client)
- client.enable_nodelay if @optimize == true
- @delegate.call(client, @tls, @port, @app_id)
- end
+ # Once the connection is accepted we disable Nagles Algorithm
+ # This improves performance as we are using vectored or scatter/gather IO
+ # Then the spider delegates to the gazelle loops
+ def accept_connection(client)
+ client.enable_nodelay if @optimize == true
+ @delegate.call client, @tls, @port, @app_id
  end
+ end
  end
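
accept_connection above calls enable_nodelay on every accepted client, i.e. it turns off Nagle's algorithm so the many small vectored writes (status line, headers, chunk frames) are flushed immediately rather than coalesced. A rough standard-library equivalent of that call, shown with TCPServer and setsockopt instead of the gem's libuv socket (host and port are illustrative):

    require 'socket'

    server = TCPServer.new '127.0.0.1', 0          # ephemeral port for the example
    Thread.new { TCPSocket.new('127.0.0.1', server.addr[1]).close }

    client = server.accept
    # Roughly what enable_nodelay does: disable Nagle's algorithm so small writes
    # go out straight away instead of being buffered by the kernel
    client.setsockopt Socket::IPPROTO_TCP, Socket::TCP_NODELAY, 1
    client.close
    server.close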
@@ -1,366 +1,330 @@
1
+ require 'spider-gazelle/const'
1
2
  require 'stringio'
2
3
 
3
-
4
4
  module SpiderGazelle
5
- class Connection
6
- Hijack = Struct.new(:socket, :env)
7
-
8
- REQUEST_METHOD = 'REQUEST_METHOD'.freeze # NOTE:: duplicate in gazelle.rb
9
- HEAD = 'HEAD'.freeze
10
-
11
- RACK = 'rack'.freeze # used for filtering headers
12
- CLOSE = 'close'.freeze
13
- CONNECTION = 'Connection'.freeze
14
- CONTENT_LENGTH = 'Content-Length'.freeze
15
- TRANSFER_ENCODING = 'Transfer-Encoding'.freeze
16
- CHUNKED = 'chunked'.freeze
17
- COLON_SPACE = ': '.freeze
18
- EOF = "0\r\n\r\n".freeze
19
- CRLF = "\r\n".freeze
20
- NEWLINE = "\n".freeze
21
- ZERO = '0'.freeze
22
-
23
- HTTP_STATUS_CODES = ::Rack::Utils::HTTP_STATUS_CODES
24
- HTTP_STATUS_DEFAULT = proc { 'CUSTOM' }
25
-
26
- HTTP_11_400 = "HTTP/1.1 400 Bad Request\r\n\r\n".freeze
27
-
28
-
29
- def self.on_progress(data, socket); end
30
- DUMMY_PROGRESS = self.method(:on_progress)
31
-
32
-
33
- # For Gazelle
34
- attr_reader :state, :parsing
35
- # For Request
36
- attr_reader :tls, :port, :loop, :socket, :async_callback
37
-
38
-
39
- def initialize(gazelle, loop, socket, port, state, app, queue)
40
- # A single parser instance per-connection (supports pipelining)
41
- @state = state
42
- @pending = []
43
-
44
- # Work callback for thread pool processing
45
- @request = nil
46
- @work = method(:work)
47
-
48
- # Called after the work on the thread pool is complete
49
- @send_response = method(:send_response)
50
- @send_error = method(:send_error)
51
-
52
- # Used to chain promises (ensures requests are processed in order)
53
- @process_next = method(:process_next)
54
- @write_chunk = method(:write_chunk)
55
- @current_worker = queue # keep track of work queue head to prevent unintentional GC
56
- @queue_worker = queue # start queue with an existing resolved promise (::Libuv::Q::ResolvedPromise.new(@loop, true))
57
-
58
- # Socket for writing the response
59
- @socket = socket
60
- @app = app
61
- @port = port
62
- @tls = @socket.tls?
63
- @loop = loop
64
- @gazelle = gazelle
65
- @async_callback = method(:deferred_callback)
66
-
67
- # Remove connection if the socket closes
68
- socket.finally &method(:unlink)
69
- end
70
-
71
- # Lazy eval the IP
72
- def remote_ip
73
- @remote_ip ||= @socket.peername[0]
74
- end
75
-
76
- # Creates a new request state object
77
- def start_parsing
78
- @parsing = Request.new(self, @app)
79
- end
5
+ class Connection
6
+ include Const
7
+
8
+ Hijack = Struct.new :socket, :env
9
+
10
+ def self.on_progress(data, socket); end
11
+ DUMMY_PROGRESS = self.method :on_progress
12
+
13
+ # For Gazelle
14
+ attr_reader :state, :parsing
15
+ # For Request
16
+ attr_reader :tls, :port, :loop, :socket, :async_callback
17
+
18
+ def initialize(gazelle, loop, socket, port, state, app, queue)
19
+ # A single parser instance per-connection (supports pipelining)
20
+ @state = state
21
+ @pending = []
22
+
23
+ # Work callback for thread pool processing
24
+ @request = nil
25
+ @work = method :work
26
+
27
+ # Called after the work on the thread pool is complete
28
+ @send_response = method :send_response
29
+ @send_error = method :send_error
30
+
31
+ # Used to chain promises (ensures requests are processed in order)
32
+ @process_next = method :process_next
33
+ @write_chunk = method :write_chunk
34
+ # Keep track of work queue head to prevent unintentional GC
35
+ @current_worker = queue
36
+ # Start queue with an existing resolved promise (::Libuv::Q::ResolvedPromise.new(@loop, true))
37
+ @queue_worker = queue
38
+
39
+ # Socket for writing the response
40
+ @socket = socket
41
+ @app = app
42
+ @port = port
43
+ @tls = @socket.tls?
44
+ @loop = loop
45
+ @gazelle = gazelle
46
+ @async_callback = method :deferred_callback
47
+
48
+ # Remove connection if the socket closes
49
+ socket.finally &method(:unlink)
50
+ end
80
51
 
81
- # Chains the work in a promise queue
82
- def finished_parsing
83
- if !@state.keep_alive?
84
- @parsing.keep_alive = false
85
- @socket.stop_read # we don't want to do any more work than we need to
86
- end
87
- @parsing.upgrade = @state.upgrade?
88
- @pending.push @parsing
89
- @queue_worker = @queue_worker.then @process_next
90
- end
52
+ # Lazy eval the IP
53
+ def remote_ip
54
+ @remote_ip ||= @socket.peername[0]
55
+ end
91
56
 
92
- # The parser encountered an error
93
- def parsing_error
94
- # Grab the error
95
- send_error(@state.error)
96
-
97
- # We no longer care for any further requests from this client
98
- # however we will finish processing any valid pipelined requests before shutting down
99
- @socket.stop_read
100
- @queue_worker = @queue_worker.then do
101
- # TODO:: send response (400 bad request)
102
- @socket.write HTTP_11_400
103
- @socket.shutdown
104
- end
105
- end
57
+ # Creates a new request state object
58
+ def start_parsing
59
+ @parsing = Request.new self, @app
60
+ end
106
61
 
107
- # Schedule send
108
- def response(data)
109
- @loop.schedule
110
- end
62
+ # Chains the work in a promise queue
63
+ def finished_parsing
64
+ if !@state.keep_alive?
65
+ @parsing.keep_alive = false
66
+ # We don't want to do any more work than we need to
67
+ @socket.stop_read
68
+ end
69
+
70
+ @parsing.upgrade = @state.upgrade?
71
+ @pending.push @parsing
72
+ @queue_worker = @queue_worker.then @process_next
73
+ end
111
74
 
75
+ # The parser encountered an error
76
+ def parsing_error
77
+ # Grab the error
78
+ send_error @state.error
79
+
80
+ # We no longer care for any further requests from this client
81
+ # however we will finish processing any valid pipelined requests before shutting down
82
+ @socket.stop_read
83
+ @queue_worker = @queue_worker.then do
84
+ @socket.write ERROR_400_RESPONSE
85
+ @socket.shutdown
86
+ end
87
+ end
112
88
 
113
- protected
89
+ # Schedule send
90
+ def response(data)
91
+ @loop.schedule
92
+ end
114
93
 
94
+ protected
115
95
 
116
- # --------------
117
- # State handlers:
118
- # --------------
96
+ ##
97
+ # State handlers
119
98
 
99
+ # Called when an error occurs at any point while responding
100
+ def send_error(reason)
101
+ # Close the socket as this is fatal (file read error, gazelle error etc)
102
+ @socket.close
120
103
 
121
- # Called when an error occurs at any point while responding
122
- def send_error(reason)
123
- # Close the socket as this is fatal (file read error, gazelle error etc)
124
- @socket.close
104
+ # Log the error in a worker thread
105
+ @loop.work do
106
+ msg = "connection error: #{reason.message}\n#{reason.backtrace.join("\n") if reason.backtrace}\n"
107
+ @gazelle.logger.error msg
108
+ end
109
+ end
125
110
 
126
- # Log the error in a worker thread
127
- @loop.work do
128
- msg = "connection error: #{reason.message}\n#{reason.backtrace.join("\n") if reason.backtrace}\n"
129
- puts msg
130
- @gazelle.logger.error msg
131
- end
132
- end
111
+ # We use promise chaining to move the requests forward
112
+ # This provides an elegant way to handle persistent and pipelined connections
113
+ def process_next(result)
114
+ @request = @pending.shift
115
+ @current_worker = @loop.work @work
116
+ # Resolves the promise with a promise
117
+ @current_worker.then @send_response, @send_error
118
+ end
133
119
 
134
- # We use promise chaining to move the requests forward
135
- # This provides an elegant way to handle persistent and pipelined connections
136
- def process_next(result)
137
- @request = @pending.shift
138
- @current_worker = @loop.work @work
139
- @current_worker.then @send_response, @send_error # resolves the promise with a promise
140
- end
120
+ # Returns the response as the result of the work
121
+ # We support the unofficial rack async api (multi-call version for chunked responses)
122
+ def work
123
+ @request.execute!
124
+ end
141
125
 
142
- # returns the response as the result of the work
143
- # We support the unofficial rack async api (multi-call version for chunked responses)
144
- def work
145
- @request.execute!
146
- end
126
+ # Unlinks the connection from the rack app
127
+ # This occurs when requested and when the socket closes
128
+ def unlink
129
+ if not @gazelle.nil?
130
+ # Unlink the progress callback (prevent funny business)
131
+ @socket.progress &DUMMY_PROGRESS
132
+ @gazelle.discard self
133
+ @gazelle = nil
134
+ @state = nil
135
+ end
136
+ end
147
137
 
148
- # Unlinks the connection from the rack app
149
- # This occurs when requested and when the socket closes
150
- def unlink
151
- if not @gazelle.nil?
152
- @socket.progress &DUMMY_PROGRESS # unlink the progress callback (prevent funny business)
153
- @gazelle.discard(self)
154
- @gazelle = nil
155
- @state = nil
138
+ ##
139
+ # Core response handlers
140
+
141
+ def send_response(result)
142
+ # As we have come back from another thread the socket may have closed
143
+ # This check is an optimisation, the call to write and shutdown would fail safely
144
+
145
+ if @request.hijacked
146
+ # Unlink the management of the socket
147
+ unlink
148
+
149
+ # Pass the hijack response to the captor using the promise. This forwards the socket and
150
+ # environment as well as moving continued execution onto the event loop.
151
+ @request.hijacked.resolve Hijack.new(@socket, @request.env)
152
+ elsif !@socket.closed
153
+ if @request.deferred
154
+ # Wait for the response using this promise
155
+ promise = @request.deferred.promise
156
+
157
+ # Process any responses that might have made it here first
158
+ if @deferred_responses
159
+ @deferred_responses.each &method(:respond_with)
160
+ @deferred_responses = nil
161
+ end
162
+
163
+ return promise
164
+ # NOTE:: Somehow getting to here with a nil request... needs investigation
165
+ elsif not result.nil?
166
+ # clear any cached responses just in case
167
+ # could be set by error in the rack application
168
+ @deferred_responses = nil if @deferred_responses
169
+
170
+ status, headers, body = result
171
+
172
+ send_body = @request.env[REQUEST_METHOD] != HEAD
173
+
174
+ # If a file, stream the body in a non-blocking fashion
175
+ if body.respond_to? :to_path
176
+ if headers[CONTENT_LENGTH2]
177
+ type = :raw
178
+ else
179
+ type = :http
180
+ headers[TRANSFER_ENCODING] = CHUNKED
156
181
  end
157
- end
158
182
 
183
+ write_headers status, headers
159
184
 
160
- # ----------------------
161
- # Core response handlers:
162
- # ----------------------
163
-
164
-
165
- def send_response(result)
166
- # As we have come back from another thread the socket may have closed
167
- # This check is an optimisation, the call to write and shutdown would fail safely
168
-
169
- if @request.hijacked
170
- unlink # unlink the management of the socket
171
-
172
- # Pass the hijack response to the captor using the promise
173
- # This forwards the socket and environment as well as moving
174
- # continued execution onto the event loop.
175
- @request.hijacked.resolve(Hijack.new(@socket, @request.env))
176
-
177
- elsif !@socket.closed
178
- if @request.deferred
179
- # Wait for the response using this promise
180
- promise = @request.deferred.promise
181
-
182
- # Process any responses that might have made it here first
183
- if @deferred_responses
184
- @deferred_responses.each &method(:respond_with)
185
- @deferred_responses = nil
186
- end
187
-
188
- return promise
189
-
190
- # NOTE:: Somehow getting to here with a nil request... needs investigation
191
- elsif not result.nil?
192
- # clear any cached responses just in case
193
- # could be set by error in the rack application
194
- @deferred_responses = nil if @deferred_responses
195
-
196
- status, headers, body = result
197
-
198
- send_body = @request.env[REQUEST_METHOD] != HEAD
199
-
200
- # If a file, stream the body in a non-blocking fashion
201
- if body.respond_to? :to_path
202
- if headers[CONTENT_LENGTH]
203
- type = :raw
204
- else
205
- type = :http
206
- headers[TRANSFER_ENCODING] = CHUNKED
207
- end
208
-
209
- write_headers(status, headers)
210
-
211
- if send_body
212
- file = @loop.file(body.to_path, File::RDONLY)
213
- file.progress do # File is open and available for reading
214
- file.send_file(@socket, type).finally do
215
- file.close
216
- if @request.keep_alive == false
217
- @socket.shutdown
218
- end
219
- end
220
- end
221
- return file
222
- end
223
- else
224
- # Optimize the response
225
- begin
226
- body_size = body.size
227
- if body_size < 2
228
- if body_size == 1
229
- headers[CONTENT_LENGTH] = body[0].bytesize
230
- else
231
- headers[CONTENT_LENGTH] = ZERO
232
- end
233
- end
234
- rescue # just in case
235
- end
236
-
237
- if send_body
238
- write_response(status, headers, body)
239
- else
240
- write_headers(status, headers)
241
- @socket.shutdown if @request.keep_alive == false
242
- end
243
- end
185
+ if send_body
186
+ file = @loop.file body.to_path, File::RDONLY
187
+ file.progress do
188
+ # File is open and available for reading
189
+ file.send_file(@socket, type).finally do
190
+ file.close
191
+ @socket.shutdown if @request.keep_alive == false
244
192
  end
193
+ end
194
+ return file
195
+ end
196
+ else
197
+ # Optimize the response
198
+ begin
199
+ if body.size < 2
200
+ headers[CONTENT_LENGTH2] = body.size == 1 ? body[0].bytesize : ZERO
201
+ end
202
+ rescue # just in case
245
203
  end
246
204
 
247
- # continue processing (don't wait for write to complete)
248
- # if the write fails it will close the socket
249
- nil
205
+ if send_body
206
+ write_response status, headers, body
207
+ else
208
+ write_headers status, headers
209
+ @socket.shutdown if @request.keep_alive == false
210
+ end
211
+ end
250
212
  end
213
+ end
251
214
 
252
- def write_response(status, headers, body)
253
- if headers[CONTENT_LENGTH]
254
- headers[CONTENT_LENGTH] = headers[CONTENT_LENGTH].to_s
255
- write_headers(status, headers)
256
-
257
- # Stream the response (pass directly into @socket.write)
258
- body.each &@socket.method(:write)
259
-
260
- if @request.deferred
261
- @request.deferred.resolve(true)
262
- @request.deferred = nil # prevent data being sent after completed
263
- end
215
+ # continue processing (don't wait for write to complete)
216
+ # if the write fails it will close the socket
217
+ nil
218
+ end
264
219
 
265
- @socket.shutdown if @request.keep_alive == false
266
- else
267
- headers[TRANSFER_ENCODING] = CHUNKED
268
- write_headers(status, headers)
220
+ def write_response(status, headers, body)
221
+ if headers[CONTENT_LENGTH2]
222
+ headers[CONTENT_LENGTH2] = headers[CONTENT_LENGTH2].to_s
223
+ write_headers status, headers
269
224
 
270
- # Stream the response
271
- body.each &@write_chunk
225
+ # Stream the response (pass directly into @socket.write)
226
+ body.each &@socket.method(:write)
272
227
 
273
- if @request.deferred.nil?
274
- @socket.write EOF
275
- @socket.shutdown if @request.keep_alive == false
276
- else
277
- @async_state = :chunked
278
- end
279
- end
228
+ if @request.deferred
229
+ @request.deferred.resolve true
230
+ # Prevent data being sent after completed
231
+ @request.deferred = nil
280
232
  end
281
233
 
282
- def write_headers(status, headers)
283
- headers[CONNECTION] = CLOSE if @request.keep_alive == false
234
+ @socket.shutdown if @request.keep_alive == false
235
+ else
236
+ headers[TRANSFER_ENCODING] = CHUNKED
237
+ write_headers status, headers
284
238
 
285
- header = "HTTP/1.1 #{status} #{fetch_code(status)}\r\n"
286
- headers.each do |key, value|
287
- next if key.start_with? RACK
239
+ # Stream the response
240
+ body.each &@write_chunk
288
241
 
289
- value.split(NEWLINE).each do |unique_value|
290
- header << key
291
- header << COLON_SPACE
292
- header << unique_value
293
- header << CRLF
294
- end
295
- end
296
- header << CRLF
297
- @socket.write header
242
+ if @request.deferred.nil?
243
+ @socket.write CLOSE_CHUNKED
244
+ @socket.shutdown if @request.keep_alive == false
245
+ else
246
+ @async_state = :chunked
298
247
  end
248
+ end
249
+ end
299
250
 
300
- def write_chunk(part)
301
- chunk = part.bytesize.to_s(16) << CRLF << part << CRLF
302
- @socket.write chunk
303
- end
251
+ def write_headers(status, headers)
252
+ headers[CONNECTION] = CLOSE if @request.keep_alive == false
304
253
 
305
- def fetch_code(status)
306
- HTTP_STATUS_CODES.fetch(status, &HTTP_STATUS_DEFAULT)
254
+ header = "HTTP/1.1 #{status} #{fetch_code(status)}\r\n"
255
+ headers.each do |key, value|
256
+ next if key.start_with? RACK
257
+
258
+ value.split(NEWLINE).each do |unique_value|
259
+ header << key
260
+ header << COLON
261
+ header << unique_value
262
+ header << LINE_END
307
263
  end
264
+ end
265
+ header << LINE_END
266
+ @socket.write header
267
+ end
308
268
 
269
+ def write_chunk(part)
270
+ chunk = part.bytesize.to_s(HEX_SIZE_CHUNKED_RESPONSE) << LINE_END << part << LINE_END
271
+ @socket.write chunk
272
+ end
309
273
 
310
- # ------------------------
311
- # Async response functions:
312
- # ------------------------
274
+ def fetch_code(status)
275
+ HTTP_STATUS_CODES.fetch(status, &HTTP_STATUS_DEFAULT)
276
+ end
313
277
 
278
+ ##
279
+ # Async response functions
314
280
 
315
- # Callback from a response that was marked async
316
- def deferred_callback(data)
317
- # We call close here, like on a regular response
318
- body = data[2]
319
- body.close if body.respond_to?(:close)
320
- @loop.next_tick do
321
- callback(data)
322
- end
323
- end
281
+ # Callback from a response that was marked async
282
+ def deferred_callback(data)
283
+ # We call close here, like on a regular response
284
+ body = data[2]
285
+ body.close if body.respond_to?(:close)
286
+ @loop.next_tick { callback(data) }
287
+ end
324
288
 
325
- # Process a response that was marked as async
326
- # Save the data if the request hasn't responded yet
327
- def callback(data)
328
- begin
329
- if @request.deferred && @deferred_responses.nil?
330
- respond_with(data)
331
- else
332
- @deferred_responses ||= []
333
- @deferred_responses << data
334
- end
335
- rescue Exception => e
336
- # This provides the same level of protection that
337
- # the regular responses provide
338
- send_error(e)
339
- end
289
+ # Process a response that was marked as async. Save the data if the request hasn't responded yet
290
+ def callback(data)
291
+ begin
292
+ if @request.deferred && @deferred_responses.nil?
293
+ respond_with data
294
+ else
295
+ @deferred_responses ||= []
296
+ @deferred_responses << data
340
297
  end
298
+ rescue Exception => e
299
+ # This provides the same level of protection that the regular responses provide
300
+ send_error e
301
+ end
302
+ end
341
303
 
342
- # Process the async request in the same way as Mizuno
343
- # See: http://polycrystal.org/2012/04/15/asynchronous_responses_in_rack.html
344
- def respond_with(data)
345
- status, headers, body = data
346
-
347
- if @async_state.nil?
348
- # Respond with the headers here
349
- write_response(status, headers, body)
350
- elsif body.empty?
351
- @socket.write EOF
352
- @socket.shutdown if @request.keep_alive == false
353
-
354
- # Complete the request here
355
- deferred = @request.deferred
356
- @request.deferred = nil # prevent data being sent after completed
357
- @async_state = nil
358
- deferred.resolve(true)
359
- else
360
- # Send the chunks provided
361
- body.each &@write_chunk
362
- end
363
- nil
364
- end
304
+ # Process the async request in the same way as Mizuno
305
+ # See: http://polycrystal.org/2012/04/15/asynchronous_responses_in_rack.html
306
+ def respond_with(data)
307
+ status, headers, body = data
308
+
309
+ if @async_state.nil?
310
+ # Respond with the headers here
311
+ write_response status, headers, body
312
+ elsif body.empty?
313
+ @socket.write CLOSE_CHUNK
314
+ @socket.shutdown if @request.keep_alive == false
315
+
316
+ # Complete the request here
317
+ deferred = @request.deferred
318
+ # Prevent data being sent after completed
319
+ @request.deferred = nil
320
+ @async_state = nil
321
+ deferred.resolve true
322
+ else
323
+ # Send the chunks provided
324
+ body.each &@write_chunk
325
+ end
326
+
327
+ nil
365
328
  end
329
+ end
366
330
  end
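
The removed constants at the top of this file spell out the chunked transfer framing that write_chunk and respond_with rely on: each piece of a streamed body is written as its byte size in hexadecimal, CRLF, the data, CRLF, and the stream ends with the old EOF constant "0\r\n\r\n" (now CLOSE_CHUNKED in the Const module). A standalone sketch of that framing:

    CRLF = "\r\n"

    # Frame one piece of a streamed body as an HTTP/1.1 chunk, as write_chunk does
    def frame_chunk(part)
      part.bytesize.to_s(16) << CRLF << part << CRLF
    end

    body = ["Hello, ", "world!"]
    framed = body.map { |part| frame_chunk(part) }.join
    framed << "0" << CRLF << CRLF      # terminating chunk (the old EOF constant)
    print framed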