spider-gazelle 0.1.6 → 0.1.7
- checksums.yaml +4 -4
- data/README.md +7 -0
- data/lib/rack/handler/spider-gazelle.rb +30 -32
- data/lib/spider-gazelle.rb +4 -6
- data/lib/spider-gazelle/app_store.rb +48 -53
- data/lib/spider-gazelle/binding.rb +48 -54
- data/lib/spider-gazelle/connection.rb +286 -322
- data/lib/spider-gazelle/const.rb +198 -0
- data/lib/spider-gazelle/gazelle.rb +121 -139
- data/lib/spider-gazelle/request.rb +95 -141
- data/lib/spider-gazelle/spider.rb +335 -351
- data/lib/spider-gazelle/upgrades/websocket.rb +88 -98
- data/spider-gazelle.gemspec +8 -3
- metadata +3 -4
- data/lib/spider-gazelle/error.rb +0 -16
- data/lib/spider-gazelle/version.rb +0 -3
checksums.yaml CHANGED

@@ -1,7 +1,7 @@
 ---
 SHA1:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 523c991f5e4470fe7b6170fad1fd32a59226c12d
+  data.tar.gz: 92b58b38698befe73be3fd9aa20d54beb675bb7e
 SHA512:
-  metadata.gz:
-  data.tar.gz:
+  metadata.gz: 7b469356bdda20fdaa8caea8769d02595002a9f943dbd08320043f1a6e57026150f0e2cfe1692d3c929f3cc68b7a3007dd6835cda4dddfe2835609d9eea31bff
+  data.tar.gz: 75b41769b7c3006dd084d978b34a66eff5ab9669dc25f8ac695f45ac0852d87a9b5576105b186470f75174b5459325f53b833530a329c9cc03b77c5d8d06e399
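These checksums are the digests of the two archives packed inside the `.gem` file (`metadata.gz` and `data.tar.gz`); only the 0.1.7 values were captured in this diff. A minimal sketch of recomputing them with Ruby's standard library, assuming the gem has already been unpacked with `tar` and that `checksums.yaml` sits alongside the two files (the file names mirror the diff, everything else is illustrative):

```ruby
# Illustrative only: compare freshly computed digests against checksums.yaml.
require "digest"
require "yaml"

recorded = YAML.load_file("checksums.yaml")

%w[metadata.gz data.tar.gz].each do |name|
  sha1   = Digest::SHA1.file(name).hexdigest
  sha512 = Digest::SHA512.file(name).hexdigest
  puts "#{name}: SHA1 #{sha1 == recorded["SHA1"][name] ? "ok" : "mismatch"}, " \
       "SHA512 #{sha512 == recorded["SHA512"][name] ? "ok" : "mismatch"}"
end
```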
data/README.md CHANGED

@@ -1,5 +1,7 @@
 # spider-gazelle
 
+[<img src="https://codeclimate.com/github/cotag/spider-gazelle.png" />](https://codeclimate.com/github/cotag/spider-gazelle)
+
 
 A fast, parallel and concurrent web server for ruby
 
@@ -28,3 +30,8 @@ Look out! Here comes the Spidergazelle!
 
 For other command line options look at [the source](/bin/sg)
 
+
+## Community support
+
+* [GitHub issues](https://github.com/cotag/spider-gazelle/issues).
+* IRC channel `#spider-gazelle` at `irc.freenode.net`.
data/lib/rack/handler/spider-gazelle.rb CHANGED

@@ -1,39 +1,37 @@
-require
-require
+require "rack/handler"
+require "spider-gazelle"
+require "spider-gazelle/const"
 
 module Rack
-    if options[:environment]
-      ENV['RACK_ENV'] = options[:environment].to_s
-    end
-
-    ::SpiderGazelle::Spider.run app, options
-  end
+  module Handler
+    module SpiderGazelle
+      DEFAULT_OPTIONS = {
+        :Host => "0.0.0.0",
+        :Port => 8080,
+        :Verbose => false
+      }
+
+      def self.run(app, options = {})
+        options = DEFAULT_OPTIONS.merge(options)
+
+        if options[:Verbose]
+          app = Rack::CommonLogger.new(app, STDOUT)
+        end
 
-      "Host=HOST" => "Hostname to listen on (default: 0.0.0.0)",
-      "Port=PORT" => "Port to listen on (default: 8080)",
-      "Quiet" => "Don't report each request"
-    }
-  end
+        if options[:environment]
+          ENV["RACK_ENV"] = options[:environment].to_s
         end
 
+        ::SpiderGazelle::Spider.run app, options
+      end
+
+      def self.valid_options
+        { "Host=HOST" => "Hostname to listen on (default: 0.0.0.0)",
+          "Port=PORT" => "Port to listen on (default: 8080)",
+          "Quiet" => "Don't report each request" }
+      end
     end
+
+    register :"spider-gazelle", SpiderGazelle
+  end
 end
data/lib/spider-gazelle.rb CHANGED

@@ -2,7 +2,6 @@ require "http-parser" # C based, fast, http parser
 require "libuv"       # Ruby Libuv FFI wrapper
 require "rack"        # Ruby webserver abstraction
 
-require "spider-gazelle/version"
 require "spider-gazelle/request"    # Holds request information and handles request processing
 require "spider-gazelle/connection" # Holds connection information and handles request pipelining
 require "spider-gazelle/gazelle"    # Processes data received from connections
@@ -13,10 +12,9 @@ require "spider-gazelle/spider" # Accepts connections and offloads them
 
 require "spider-gazelle/upgrades/websocket" # Websocket implementation
 
 module SpiderGazelle
+  # Delegate pipe used for passing sockets to the gazelles
+  DELEGATE_PIPE = "/tmp/spider-gazelle.delegate"
+  # Signal pipe used to pass control signals
+  SIGNAL_PIPE = "/tmp/spider-gazelle.signal"
 end
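`DELEGATE_PIPE` and `SIGNAL_PIPE` are filesystem paths the spider process and its gazelle workers use for inter-process communication: the delegate pipe hands accepted sockets to the gazelles, the signal pipe carries control messages. spider-gazelle drives these with libuv pipes; the sketch below only illustrates the general named-socket pattern using the Ruby standard library, with a made-up path and message:

```ruby
# Conceptual sketch only -- not the gem's implementation.
require "socket"

path = "/tmp/example.signal"            # hypothetical path, not SIGNAL_PIPE
File.unlink(path) if File.exist?(path)
server = UNIXServer.new(path)           # "spider" side listens on the pipe

child = fork do                         # "gazelle" side connects and waits
  sock = UNIXSocket.new(path)
  puts "gazelle received: #{sock.gets.strip}"
  sock.close
end

conn = server.accept
conn.puts "start"                       # a control message over the pipe
conn.close
Process.wait(child)
server.close
File.unlink(path)
```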
data/lib/spider-gazelle/app_store.rb CHANGED

@@ -1,69 +1,64 @@
 require 'thread'
 require 'radix/base'
 
 module SpiderGazelle
+  module AppStore
+    # Basic compression using UTF (more efficient for ID's stored as strings)
+    B65 = ::Radix::Base.new(::Radix::BASE::B62 + ['-', '_', '~'])
+    B10 = ::Radix::Base.new(10)
 
+    @mutex = Mutex.new
+    @apps = ThreadSafe::Cache.new
+    @loaded = ThreadSafe::Cache.new
+    @count = 0
 
+    # Load an app and assign it an ID
+    def self.load(app, options={})
+      is_rack_app = !app.is_a?(String)
+      app_key = is_rack_app ? app.class.name.to_sym : app.to_sym
+      id = @loaded[app_key]
 
+      if id.nil?
+        app, options = ::Rack::Builder.parse_file(app) unless is_rack_app
 
-      @loaded[app_key] = id
-    end
+        count = 0
+        @mutex.synchronize { count = @count += 1 }
+        id = Radix.convert(count, B10, B65).to_sym
+        @apps[id] = app
+        @loaded[app_key] = id
+      end
 
+      id
+    end
 
+    # Manually load an app
+    def self.add(app)
+      id = @loaded[app.__id__]
 
-      @loaded[app.__id__] = id
-    end
+      if id.nil?
+        count = 0
+        @mutex.synchronize { count = @count += 1 }
+        id = Radix.convert(count, B10, B65).to_sym
+        @apps[id] = app
+        @loaded[app.__id__] = id
+      end
 
+      id
+    end
 
+    # Lookup an application
+    def self.lookup(app)
+      if app.is_a?(String) || app.is_a?(Symbol)
+        @apps[@loaded[app.to_sym]]
+      else
+        @apps[@loaded[app.__id__]]
+      end
+    end
 
-  end
+    # Get an app using the id directly
+    def self.get(id)
+      id = id.to_sym if id.is_a?(String)
+      @apps[id]
     end
+  end
 end
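The new `AppStore` gives every loaded Rack application a compact ID: a counter is incremented under a mutex and re-encoded from base 10 into a 65-character alphabet via the radix gem, and the app is cached in `ThreadSafe::Cache` instances keyed by that ID. A standalone look at the ID scheme, reusing exactly the calls from the diff (concrete output values not shown here):

```ruby
# Standalone look at the ID compression used by AppStore.load / AppStore.add.
require "radix/base"

B10 = ::Radix::Base.new(10)
B65 = ::Radix::Base.new(::Radix::BASE::B62 + ['-', '_', '~'])

[1, 64, 65, 4096].each do |count|
  id = Radix.convert(count, B10, B65).to_sym
  puts "#{count} -> #{id.inspect}"
end
```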
data/lib/spider-gazelle/binding.rb CHANGED

@@ -1,65 +1,59 @@
-require '
+require 'spider-gazelle/const'
 require 'set'
+require 'thread'
 
 module SpiderGazelle
-  end
-
-  # Bind the application to the selected port
-  def bind
-    # Bind the socket
-    @tcp = @loop.tcp
-    @tcp.bind(@options[:Host], @port, @new_connection)
-    @tcp.listen(@options[:backlog])
-
-    # Delegate errors
-    @tcp.catch do |e|
-      @loop.log :error, 'application bind failed', e
-    end
-    @tcp
-  end
+  class Binding
+    include Const
+
+    attr_reader :app_id
+
+    def initialize(loop, delegate, app_id, options = {})
+      @app_id = app_id
+      @options = options
+      @loop = loop
+      @delegate = delegate
+      @tls = @options[:tls] || false
+      @port = @options[:Port] || (@tls ? PORT_443 : PORT_80)
+      @optimize = @options[:optimize_for_latency] || true
+
+      # Connection management functions
+      @new_connection = method :new_connection
+      @accept_connection = method :accept_connection
+    end
 
+    # Bind the application to the selected port
+    def bind
+      # Bind the socket
+      @tcp = @loop.tcp
+      @tcp.bind @options[:Host], @port, @new_connection
+      @tcp.listen @options[:backlog]
 
+      # Delegate errors
+      @tcp.catch { |e| @loop.log(:error, 'application bind failed', e) }
+      @tcp
+    end
 
+    # Close the bindings
+    def unbind
+      # close unless we've never been bound
+      @tcp.close unless @tcp.nil?
+      @tcp
+    end
 
+    protected
 
-  end
+    # There is a new connection pending. We accept it
+    def new_connection(server)
+      server.accept @accept_connection
+    end
 
-  end
+    # Once the connection is accepted we disable Nagles Algorithm
+    # This improves performance as we are using vectored or scatter/gather IO
+    # Then the spider delegates to the gazelle loops
+    def accept_connection(client)
+      client.enable_nodelay if @optimize == true
+      @delegate.call client, @tls, @port, @app_id
     end
+  end
 end
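`Binding` wraps a libuv TCP handle: `bind` attaches it to the configured host and port, each pending connection is accepted, Nagle's algorithm is disabled via `enable_nodelay`, and the client is handed to a delegate callable together with the TLS flag, port and app ID. The delegate itself lives in `spider.rb` (not part of this excerpt); the stub below only illustrates the argument contract implied by `accept_connection`:

```ruby
# Hypothetical delegate stub -- shows the call shape only.
delegate = proc do |client, tls, port, app_id|
  # client - accepted libuv TCP handle (Nagle already disabled)
  # tls    - whether this binding is expected to terminate TLS
  # port   - listening port (PORT_80/PORT_443 defaults come from Const)
  # app_id - the AppStore ID of the rack application to serve
  puts "connection for app #{app_id} on port #{port} (tls: #{tls})"
end
```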
data/lib/spider-gazelle/connection.rb CHANGED

@@ -1,366 +1,330 @@
+require 'spider-gazelle/const'
 require 'stringio'
 
 module SpiderGazelle
-      # Used to chain promises (ensures requests are processed in order)
-      @process_next = method(:process_next)
-      @write_chunk = method(:write_chunk)
-      @current_worker = queue # keep track of work queue head to prevent unintentional GC
-      @queue_worker = queue # start queue with an existing resolved promise (::Libuv::Q::ResolvedPromise.new(@loop, true))
-
-      # Socket for writing the response
-      @socket = socket
-      @app = app
-      @port = port
-      @tls = @socket.tls?
-      @loop = loop
-      @gazelle = gazelle
-      @async_callback = method(:deferred_callback)
-
-      # Remove connection if the socket closes
-      socket.finally &method(:unlink)
-    end
-
-    # Lazy eval the IP
-    def remote_ip
-      @remote_ip ||= @socket.peername[0]
-    end
-
-    # Creates a new request state object
-    def start_parsing
-      @parsing = Request.new(self, @app)
-    end
+  class Connection
+    include Const
+
+    Hijack = Struct.new :socket, :env
+
+    def self.on_progress(data, socket); end
+    DUMMY_PROGRESS = self.method :on_progress
+
+    # For Gazelle
+    attr_reader :state, :parsing
+    # For Request
+    attr_reader :tls, :port, :loop, :socket, :async_callback
+
+    def initialize(gazelle, loop, socket, port, state, app, queue)
+      # A single parser instance per-connection (supports pipelining)
+      @state = state
+      @pending = []
+
+      # Work callback for thread pool processing
+      @request = nil
+      @work = method :work
+
+      # Called after the work on the thread pool is complete
+      @send_response = method :send_response
+      @send_error = method :send_error
+
+      # Used to chain promises (ensures requests are processed in order)
+      @process_next = method :process_next
+      @write_chunk = method :write_chunk
+      # Keep track of work queue head to prevent unintentional GC
+      @current_worker = queue
+      # Start queue with an existing resolved promise (::Libuv::Q::ResolvedPromise.new(@loop, true))
+      @queue_worker = queue
+
+      # Socket for writing the response
+      @socket = socket
+      @app = app
+      @port = port
+      @tls = @socket.tls?
+      @loop = loop
+      @gazelle = gazelle
+      @async_callback = method :deferred_callback
+
+      # Remove connection if the socket closes
+      socket.finally &method(:unlink)
+    end
 
-        @socket.stop_read # we don't want to do any more work than we need to
-      end
-      @parsing.upgrade = @state.upgrade?
-      @pending.push @parsing
-      @queue_worker = @queue_worker.then @process_next
-    end
+    # Lazy eval the IP
+    def remote_ip
+      @remote_ip ||= @socket.peername[0]
+    end
 
-      # We no longer care for any further requests from this client
-      # however we will finish processing any valid pipelined requests before shutting down
-      @socket.stop_read
-      @queue_worker = @queue_worker.then do
-        # TODO:: send response (400 bad request)
-        @socket.write HTTP_11_400
-        @socket.shutdown
-      end
-    end
+    # Creates a new request state object
+    def start_parsing
+      @parsing = Request.new self, @app
+    end
 
+    # Chains the work in a promise queue
+    def finished_parsing
+      if !@state.keep_alive?
+        @parsing.keep_alive = false
+        # We don't want to do any more work than we need to
+        @socket.stop_read
+      end
+
+      @parsing.upgrade = @state.upgrade?
+      @pending.push @parsing
+      @queue_worker = @queue_worker.then @process_next
+    end
 
+    # The parser encountered an error
+    def parsing_error
+      # Grab the error
+      send_error @state.error
+
+      # We no longer care for any further requests from this client
+      # however we will finish processing any valid pipelined requests before shutting down
+      @socket.stop_read
+      @queue_worker = @queue_worker.then do
+        @socket.write ERROR_400_RESPONSE
+        @socket.shutdown
+      end
+    end
 
+    # Schedule send
+    def response(data)
+      @loop.schedule
+    end
 
+    protected
 
-    # --------------
+    ##
+    # State handlers
 
+    # Called when an error occurs at any point while responding
+    def send_error(reason)
+      # Close the socket as this is fatal (file read error, gazelle error etc)
+      @socket.close
 
+      # Log the error in a worker thread
+      @loop.work do
+        msg = "connection error: #{reason.message}\n#{reason.backtrace.join("\n") if reason.backtrace}\n"
+        @gazelle.logger.error msg
+      end
+    end
 
+    # We use promise chaining to move the requests forward
+    # This provides an elegant way to handle persistent and pipelined connections
+    def process_next(result)
+      @request = @pending.shift
+      @current_worker = @loop.work @work
+      # Resolves the promise with a promise
+      @current_worker.then @send_response, @send_error
+    end
 
-      @current_worker.then @send_response, @send_error # resolves the promise with a promise
-    end
+    # Returns the response as the result of the work
+    # We support the unofficial rack async api (multi-call version for chunked responses)
+    def work
+      @request.execute!
+    end
 
+    # Unlinks the connection from the rack app
+    # This occurs when requested and when the socket closes
+    def unlink
+      if not @gazelle.nil?
+        # Unlink the progress callback (prevent funny business)
+        @socket.progress &DUMMY_PROGRESS
+        @gazelle.discard self
+        @gazelle = nil
+        @state = nil
+      end
+    end
 
+    ##
+    # Core response handlers
+
+    def send_response(result)
+      # As we have come back from another thread the socket may have closed
+      # This check is an optimisation, the call to write and shutdown would fail safely
+
+      if @request.hijacked
+        # Unlink the management of the socket
+        unlink
+
+        # Pass the hijack response to the captor using the promise. This forwards the socket and
+        # environment as well as moving continued execution onto the event loop.
+        @request.hijacked.resolve Hijack.new(@socket, @request.env)
+      elsif !@socket.closed
+        if @request.deferred
+          # Wait for the response using this promise
+          promise = @request.deferred.promise
+
+          # Process any responses that might have made it here first
+          if @deferred_responses
+            @deferred_responses.each &method(:respond_with)
+            @deferred_responses = nil
+          end
+
+          return promise
+        # NOTE:: Somehow getting to here with a nil request... needs investigation
+        elsif not result.nil?
+          # clear any cached responses just in case
+          # could be set by error in the rack application
+          @deferred_responses = nil if @deferred_responses
+
+          status, headers, body = result
+
+          send_body = @request.env[REQUEST_METHOD] != HEAD
+
+          # If a file, stream the body in a non-blocking fashion
+          if body.respond_to? :to_path
+            if headers[CONTENT_LENGTH2]
+              type = :raw
+            else
+              type = :http
+              headers[TRANSFER_ENCODING] = CHUNKED
             end
-            end
 
+            write_headers status, headers
 
-      # This check is an optimisation, the call to write and shutdown would fail safely
-
-      if @request.hijacked
-        unlink # unlink the management of the socket
-
-        # Pass the hijack response to the captor using the promise
-        # This forwards the socket and environment as well as moving
-        # continued execution onto the event loop.
-        @request.hijacked.resolve(Hijack.new(@socket, @request.env))
-
-      elsif !@socket.closed
-        if @request.deferred
-          # Wait for the response using this promise
-          promise = @request.deferred.promise
-
-          # Process any responses that might have made it here first
-          if @deferred_responses
-            @deferred_responses.each &method(:respond_with)
-            @deferred_responses = nil
-          end
-
-          return promise
-
-        # NOTE:: Somehow getting to here with a nil request... needs investigation
-        elsif not result.nil?
-          # clear any cached responses just in case
-          # could be set by error in the rack application
-          @deferred_responses = nil if @deferred_responses
-
-          status, headers, body = result
-
-          send_body = @request.env[REQUEST_METHOD] != HEAD
-
-          # If a file, stream the body in a non-blocking fashion
-          if body.respond_to? :to_path
-            if headers[CONTENT_LENGTH]
-              type = :raw
-            else
-              type = :http
-              headers[TRANSFER_ENCODING] = CHUNKED
-            end
-
-            write_headers(status, headers)
-
-            if send_body
-              file = @loop.file(body.to_path, File::RDONLY)
-              file.progress do # File is open and available for reading
-                file.send_file(@socket, type).finally do
-                  file.close
-                  if @request.keep_alive == false
-                    @socket.shutdown
-                  end
-                end
-              end
-              return file
-            end
-          else
-            # Optimize the response
-            begin
-              body_size = body.size
-              if body_size < 2
-                if body_size == 1
-                  headers[CONTENT_LENGTH] = body[0].bytesize
-                else
-                  headers[CONTENT_LENGTH] = ZERO
-                end
-              end
-            rescue # just in case
-            end
-
-            if send_body
-              write_response(status, headers, body)
-            else
-              write_headers(status, headers)
-              @socket.shutdown if @request.keep_alive == false
-            end
-          end
+            if send_body
+              file = @loop.file body.to_path, File::RDONLY
+              file.progress do
+                # File is open and available for reading
+                file.send_file(@socket, type).finally do
+                  file.close
+                  @socket.shutdown if @request.keep_alive == false
                 end
+              end
+              return file
+            end
+          else
+            # Optimize the response
+            begin
+              if body.size < 2
+                headers[CONTENT_LENGTH2] = body.size == 1 ? body[0].bytesize : ZERO
+              end
+            rescue # just in case
             end
 
+            if send_body
+              write_response status, headers, body
+            else
+              write_headers status, headers
+              @socket.shutdown if @request.keep_alive == false
+            end
+          end
         end
+      end
 
-        # Stream the response (pass directly into @socket.write)
-        body.each &@socket.method(:write)
-
-        if @request.deferred
-          @request.deferred.resolve(true)
-          @request.deferred = nil # prevent data being sent after completed
-        end
+      # continue processing (don't wait for write to complete)
+      # if the write fails it will close the socket
+      nil
+    end
 
+    def write_response(status, headers, body)
+      if headers[CONTENT_LENGTH2]
+        headers[CONTENT_LENGTH2] = headers[CONTENT_LENGTH2].to_s
+        write_headers status, headers
 
+        # Stream the response (pass directly into @socket.write)
+        body.each &@socket.method(:write)
 
-          @async_state = :chunked
-        end
-      end
+        if @request.deferred
+          @request.deferred.resolve true
+          # Prevent data being sent after completed
+          @request.deferred = nil
         end
 
+        @socket.shutdown if @request.keep_alive == false
+      else
+        headers[TRANSFER_ENCODING] = CHUNKED
+        write_headers status, headers
 
-        next if key.start_with? RACK
+        # Stream the response
+        body.each &@write_chunk
 
-        end
-      end
-      header << CRLF
-      @socket.write header
+        if @request.deferred.nil?
+          @socket.write CLOSE_CHUNKED
+          @socket.shutdown if @request.keep_alive == false
+        else
+          @async_state = :chunked
         end
+      end
+    end
 
-      @socket.write chunk
-    end
+    def write_headers(status, headers)
+      headers[CONNECTION] = CLOSE if @request.keep_alive == false
 
+      header = "HTTP/1.1 #{status} #{fetch_code(status)}\r\n"
+      headers.each do |key, value|
+        next if key.start_with? RACK
+
+        value.split(NEWLINE).each do |unique_value|
+          header << key
+          header << COLON
+          header << unique_value
+          header << LINE_END
         end
+      end
+      header << LINE_END
+      @socket.write header
+    end
 
+    def write_chunk(part)
+      chunk = part.bytesize.to_s(HEX_SIZE_CHUNKED_RESPONSE) << LINE_END << part << LINE_END
+      @socket.write chunk
+    end
 
+    def fetch_code(status)
+      HTTP_STATUS_CODES.fetch(status, &HTTP_STATUS_DEFAULT)
+    end
 
+    ##
+    # Async response functions
 
-      end
-    end
+    # Callback from a response that was marked async
+    def deferred_callback(data)
+      # We call close here, like on a regular response
+      body = data[2]
+      body.close if body.respond_to?(:close)
+      @loop.next_tick { callback(data) }
+    end
 
-          @deferred_responses << data
-        end
-      rescue Exception => e
-        # This provides the same level of protection that
-        # the regular responses provide
-        send_error(e)
-      end
+    # Process a response that was marked as async. Save the data if the request hasn't responded yet
+    def callback(data)
+      begin
+        if @request.deferred && @deferred_responses.nil?
+          respond_with data
+        else
+          @deferred_responses ||= []
+          @deferred_responses << data
         end
+      rescue Exception => e
+        # This provides the same level of protection that the regular responses provide
+        send_error e
+      end
+    end
 
+    # Process the async request in the same way as Mizuno
+    # See: http://polycrystal.org/2012/04/15/asynchronous_responses_in_rack.html
+    def respond_with(data)
+      status, headers, body = data
+
+      if @async_state.nil?
+        # Respond with the headers here
+        write_response status, headers, body
+      elsif body.empty?
+        @socket.write CLOSE_CHUNK
+        @socket.shutdown if @request.keep_alive == false
+
+        # Complete the request here
+        deferred = @request.deferred
+        # Prevent data being sent after completed
+        @request.deferred = nil
+        @async_state = nil
+        deferred.resolve true
+      else
+        # Send the chunks provided
+        body.each &@write_chunk
+      end
+
+      nil
     end
+  end
 end
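`Connection#write_chunk` above implements HTTP/1.1 chunked transfer encoding: each piece of the body is framed as its byte size in hexadecimal, CRLF, the bytes, CRLF, and the stream is terminated with a zero-length chunk (the `CLOSE_CHUNKED` constant; const.rb itself is not part of this excerpt). A self-contained sketch of the same framing, assuming `HEX_SIZE_CHUNKED_RESPONSE` is 16 and `LINE_END` is `"\r\n"` in const.rb:

```ruby
# Standalone illustration of the chunk framing used by Connection#write_chunk.
def encode_chunk(part)
  part.bytesize.to_s(16) << "\r\n" << part << "\r\n"
end

body = ["Hello, ", "spider-gazelle!"]
stream = body.map { |part| encode_chunk(part) }.join
stream << "0\r\n\r\n"   # terminating zero-length chunk
puts stream.inspect
```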