rainbows 0.97.0 → 1.0.0pre1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.manifest +14 -2
- data/ChangeLog +87 -118
- data/GIT-VERSION-FILE +1 -1
- data/GIT-VERSION-GEN +1 -1
- data/GNUmakefile +1 -1
- data/README +1 -1
- data/bin/rainbows +15 -20
- data/lib/rainbows/actor_spawn.rb +20 -22
- data/lib/rainbows/app_pool.rb +89 -93
- data/lib/rainbows/base.rb +4 -61
- data/lib/rainbows/client.rb +9 -0
- data/lib/rainbows/configurator.rb +37 -39
- data/lib/rainbows/const.rb +18 -18
- data/lib/rainbows/dev_fd_response.rb +2 -1
- data/lib/rainbows/error.rb +39 -37
- data/lib/rainbows/ev_core.rb +103 -109
- data/lib/rainbows/event_machine.rb +188 -196
- data/lib/rainbows/fiber/base.rb +69 -88
- data/lib/rainbows/fiber/io/compat.rb +13 -0
- data/lib/rainbows/fiber/io/methods.rb +49 -0
- data/lib/rainbows/fiber/io/pipe.rb +7 -0
- data/lib/rainbows/fiber/io/socket.rb +7 -0
- data/lib/rainbows/fiber/io.rb +125 -84
- data/lib/rainbows/fiber/rev/heartbeat.rb +8 -0
- data/lib/rainbows/fiber/rev/kato.rb +22 -0
- data/lib/rainbows/fiber/rev/methods.rb +55 -0
- data/lib/rainbows/fiber/rev/server.rb +32 -0
- data/lib/rainbows/fiber/rev/sleeper.rb +15 -0
- data/lib/rainbows/fiber/rev.rb +6 -164
- data/lib/rainbows/fiber.rb +23 -5
- data/lib/rainbows/fiber_pool.rb +31 -37
- data/lib/rainbows/fiber_spawn.rb +21 -28
- data/lib/rainbows/http_server.rb +80 -80
- data/lib/rainbows/max_body.rb +26 -28
- data/lib/rainbows/process_client.rb +61 -0
- data/lib/rainbows/queue_pool.rb +19 -22
- data/lib/rainbows/read_timeout.rb +28 -0
- data/lib/rainbows/rev/client.rb +10 -10
- data/lib/rainbows/rev/core.rb +2 -3
- data/lib/rainbows/rev/thread.rb +1 -1
- data/lib/rainbows/rev_fiber_spawn.rb +21 -24
- data/lib/rainbows/revactor.rb +18 -15
- data/lib/rainbows/thread_pool.rb +2 -4
- data/lib/rainbows/thread_spawn.rb +1 -2
- data/lib/rainbows/writer_thread_pool.rb +14 -4
- data/lib/rainbows/writer_thread_spawn.rb +14 -4
- data/lib/rainbows.rb +7 -15
- data/local.mk.sample +3 -11
- data/rainbows.gemspec +2 -4
- data/t/kgio-pipe-response.ru +10 -0
- data/t/t0035-kgio-pipe-response.sh +70 -0
- data/t/test_isolate.rb +2 -1
- metadata +46 -30
- data/lib/rainbows/acceptor.rb +0 -26
- data/lib/rainbows/byte_slice.rb +0 -17
data/lib/rainbows/ev_core.rb
CHANGED
@@ -1,131 +1,125 @@
 # -*- encoding: binary -*-
 # :enddoc:
-module
-[old lines 4-21: content not captured]
-    @state = :headers # [ :body [ :trailers ] ] :app_call :close
-    @buf = ""
-  end
+# base module for evented models like Rev and EventMachine
+module Rainbows::EvCore
+  include Rainbows::Const
+  include Rainbows::Response
+  G = Rainbows::G
+  NULL_IO = Unicorn::HttpRequest::NULL_IO
+  HttpParser = Unicorn::HttpParser
+
+  # Apps may return this Rack response: AsyncResponse = [ -1, {}, [] ]
+  ASYNC_CALLBACK = "async.callback".freeze
+
+  ASYNC_CLOSE = "async.close".freeze
+
+  def post_init
+    @env = {}
+    @hp = HttpParser.new
+    @state = :headers # [ :body [ :trailers ] ] :app_call :close
+    @buf = ""
+  end
 
-[old lines 26-29: content not captured]
+  # graceful exit, like SIGQUIT
+  def quit
+    @state = :close
+  end
 
-[old lines 31-35: content not captured]
+  def handle_error(e)
+    msg = Rainbows::Error.response(e) and write(msg)
+    ensure
+      quit
+  end
 
-[old lines 37-43: content not captured]
-    end
-    write(response_header(status, headers))
-    rv
+  # returns whether to enable response chunking for autochunk models
+  def stream_response_headers(status, headers)
+    if headers['Content-Length']
+      rv = false
+    else
+      rv = !!(headers['Transfer-Encoding'] =~ %r{\Achunked\z}i)
+      rv = false if headers.delete('X-Rainbows-Autochunk') == 'no'
     end
+    write(response_header(status, headers))
+    rv
+  end
 
-[old lines 49-64: content not captured]
-        end
-        @input = CapInput.new(len, self)
-        @hp.filter_body(@buf2 = "", @buf)
-        @input << @buf2
-        on_read("")
-      end
-    when :body
-      if @hp.body_eof?
-        @state = :trailers
-        on_read(data)
-      elsif data.size > 0
-        @hp.filter_body(@buf2, @buf << data)
-        @input << @buf2
-        on_read("")
-      end
-    when :trailers
-      if @hp.trailers(@env, @buf << data)
-        @input.rewind
-        app_call
+  # TeeInput doesn't map too well to this right now...
+  def on_read(data)
+    case @state
+    when :headers
+      @hp.headers(@env, @buf << data) or return
+      @state = :body
+      len = @hp.content_length
+      if len == 0
+        @input = NULL_IO
+        app_call # common case
+      else # nil or len > 0
+        # since we don't do streaming input, we have no choice but
+        # to take over 100-continue handling from the Rack application
+        if @env[HTTP_EXPECT] =~ /\A100-continue\z/i
+          write(EXPECT_100_RESPONSE)
+          @env.delete(HTTP_EXPECT)
         end
+        @input = CapInput.new(len, self)
+        @hp.filter_body(@buf2 = "", @buf)
+        @input << @buf2
+        on_read("")
+      end
+    when :body
+      if @hp.body_eof?
+        @state = :trailers
+        on_read(data)
+      elsif data.size > 0
+        @hp.filter_body(@buf2, @buf << data)
+        @input << @buf2
+        on_read("")
+      end
+    when :trailers
+      if @hp.trailers(@env, @buf << data)
+        @input.rewind
+        app_call
       end
-    rescue => e
-      handle_error(e)
     end
+    rescue => e
+      handle_error(e)
+  end
 
-[old lines 90-92: content not captured]
+  class CapInput < Struct.new(:io, :client, :bytes_left)
+    MAX_BODY = Unicorn::Const::MAX_BODY
+    TmpIO = Unicorn::TmpIO
 
-[old lines 94-96: content not captured]
+    def self.err(client, msg)
+      client.write(Rainbows::Const::ERROR_413_RESPONSE)
+      client.quit
 
-[old lines 98-100: content not captured]
+      # zip back up the stack
+      raise IOError, msg, []
+    end
 
-[old lines 102-106: content not captured]
-        end
-        len <= MAX_BODY ? StringIO.new("") : Util.tmpio
-      else
-        max ? super(Util.tmpio, client, max) : Util.tmpio
+    def self.new(len, client)
+      max = Rainbows.max_bytes
+      if len
+        if max && (len > max)
+          err(client, "Content-Length too big: #{len} > #{max}")
         end
+        len <= MAX_BODY ? StringIO.new("") : TmpIO.new
+      else
+        max ? super(TmpIO.new, client, max) : TmpIO.new
       end
+    end
 
-[old lines 114-117: content not captured]
-      end
-      io << buf
+    def <<(buf)
+      if (self.bytes_left -= buf.size) < 0
+        io.close
+        CapInput.err(client, "chunked request body too big")
       end
-[old line 121: content not captured]
-    def gets; io.gets; end
-    def each(&block); io.each(&block); end
-    def size; io.size; end
-    def rewind; io.rewind; end
-    def read(*args); io.read(*args); end
-[old line 127: content not captured]
+      io << buf
     end
 
+    def gets; io.gets; end
+    def each(&block); io.each(&block); end
+    def size; io.size; end
+    def rewind; io.rewind; end
+    def read(*args); io.read(*args); end
   end
 end
data/lib/rainbows/event_machine.rb
CHANGED
@@ -3,224 +3,216 @@ require 'eventmachine'
 EM::VERSION >= '0.12.10' or abort 'eventmachine 0.12.10 is required'
 require 'rainbows/ev_core'
 
-[old lines 6-61: content not captured]
+# Implements a basic single-threaded event model with
+# {EventMachine}[http://rubyeventmachine.com/]. It is capable of
+# handling thousands of simultaneous client connections, but with only
+# a single-threaded app dispatch. It is suited for slow clients,
+# and can work with slow applications via asynchronous libraries such as
+# {async_sinatra}[http://github.com/raggi/async_sinatra],
+# {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp],
+# and {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool].
+#
+# It does not require your Rack application to be thread-safe,
+# reentrancy is only required for the DevFdResponse body
+# generator.
+#
+# Compatibility: Whatever \EventMachine ~> 0.12.10 and Unicorn both
+# support, currently Ruby 1.8/1.9.
+#
+# This model is compatible with users of "async.callback" in the Rack
+# environment such as
+# {async_sinatra}[http://github.com/raggi/async_sinatra].
+#
+# For a complete asynchronous framework,
+# {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp] is fully
+# supported when using this concurrency model.
+#
+# This model is fully-compatible with
+# {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool]
+# which allows each request to run inside its own \Fiber after
+# all request processing is complete.
+#
+# Merb (and other frameworks/apps) supporting +deferred?+ execution as
+# documented at http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin
+# will also get the ability to conditionally defer request processing
+# to a separate thread.
+#
+# This model does not implement as streaming "rack.input" which allows
+# the Rack application to process data as it arrives. This means
+# "rack.input" will be fully buffered in memory or to a temporary file
+# before the application is entered.
+module Rainbows::EventMachine
+
+  include Rainbows::Base
+  autoload :ResponsePipe, 'rainbows/event_machine/response_pipe'
+  autoload :ResponseChunkPipe, 'rainbows/event_machine/response_chunk_pipe'
+  autoload :TryDefer, 'rainbows/event_machine/try_defer'
+
+  class Client < EM::Connection # :nodoc: all
+    attr_writer :body
+    include Rainbows::EvCore
+
+    def initialize(io)
+      @_io = io
+      @body = nil
+    end
+
+    alias write send_data
+
+    def receive_data(data)
+      # To avoid clobbering the current streaming response
+      # (often a static file), we do not attempt to process another
+      # request on the same connection until the first is complete
+      if @body
+        @buf << data
+        @_io.shutdown(Socket::SHUT_RD) if @buf.size > 0x1c000
+        EM.next_tick { receive_data('') }
+      else
+        on_read(data)
       end
+    end
 
-[old line 64: content not captured]
+    def quit
+      super
+      close_connection_after_writing
+    end
 
-[old lines 66-73: content not captured]
+    def app_call
+      set_comm_inactivity_timeout 0
+      @env[RACK_INPUT] = @input
+      @env[REMOTE_ADDR] = @_io.kgio_addr
+      @env[ASYNC_CALLBACK] = method(:em_write_response)
+      @env[ASYNC_CLOSE] = EM::DefaultDeferrable.new
+
+      response = catch(:async) { APP.call(@env.update(RACK_DEFAULTS)) }
+
+      # too tricky to support pipelining with :async since the
+      # second (pipelined) request could be a stuck behind a
+      # long-running async response
+      (response.nil? || -1 == response[0]) and return @state = :close
+
+      alive = @hp.keepalive? && G.alive && G.kato > 0
+      em_write_response(response, alive)
+      if alive
+        @env.clear
+        @hp.reset
+        @state = :headers
+        if @buf.empty?
+          set_comm_inactivity_timeout(G.kato)
         else
-[old line 75: content not captured]
+          EM.next_tick { receive_data('') }
         end
       end
+    end
 
-[old lines 79-81: content not captured]
+    def em_write_response(response, alive = false)
+      status, headers, body = response
+      if @hp.headers?
+        headers = HH.new(headers)
+        headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
+      else
+        headers = nil
       end
 
-[old lines 84-99: content not captured]
-      if alive
-        @env.clear
-        @hp.reset
-        @state = :headers
-        if @buf.empty?
-          set_comm_inactivity_timeout(G.kato)
-        else
-          EM.next_tick { receive_data('') }
+      if body.respond_to?(:errback) && body.respond_to?(:callback)
+        @body = body
+        body.callback { quit }
+        body.errback { quit }
+        # async response, this could be a trickle as is in comet-style apps
+        headers[CONNECTION] = CLOSE if headers
+        alive = true
+      elsif body.respond_to?(:to_path)
+        st = File.stat(path = body.to_path)
+
+        if st.file?
+          write(response_header(status, headers)) if headers
+          @body = stream_file_data(path)
+          @body.errback do
+            body.close if body.respond_to?(:close)
+            quit
           end
-[old lines 109-112: content not captured]
-      status, headers, body = response
-      if @hp.headers?
-        headers = HH.new(headers)
-        headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
-      else
-        headers = nil
-      end
-[old line 120: content not captured]
-      if body.respond_to?(:errback) && body.respond_to?(:callback)
-        @body = body
-        body.callback { quit }
-        body.errback { quit }
-        # async response, this could be a trickle as is in comet-style apps
-        headers[CONNECTION] = CLOSE if headers
-        alive = true
-      elsif body.respond_to?(:to_path)
-        st = File.stat(path = body.to_path)
-[old line 130: content not captured]
-        if st.file?
-          write(response_header(status, headers)) if headers
-          @body = stream_file_data(path)
-          @body.errback do
-            body.close if body.respond_to?(:close)
-            quit
-          end
-          @body.callback do
-            body.close if body.respond_to?(:close)
-            @body = nil
-            alive ? receive_data('') : quit
-          end
-          return
-        elsif st.socket? || st.pipe?
-          @body = io = body_to_io(body)
-          chunk = stream_response_headers(status, headers) if headers
-          m = chunk ? ResponseChunkPipe : ResponsePipe
-          return EM.watch(io, m, self, alive, body).notify_readable = true
+          @body.callback do
+            body.close if body.respond_to?(:close)
+            @body = nil
+            alive ? receive_data('') : quit
           end
-[old line 150: content not captured]
+          return
+        elsif st.socket? || st.pipe?
+          @body = io = body_to_io(body)
+          chunk = stream_response_headers(status, headers) if headers
+          m = chunk ? ResponseChunkPipe : ResponsePipe
+          return EM.watch(io, m, self, alive, body).notify_readable = true
         end
-[old line 152: content not captured]
-      write(response_header(status, headers)) if headers
-      write_body_each(self, body)
-      quit unless alive
+        # char or block device... WTF? fall through to body.each
       end
 
-[old lines 158-160: content not captured]
-      begin
-        @_io.close
-      rescue Errno::EBADF
-        # EventMachine's EventableDescriptor::Close() may close
-        # the underlying file descriptor without invalidating the
-        # associated IO object on errors, so @_io.closed? isn't
-        # sufficient.
-      end
-    end
+      write(response_header(status, headers)) if headers
+      write_body_each(self, body)
+      quit unless alive
     end
 
-[old lines 172-181: content not captured]
-      io = accept(@io) or return
-      sig = EM.attach_fd(io.fileno, false)
-      CUR[sig] = CL.new(sig, io)
+    def unbind
+      async_close = @env[ASYNC_CLOSE] and async_close.succeed
+      @body.respond_to?(:fail) and @body.fail
+      begin
+        @_io.close
+      rescue Errno::EBADF
+        # EventMachine's EventableDescriptor::Close() may close
+        # the underlying file descriptor without invalidating the
+        # associated IO object on errors, so @_io.closed? isn't
+        # sufficient.
       end
     end
+  end
 
-[old lines 188-190: content not captured]
+  module Server # :nodoc: all
+    def close
+      detach
+      @io.close
     end
 
-[old lines 193-197: content not captured]
-    G.server.app.respond_to?(:deferred?) and
-      G.server.app = TryDefer[G.server.app]
-[old line 200: content not captured]
-    # enable them both, should be non-fatal if not supported
-    EM.epoll
-    EM.kqueue
-    logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
-    client_class = Rainbows.const_get(@use).const_get(:Client)
-    Server.const_set(:MAX, worker_connections + LISTENERS.size)
-    Server.const_set(:CL, client_class)
-    client_class.const_set(:APP, G.server.app)
-    EM.run {
-      conns = EM.instance_variable_get(:@conns) or
-        raise RuntimeError, "EM @conns instance variable not accessible!"
-      Server.const_set(:CUR, conns)
-      EM.add_periodic_timer(1) do
-        unless G.tick
-          conns.each_value { |c| client_class === c and c.quit }
-          EM.stop if conns.empty? && EM.reactor_running?
-        end
-      end
-      LISTENERS.map! do |s|
-        EM.watch(s, Server) { |c| c.notify_readable = true }
-      end
-    }
+    def notify_readable
+      return if CUR.size >= MAX
+      io = @io.kgio_tryaccept or return
+      sig = EM.attach_fd(io.fileno, false)
+      CUR[sig] = CL.new(sig, io)
     end
+  end
+
+  def init_worker_process(worker) # :nodoc:
+    Rainbows::Response.setup(Rainbows::EventMachine::Client)
+    super
+  end
 
+  # runs inside each forked worker, this sits around and waits
+  # for connections and doesn't die until the parent dies (or is
+  # given a INT, QUIT, or TERM signal)
+  def worker_loop(worker) # :nodoc:
+    init_worker_process(worker)
+    G.server.app.respond_to?(:deferred?) and
+      G.server.app = TryDefer[G.server.app]
+
+    # enable them both, should be non-fatal if not supported
+    EM.epoll
+    EM.kqueue
+    logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
+    client_class = Rainbows.const_get(@use).const_get(:Client)
+    Server.const_set(:MAX, worker_connections + LISTENERS.size)
+    Server.const_set(:CL, client_class)
+    client_class.const_set(:APP, G.server.app)
+    EM.run {
+      conns = EM.instance_variable_get(:@conns) or
+        raise RuntimeError, "EM @conns instance variable not accessible!"
+      Server.const_set(:CUR, conns)
+      EM.add_periodic_timer(1) do
+        unless G.tick
+          conns.each_value { |c| client_class === c and c.quit }
+          EM.stop if conns.empty? && EM.reactor_running?
+        end
+      end
+      LISTENERS.map! do |s|
+        EM.watch(s, Server) { |c| c.notify_readable = true }
+      end
+    }
   end
 end
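
The rdoc comments added above advertise Rack's "async.callback" hook and the deferred [ -1, {}, [] ] response that this concurrency model accepts. A minimal rackup sketch of that pattern follows; it is illustrative only and not part of the package: the DeferredHello class name, the 0.5-second timer, and the response body are invented, while the "async.callback" env key and the -1 status convention come from the code shown above.

  # config.ru -- illustrative sketch; assumes a Rainbows! worker running the
  # EventMachine concurrency model shown in the diff above.
  require 'eventmachine'

  class DeferredHello
    ASYNC_RESPONSE = [ -1, {}, [] ].freeze # "the real response comes later"

    def call(env)
      async_cb = env['async.callback'] # set by Client#app_call above
      EM.add_timer(0.5) do
        # hand the finished response back to the server once it is ready
        async_cb.call([ 200, { 'Content-Type' => 'text/plain' }, [ "hi\n" ] ])
      end
      ASYNC_RESPONSE # tells the server not to write anything yet
    end
  end

  run DeferredHello.new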