rainbows 0.97.0 → 1.0.0pre1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. data/.manifest +14 -2
  2. data/ChangeLog +87 -118
  3. data/GIT-VERSION-FILE +1 -1
  4. data/GIT-VERSION-GEN +1 -1
  5. data/GNUmakefile +1 -1
  6. data/README +1 -1
  7. data/bin/rainbows +15 -20
  8. data/lib/rainbows/actor_spawn.rb +20 -22
  9. data/lib/rainbows/app_pool.rb +89 -93
  10. data/lib/rainbows/base.rb +4 -61
  11. data/lib/rainbows/client.rb +9 -0
  12. data/lib/rainbows/configurator.rb +37 -39
  13. data/lib/rainbows/const.rb +18 -18
  14. data/lib/rainbows/dev_fd_response.rb +2 -1
  15. data/lib/rainbows/error.rb +39 -37
  16. data/lib/rainbows/ev_core.rb +103 -109
  17. data/lib/rainbows/event_machine.rb +188 -196
  18. data/lib/rainbows/fiber/base.rb +69 -88
  19. data/lib/rainbows/fiber/io/compat.rb +13 -0
  20. data/lib/rainbows/fiber/io/methods.rb +49 -0
  21. data/lib/rainbows/fiber/io/pipe.rb +7 -0
  22. data/lib/rainbows/fiber/io/socket.rb +7 -0
  23. data/lib/rainbows/fiber/io.rb +125 -84
  24. data/lib/rainbows/fiber/rev/heartbeat.rb +8 -0
  25. data/lib/rainbows/fiber/rev/kato.rb +22 -0
  26. data/lib/rainbows/fiber/rev/methods.rb +55 -0
  27. data/lib/rainbows/fiber/rev/server.rb +32 -0
  28. data/lib/rainbows/fiber/rev/sleeper.rb +15 -0
  29. data/lib/rainbows/fiber/rev.rb +6 -164
  30. data/lib/rainbows/fiber.rb +23 -5
  31. data/lib/rainbows/fiber_pool.rb +31 -37
  32. data/lib/rainbows/fiber_spawn.rb +21 -28
  33. data/lib/rainbows/http_server.rb +80 -80
  34. data/lib/rainbows/max_body.rb +26 -28
  35. data/lib/rainbows/process_client.rb +61 -0
  36. data/lib/rainbows/queue_pool.rb +19 -22
  37. data/lib/rainbows/read_timeout.rb +28 -0
  38. data/lib/rainbows/rev/client.rb +10 -10
  39. data/lib/rainbows/rev/core.rb +2 -3
  40. data/lib/rainbows/rev/thread.rb +1 -1
  41. data/lib/rainbows/rev_fiber_spawn.rb +21 -24
  42. data/lib/rainbows/revactor.rb +18 -15
  43. data/lib/rainbows/thread_pool.rb +2 -4
  44. data/lib/rainbows/thread_spawn.rb +1 -2
  45. data/lib/rainbows/writer_thread_pool.rb +14 -4
  46. data/lib/rainbows/writer_thread_spawn.rb +14 -4
  47. data/lib/rainbows.rb +7 -15
  48. data/local.mk.sample +3 -11
  49. data/rainbows.gemspec +2 -4
  50. data/t/kgio-pipe-response.ru +10 -0
  51. data/t/t0035-kgio-pipe-response.sh +70 -0
  52. data/t/test_isolate.rb +2 -1
  53. metadata +46 -30
  54. data/lib/rainbows/acceptor.rb +0 -26
  55. data/lib/rainbows/byte_slice.rb +0 -17
@@ -1,131 +1,125 @@
1
1
  # -*- encoding: binary -*-
2
2
  # :enddoc:
3
- module Rainbows
4
-
5
- # base module for evented models like Rev and EventMachine
6
- module EvCore
7
- include Unicorn
8
- include Rainbows::Const
9
- include Rainbows::Response
10
- G = Rainbows::G
11
- NULL_IO = Unicorn::HttpRequest::NULL_IO
12
-
13
- # Apps may return this Rack response: AsyncResponse = [ -1, {}, [] ]
14
- ASYNC_CALLBACK = "async.callback".freeze
15
-
16
- ASYNC_CLOSE = "async.close".freeze
17
-
18
- def post_init
19
- @remote_addr = Rainbows.addr(@_io)
20
- @env = {}
21
- @hp = HttpParser.new
22
- @state = :headers # [ :body [ :trailers ] ] :app_call :close
23
- @buf = ""
24
- end
3
+ # base module for evented models like Rev and EventMachine
4
+ module Rainbows::EvCore
5
+ include Rainbows::Const
6
+ include Rainbows::Response
7
+ G = Rainbows::G
8
+ NULL_IO = Unicorn::HttpRequest::NULL_IO
9
+ HttpParser = Unicorn::HttpParser
10
+
11
+ # Apps may return this Rack response: AsyncResponse = [ -1, {}, [] ]
12
+ ASYNC_CALLBACK = "async.callback".freeze
13
+
14
+ ASYNC_CLOSE = "async.close".freeze
15
+
16
+ def post_init
17
+ @env = {}
18
+ @hp = HttpParser.new
19
+ @state = :headers # [ :body [ :trailers ] ] :app_call :close
20
+ @buf = ""
21
+ end
25
22
 
26
- # graceful exit, like SIGQUIT
27
- def quit
28
- @state = :close
29
- end
23
+ # graceful exit, like SIGQUIT
24
+ def quit
25
+ @state = :close
26
+ end
30
27
 
31
- def handle_error(e)
32
- msg = Error.response(e) and write(msg)
33
- ensure
34
- quit
35
- end
28
+ def handle_error(e)
29
+ msg = Rainbows::Error.response(e) and write(msg)
30
+ ensure
31
+ quit
32
+ end
36
33
 
37
- # returns whether to enable response chunking for autochunk models
38
- def stream_response_headers(status, headers)
39
- if headers['Content-Length']
40
- rv = false
41
- else
42
- rv = !!(headers['Transfer-Encoding'] =~ %r{\Achunked\z}i)
43
- rv = false if headers.delete('X-Rainbows-Autochunk') == 'no'
44
- end
45
- write(response_header(status, headers))
46
- rv
34
+ # returns whether to enable response chunking for autochunk models
35
+ def stream_response_headers(status, headers)
36
+ if headers['Content-Length']
37
+ rv = false
38
+ else
39
+ rv = !!(headers['Transfer-Encoding'] =~ %r{\Achunked\z}i)
40
+ rv = false if headers.delete('X-Rainbows-Autochunk') == 'no'
47
41
  end
42
+ write(response_header(status, headers))
43
+ rv
44
+ end
48
45
 
49
- # TeeInput doesn't map too well to this right now...
50
- def on_read(data)
51
- case @state
52
- when :headers
53
- @hp.headers(@env, @buf << data) or return
54
- @state = :body
55
- len = @hp.content_length
56
- if len == 0
57
- @input = NULL_IO
58
- app_call # common case
59
- else # nil or len > 0
60
- # since we don't do streaming input, we have no choice but
61
- # to take over 100-continue handling from the Rack application
62
- if @env[HTTP_EXPECT] =~ /\A100-continue\z/i
63
- write(EXPECT_100_RESPONSE)
64
- @env.delete(HTTP_EXPECT)
65
- end
66
- @input = CapInput.new(len, self)
67
- @hp.filter_body(@buf2 = "", @buf)
68
- @input << @buf2
69
- on_read("")
70
- end
71
- when :body
72
- if @hp.body_eof?
73
- @state = :trailers
74
- on_read(data)
75
- elsif data.size > 0
76
- @hp.filter_body(@buf2, @buf << data)
77
- @input << @buf2
78
- on_read("")
79
- end
80
- when :trailers
81
- if @hp.trailers(@env, @buf << data)
82
- @input.rewind
83
- app_call
46
+ # TeeInput doesn't map too well to this right now...
47
+ def on_read(data)
48
+ case @state
49
+ when :headers
50
+ @hp.headers(@env, @buf << data) or return
51
+ @state = :body
52
+ len = @hp.content_length
53
+ if len == 0
54
+ @input = NULL_IO
55
+ app_call # common case
56
+ else # nil or len > 0
57
+ # since we don't do streaming input, we have no choice but
58
+ # to take over 100-continue handling from the Rack application
59
+ if @env[HTTP_EXPECT] =~ /\A100-continue\z/i
60
+ write(EXPECT_100_RESPONSE)
61
+ @env.delete(HTTP_EXPECT)
84
62
  end
63
+ @input = CapInput.new(len, self)
64
+ @hp.filter_body(@buf2 = "", @buf)
65
+ @input << @buf2
66
+ on_read("")
67
+ end
68
+ when :body
69
+ if @hp.body_eof?
70
+ @state = :trailers
71
+ on_read(data)
72
+ elsif data.size > 0
73
+ @hp.filter_body(@buf2, @buf << data)
74
+ @input << @buf2
75
+ on_read("")
76
+ end
77
+ when :trailers
78
+ if @hp.trailers(@env, @buf << data)
79
+ @input.rewind
80
+ app_call
85
81
  end
86
- rescue => e
87
- handle_error(e)
88
82
  end
83
+ rescue => e
84
+ handle_error(e)
85
+ end
89
86
 
90
- class CapInput < Struct.new(:io, :client, :bytes_left)
91
- MAX_BODY = Unicorn::Const::MAX_BODY
92
- Util = Unicorn::Util
87
+ class CapInput < Struct.new(:io, :client, :bytes_left)
88
+ MAX_BODY = Unicorn::Const::MAX_BODY
89
+ TmpIO = Unicorn::TmpIO
93
90
 
94
- def self.err(client, msg)
95
- client.write(Const::ERROR_413_RESPONSE)
96
- client.quit
91
+ def self.err(client, msg)
92
+ client.write(Rainbows::Const::ERROR_413_RESPONSE)
93
+ client.quit
97
94
 
98
- # zip back up the stack
99
- raise IOError, msg, []
100
- end
95
+ # zip back up the stack
96
+ raise IOError, msg, []
97
+ end
101
98
 
102
- def self.new(len, client)
103
- max = Rainbows.max_bytes
104
- if len
105
- if max && (len > max)
106
- err(client, "Content-Length too big: #{len} > #{max}")
107
- end
108
- len <= MAX_BODY ? StringIO.new("") : Util.tmpio
109
- else
110
- max ? super(Util.tmpio, client, max) : Util.tmpio
99
+ def self.new(len, client)
100
+ max = Rainbows.max_bytes
101
+ if len
102
+ if max && (len > max)
103
+ err(client, "Content-Length too big: #{len} > #{max}")
111
104
  end
105
+ len <= MAX_BODY ? StringIO.new("") : TmpIO.new
106
+ else
107
+ max ? super(TmpIO.new, client, max) : TmpIO.new
112
108
  end
109
+ end
113
110
 
114
- def <<(buf)
115
- if (self.bytes_left -= buf.size) < 0
116
- io.close
117
- CapInput.err(client, "chunked request body too big")
118
- end
119
- io << buf
111
+ def <<(buf)
112
+ if (self.bytes_left -= buf.size) < 0
113
+ io.close
114
+ CapInput.err(client, "chunked request body too big")
120
115
  end
121
-
122
- def gets; io.gets; end
123
- def each(&block); io.each(&block); end
124
- def size; io.size; end
125
- def rewind; io.rewind; end
126
- def read(*args); io.read(*args); end
127
-
116
+ io << buf
128
117
  end
129
118
 
119
+ def gets; io.gets; end
120
+ def each(&block); io.each(&block); end
121
+ def size; io.size; end
122
+ def rewind; io.rewind; end
123
+ def read(*args); io.read(*args); end
130
124
  end
131
125
  end
@@ -3,224 +3,216 @@ require 'eventmachine'
3
3
  EM::VERSION >= '0.12.10' or abort 'eventmachine 0.12.10 is required'
4
4
  require 'rainbows/ev_core'
5
5
 
6
- module Rainbows
7
-
8
- # Implements a basic single-threaded event model with
9
- # {EventMachine}[http://rubyeventmachine.com/]. It is capable of
10
- # handling thousands of simultaneous client connections, but with only
11
- # a single-threaded app dispatch. It is suited for slow clients,
12
- # and can work with slow applications via asynchronous libraries such as
13
- # {async_sinatra}[http://github.com/raggi/async_sinatra],
14
- # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp],
15
- # and {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool].
16
- #
17
- # It does not require your Rack application to be thread-safe,
18
- # reentrancy is only required for the DevFdResponse body
19
- # generator.
20
- #
21
- # Compatibility: Whatever \EventMachine ~> 0.12.10 and Unicorn both
22
- # support, currently Ruby 1.8/1.9.
23
- #
24
- # This model is compatible with users of "async.callback" in the Rack
25
- # environment such as
26
- # {async_sinatra}[http://github.com/raggi/async_sinatra].
27
- #
28
- # For a complete asynchronous framework,
29
- # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp] is fully
30
- # supported when using this concurrency model.
31
- #
32
- # This model is fully-compatible with
33
- # {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool]
34
- # which allows each request to run inside its own \Fiber after
35
- # all request processing is complete.
36
- #
37
- # Merb (and other frameworks/apps) supporting +deferred?+ execution as
38
- # documented at http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin
39
- # will also get the ability to conditionally defer request processing
40
- # to a separate thread.
41
- #
42
- # This model does not implement as streaming "rack.input" which allows
43
- # the Rack application to process data as it arrives. This means
44
- # "rack.input" will be fully buffered in memory or to a temporary file
45
- # before the application is entered.
46
-
47
- module EventMachine
48
-
49
- include Base
50
- autoload :ResponsePipe, 'rainbows/event_machine/response_pipe'
51
- autoload :ResponseChunkPipe, 'rainbows/event_machine/response_chunk_pipe'
52
- autoload :TryDefer, 'rainbows/event_machine/try_defer'
53
-
54
- class Client < EM::Connection # :nodoc: all
55
- attr_writer :body
56
- include Rainbows::EvCore
57
- G = Rainbows::G
58
-
59
- def initialize(io)
60
- @_io = io
61
- @body = nil
6
+ # Implements a basic single-threaded event model with
7
+ # {EventMachine}[http://rubyeventmachine.com/]. It is capable of
8
+ # handling thousands of simultaneous client connections, but with only
9
+ # a single-threaded app dispatch. It is suited for slow clients,
10
+ # and can work with slow applications via asynchronous libraries such as
11
+ # {async_sinatra}[http://github.com/raggi/async_sinatra],
12
+ # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp],
13
+ # and {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool].
14
+ #
15
+ # It does not require your Rack application to be thread-safe,
16
+ # reentrancy is only required for the DevFdResponse body
17
+ # generator.
18
+ #
19
+ # Compatibility: Whatever \EventMachine ~> 0.12.10 and Unicorn both
20
+ # support, currently Ruby 1.8/1.9.
21
+ #
22
+ # This model is compatible with users of "async.callback" in the Rack
23
+ # environment such as
24
+ # {async_sinatra}[http://github.com/raggi/async_sinatra].
25
+ #
26
+ # For a complete asynchronous framework,
27
+ # {Cramp}[http://m.onkey.org/2010/1/7/introducing-cramp] is fully
28
+ # supported when using this concurrency model.
29
+ #
30
+ # This model is fully-compatible with
31
+ # {rack-fiber_pool}[http://github.com/mperham/rack-fiber_pool]
32
+ # which allows each request to run inside its own \Fiber after
33
+ # all request processing is complete.
34
+ #
35
+ # Merb (and other frameworks/apps) supporting +deferred?+ execution as
36
+ # documented at http://brainspl.at/articles/2008/04/18/deferred-requests-with-merb-ebb-and-thin
37
+ # will also get the ability to conditionally defer request processing
38
+ # to a separate thread.
39
+ #
40
+ # This model does not implement as streaming "rack.input" which allows
41
+ # the Rack application to process data as it arrives. This means
42
+ # "rack.input" will be fully buffered in memory or to a temporary file
43
+ # before the application is entered.
44
+ module Rainbows::EventMachine
45
+
46
+ include Rainbows::Base
47
+ autoload :ResponsePipe, 'rainbows/event_machine/response_pipe'
48
+ autoload :ResponseChunkPipe, 'rainbows/event_machine/response_chunk_pipe'
49
+ autoload :TryDefer, 'rainbows/event_machine/try_defer'
50
+
51
+ class Client < EM::Connection # :nodoc: all
52
+ attr_writer :body
53
+ include Rainbows::EvCore
54
+
55
+ def initialize(io)
56
+ @_io = io
57
+ @body = nil
58
+ end
59
+
60
+ alias write send_data
61
+
62
+ def receive_data(data)
63
+ # To avoid clobbering the current streaming response
64
+ # (often a static file), we do not attempt to process another
65
+ # request on the same connection until the first is complete
66
+ if @body
67
+ @buf << data
68
+ @_io.shutdown(Socket::SHUT_RD) if @buf.size > 0x1c000
69
+ EM.next_tick { receive_data('') }
70
+ else
71
+ on_read(data)
62
72
  end
73
+ end
63
74
 
64
- alias write send_data
75
+ def quit
76
+ super
77
+ close_connection_after_writing
78
+ end
65
79
 
66
- def receive_data(data)
67
- # To avoid clobbering the current streaming response
68
- # (often a static file), we do not attempt to process another
69
- # request on the same connection until the first is complete
70
- if @body
71
- @buf << data
72
- @_io.shutdown(Socket::SHUT_RD) if @buf.size > 0x1c000
73
- return EM.next_tick { receive_data('') }
80
+ def app_call
81
+ set_comm_inactivity_timeout 0
82
+ @env[RACK_INPUT] = @input
83
+ @env[REMOTE_ADDR] = @_io.kgio_addr
84
+ @env[ASYNC_CALLBACK] = method(:em_write_response)
85
+ @env[ASYNC_CLOSE] = EM::DefaultDeferrable.new
86
+
87
+ response = catch(:async) { APP.call(@env.update(RACK_DEFAULTS)) }
88
+
89
+ # too tricky to support pipelining with :async since the
90
+ # second (pipelined) request could be a stuck behind a
91
+ # long-running async response
92
+ (response.nil? || -1 == response[0]) and return @state = :close
93
+
94
+ alive = @hp.keepalive? && G.alive && G.kato > 0
95
+ em_write_response(response, alive)
96
+ if alive
97
+ @env.clear
98
+ @hp.reset
99
+ @state = :headers
100
+ if @buf.empty?
101
+ set_comm_inactivity_timeout(G.kato)
74
102
  else
75
- on_read(data)
103
+ EM.next_tick { receive_data('') }
76
104
  end
77
105
  end
106
+ end
78
107
 
79
- def quit
80
- super
81
- close_connection_after_writing
108
+ def em_write_response(response, alive = false)
109
+ status, headers, body = response
110
+ if @hp.headers?
111
+ headers = HH.new(headers)
112
+ headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
113
+ else
114
+ headers = nil
82
115
  end
83
116
 
84
- def app_call
85
- set_comm_inactivity_timeout 0
86
- @env[RACK_INPUT] = @input
87
- @env[REMOTE_ADDR] = @remote_addr
88
- @env[ASYNC_CALLBACK] = method(:em_write_response)
89
- @env[ASYNC_CLOSE] = EM::DefaultDeferrable.new
90
-
91
- response = catch(:async) { APP.call(@env.update(RACK_DEFAULTS)) }
92
-
93
- # too tricky to support pipelining with :async since the
94
- # second (pipelined) request could be a stuck behind a
95
- # long-running async response
96
- (response.nil? || -1 == response[0]) and return @state = :close
97
-
98
- alive = @hp.keepalive? && G.alive && G.kato > 0
99
- em_write_response(response, alive)
100
- if alive
101
- @env.clear
102
- @hp.reset
103
- @state = :headers
104
- if @buf.empty?
105
- set_comm_inactivity_timeout(G.kato)
106
- else
107
- EM.next_tick { receive_data('') }
117
+ if body.respond_to?(:errback) && body.respond_to?(:callback)
118
+ @body = body
119
+ body.callback { quit }
120
+ body.errback { quit }
121
+ # async response, this could be a trickle as is in comet-style apps
122
+ headers[CONNECTION] = CLOSE if headers
123
+ alive = true
124
+ elsif body.respond_to?(:to_path)
125
+ st = File.stat(path = body.to_path)
126
+
127
+ if st.file?
128
+ write(response_header(status, headers)) if headers
129
+ @body = stream_file_data(path)
130
+ @body.errback do
131
+ body.close if body.respond_to?(:close)
132
+ quit
108
133
  end
109
- end
110
- end
111
-
112
- def em_write_response(response, alive = false)
113
- status, headers, body = response
114
- if @hp.headers?
115
- headers = HH.new(headers)
116
- headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
117
- else
118
- headers = nil
119
- end
120
-
121
- if body.respond_to?(:errback) && body.respond_to?(:callback)
122
- @body = body
123
- body.callback { quit }
124
- body.errback { quit }
125
- # async response, this could be a trickle as is in comet-style apps
126
- headers[CONNECTION] = CLOSE if headers
127
- alive = true
128
- elsif body.respond_to?(:to_path)
129
- st = File.stat(path = body.to_path)
130
-
131
- if st.file?
132
- write(response_header(status, headers)) if headers
133
- @body = stream_file_data(path)
134
- @body.errback do
135
- body.close if body.respond_to?(:close)
136
- quit
137
- end
138
- @body.callback do
139
- body.close if body.respond_to?(:close)
140
- @body = nil
141
- alive ? receive_data('') : quit
142
- end
143
- return
144
- elsif st.socket? || st.pipe?
145
- @body = io = body_to_io(body)
146
- chunk = stream_response_headers(status, headers) if headers
147
- m = chunk ? ResponseChunkPipe : ResponsePipe
148
- return EM.watch(io, m, self, alive, body).notify_readable = true
134
+ @body.callback do
135
+ body.close if body.respond_to?(:close)
136
+ @body = nil
137
+ alive ? receive_data('') : quit
149
138
  end
150
- # char or block device... WTF? fall through to body.each
139
+ return
140
+ elsif st.socket? || st.pipe?
141
+ @body = io = body_to_io(body)
142
+ chunk = stream_response_headers(status, headers) if headers
143
+ m = chunk ? ResponseChunkPipe : ResponsePipe
144
+ return EM.watch(io, m, self, alive, body).notify_readable = true
151
145
  end
152
-
153
- write(response_header(status, headers)) if headers
154
- write_body_each(self, body)
155
- quit unless alive
146
+ # char or block device... WTF? fall through to body.each
156
147
  end
157
148
 
158
- def unbind
159
- async_close = @env[ASYNC_CLOSE] and async_close.succeed
160
- @body.respond_to?(:fail) and @body.fail
161
- begin
162
- @_io.close
163
- rescue Errno::EBADF
164
- # EventMachine's EventableDescriptor::Close() may close
165
- # the underlying file descriptor without invalidating the
166
- # associated IO object on errors, so @_io.closed? isn't
167
- # sufficient.
168
- end
169
- end
149
+ write(response_header(status, headers)) if headers
150
+ write_body_each(self, body)
151
+ quit unless alive
170
152
  end
171
153
 
172
- module Server # :nodoc: all
173
- include Rainbows::Acceptor
174
-
175
- def close
176
- detach
177
- @io.close
178
- end
179
-
180
- def notify_readable
181
- return if CUR.size >= MAX
182
- io = accept(@io) or return
183
- sig = EM.attach_fd(io.fileno, false)
184
- CUR[sig] = CL.new(sig, io)
154
+ def unbind
155
+ async_close = @env[ASYNC_CLOSE] and async_close.succeed
156
+ @body.respond_to?(:fail) and @body.fail
157
+ begin
158
+ @_io.close
159
+ rescue Errno::EBADF
160
+ # EventMachine's EventableDescriptor::Close() may close
161
+ # the underlying file descriptor without invalidating the
162
+ # associated IO object on errors, so @_io.closed? isn't
163
+ # sufficient.
185
164
  end
186
165
  end
166
+ end
187
167
 
188
- def init_worker_process(worker) # :nodoc:
189
- Rainbows::Response.setup(Rainbows::EventMachine::Client)
190
- super
168
+ module Server # :nodoc: all
169
+ def close
170
+ detach
171
+ @io.close
191
172
  end
192
173
 
193
- # runs inside each forked worker, this sits around and waits
194
- # for connections and doesn't die until the parent dies (or is
195
- # given a INT, QUIT, or TERM signal)
196
- def worker_loop(worker) # :nodoc:
197
- init_worker_process(worker)
198
- G.server.app.respond_to?(:deferred?) and
199
- G.server.app = TryDefer[G.server.app]
200
-
201
- # enable them both, should be non-fatal if not supported
202
- EM.epoll
203
- EM.kqueue
204
- logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
205
- client_class = Rainbows.const_get(@use).const_get(:Client)
206
- Server.const_set(:MAX, worker_connections + LISTENERS.size)
207
- Server.const_set(:CL, client_class)
208
- client_class.const_set(:APP, G.server.app)
209
- EM.run {
210
- conns = EM.instance_variable_get(:@conns) or
211
- raise RuntimeError, "EM @conns instance variable not accessible!"
212
- Server.const_set(:CUR, conns)
213
- EM.add_periodic_timer(1) do
214
- unless G.tick
215
- conns.each_value { |c| client_class === c and c.quit }
216
- EM.stop if conns.empty? && EM.reactor_running?
217
- end
218
- end
219
- LISTENERS.map! do |s|
220
- EM.watch(s, Server) { |c| c.notify_readable = true }
221
- end
222
- }
174
+ def notify_readable
175
+ return if CUR.size >= MAX
176
+ io = @io.kgio_tryaccept or return
177
+ sig = EM.attach_fd(io.fileno, false)
178
+ CUR[sig] = CL.new(sig, io)
223
179
  end
180
+ end
181
+
182
+ def init_worker_process(worker) # :nodoc:
183
+ Rainbows::Response.setup(Rainbows::EventMachine::Client)
184
+ super
185
+ end
224
186
 
187
+ # runs inside each forked worker, this sits around and waits
188
+ # for connections and doesn't die until the parent dies (or is
189
+ # given a INT, QUIT, or TERM signal)
190
+ def worker_loop(worker) # :nodoc:
191
+ init_worker_process(worker)
192
+ G.server.app.respond_to?(:deferred?) and
193
+ G.server.app = TryDefer[G.server.app]
194
+
195
+ # enable them both, should be non-fatal if not supported
196
+ EM.epoll
197
+ EM.kqueue
198
+ logger.info "#@use: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
199
+ client_class = Rainbows.const_get(@use).const_get(:Client)
200
+ Server.const_set(:MAX, worker_connections + LISTENERS.size)
201
+ Server.const_set(:CL, client_class)
202
+ client_class.const_set(:APP, G.server.app)
203
+ EM.run {
204
+ conns = EM.instance_variable_get(:@conns) or
205
+ raise RuntimeError, "EM @conns instance variable not accessible!"
206
+ Server.const_set(:CUR, conns)
207
+ EM.add_periodic_timer(1) do
208
+ unless G.tick
209
+ conns.each_value { |c| client_class === c and c.quit }
210
+ EM.stop if conns.empty? && EM.reactor_running?
211
+ end
212
+ end
213
+ LISTENERS.map! do |s|
214
+ EM.watch(s, Server) { |c| c.notify_readable = true }
215
+ end
216
+ }
225
217
  end
226
218
  end