rainbows 2.0.1 → 2.1.0
- data/.document +1 -0
- data/.gitignore +1 -0
- data/.manifest +46 -18
- data/.wrongdoc.yml +8 -0
- data/ChangeLog +849 -374
- data/Documentation/comparison.haml +26 -21
- data/FAQ +6 -0
- data/GIT-VERSION-GEN +1 -1
- data/GNUmakefile +23 -65
- data/LATEST +27 -0
- data/NEWS +53 -26
- data/README +7 -7
- data/Rakefile +1 -98
- data/Summary +0 -7
- data/TODO +2 -2
- data/lib/rainbows/app_pool.rb +2 -1
- data/lib/rainbows/base.rb +1 -0
- data/lib/rainbows/configurator.rb +9 -0
- data/lib/rainbows/const.rb +1 -1
- data/lib/rainbows/coolio/client.rb +191 -0
- data/lib/rainbows/coolio/core.rb +25 -0
- data/lib/rainbows/{rev → coolio}/deferred_chunk_response.rb +3 -2
- data/lib/rainbows/{rev → coolio}/deferred_response.rb +3 -3
- data/lib/rainbows/coolio/heartbeat.rb +20 -0
- data/lib/rainbows/{rev → coolio}/master.rb +2 -3
- data/lib/rainbows/{rev → coolio}/sendfile.rb +1 -1
- data/lib/rainbows/coolio/server.rb +11 -0
- data/lib/rainbows/coolio/thread_client.rb +36 -0
- data/lib/rainbows/coolio.rb +45 -0
- data/lib/rainbows/coolio_fiber_spawn.rb +26 -0
- data/lib/rainbows/coolio_support.rb +9 -0
- data/lib/rainbows/coolio_thread_pool/client.rb +8 -0
- data/lib/rainbows/coolio_thread_pool/watcher.rb +14 -0
- data/lib/rainbows/coolio_thread_pool.rb +57 -0
- data/lib/rainbows/coolio_thread_spawn/client.rb +8 -0
- data/lib/rainbows/coolio_thread_spawn.rb +27 -0
- data/lib/rainbows/dev_fd_response.rb +6 -2
- data/lib/rainbows/ev_core/cap_input.rb +3 -2
- data/lib/rainbows/ev_core.rb +13 -3
- data/lib/rainbows/event_machine/client.rb +124 -0
- data/lib/rainbows/event_machine/response_pipe.rb +1 -2
- data/lib/rainbows/event_machine/server.rb +15 -0
- data/lib/rainbows/event_machine.rb +13 -137
- data/lib/rainbows/fiber/base.rb +6 -7
- data/lib/rainbows/fiber/body.rb +4 -2
- data/lib/rainbows/fiber/coolio/heartbeat.rb +15 -0
- data/lib/rainbows/fiber/{rev → coolio}/methods.rb +4 -5
- data/lib/rainbows/fiber/{rev → coolio}/server.rb +1 -1
- data/lib/rainbows/fiber/{rev → coolio}/sleeper.rb +2 -2
- data/lib/rainbows/fiber/coolio.rb +12 -0
- data/lib/rainbows/fiber/io/methods.rb +6 -0
- data/lib/rainbows/fiber/io.rb +8 -10
- data/lib/rainbows/fiber/queue.rb +24 -30
- data/lib/rainbows/fiber.rb +7 -4
- data/lib/rainbows/fiber_pool.rb +1 -1
- data/lib/rainbows/http_server.rb +9 -2
- data/lib/rainbows/max_body.rb +3 -1
- data/lib/rainbows/never_block/core.rb +15 -0
- data/lib/rainbows/never_block/event_machine.rb +8 -3
- data/lib/rainbows/never_block.rb +37 -70
- data/lib/rainbows/process_client.rb +3 -6
- data/lib/rainbows/rack_input.rb +17 -0
- data/lib/rainbows/response/body.rb +18 -19
- data/lib/rainbows/response.rb +1 -1
- data/lib/rainbows/rev.rb +21 -43
- data/lib/rainbows/rev_fiber_spawn.rb +4 -19
- data/lib/rainbows/rev_thread_pool.rb +21 -75
- data/lib/rainbows/rev_thread_spawn.rb +18 -36
- data/lib/rainbows/revactor/body.rb +4 -1
- data/lib/rainbows/revactor/tee_socket.rb +44 -0
- data/lib/rainbows/revactor.rb +13 -48
- data/lib/rainbows/socket_proxy.rb +24 -0
- data/lib/rainbows/sync_close.rb +37 -0
- data/lib/rainbows/thread_pool.rb +66 -70
- data/lib/rainbows/thread_spawn.rb +40 -50
- data/lib/rainbows/thread_timeout.rb +33 -27
- data/lib/rainbows/timed_read.rb +5 -1
- data/lib/rainbows/worker_yield.rb +16 -0
- data/lib/rainbows/writer_thread_pool/client.rb +19 -0
- data/lib/rainbows/writer_thread_pool.rb +60 -91
- data/lib/rainbows/writer_thread_spawn/client.rb +69 -0
- data/lib/rainbows/writer_thread_spawn.rb +37 -117
- data/lib/rainbows.rb +12 -4
- data/rainbows.gemspec +15 -19
- data/t/GNUmakefile +4 -4
- data/t/close-has-env.ru +65 -0
- data/t/simple-http_Coolio.ru +9 -0
- data/t/simple-http_CoolioFiberSpawn.ru +10 -0
- data/t/simple-http_CoolioThreadPool.ru +9 -0
- data/t/simple-http_CoolioThreadSpawn.ru +9 -0
- data/t/t0004-heartbeat-timeout.sh +2 -2
- data/t/t0007-worker-follows-master-to-death.sh +1 -1
- data/t/t0015-working_directory.sh +7 -1
- data/t/t0017-keepalive-timeout-zero.sh +1 -1
- data/t/t0019-keepalive-cpu-usage.sh +62 -0
- data/t/t0040-keepalive_requests-setting.sh +51 -0
- data/t/t0050-response-body-close-has-env.sh +109 -0
- data/t/t0102-rack-input-short.sh +6 -6
- data/t/t0106-rack-input-keepalive.sh +48 -2
- data/t/t0113-rewindable-input-false.sh +28 -0
- data/t/t0113.ru +12 -0
- data/t/t0114-rewindable-input-true.sh +28 -0
- data/t/t0114.ru +12 -0
- data/t/t9100-thread-timeout.sh +24 -2
- data/t/t9101-thread-timeout-threshold.sh +6 -13
- data/t/test-lib.sh +2 -1
- data/t/test_isolate.rb +9 -4
- data/t/times.ru +6 -0
- metadata +109 -42
- data/GIT-VERSION-FILE +0 -1
- data/lib/rainbows/fiber/rev/heartbeat.rb +0 -8
- data/lib/rainbows/fiber/rev/kato.rb +0 -22
- data/lib/rainbows/fiber/rev.rb +0 -13
- data/lib/rainbows/rev/client.rb +0 -194
- data/lib/rainbows/rev/core.rb +0 -41
- data/lib/rainbows/rev/heartbeat.rb +0 -23
- data/lib/rainbows/rev/thread.rb +0 -46
- data/man/man1/rainbows.1 +0 -193

data/lib/rainbows/rev_thread_pool.rb
CHANGED
@@ -1,76 +1,22 @@
 # -*- encoding: binary -*-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-    # :stopdoc:
-    DEFAULTS = {
-      :pool_size => 20, # same default size as ThreadPool (w/o Rev)
-    }
-    #:startdoc:
-
-    def self.setup # :nodoc:
-      DEFAULTS.each { |k,v| O[k] ||= v }
-      Integer === O[:pool_size] && O[:pool_size] > 0 or
-        raise ArgumentError, "pool_size must a be an Integer > 0"
-    end
-
-    class PoolWatcher < ::Rev::TimerWatcher # :nodoc: all
-      def initialize(threads)
-        @threads = threads
-        super(G.server.timeout, true)
-      end
-
-      def on_timer
-        @threads.each { |t| t.join(0) and G.quit! }
-      end
-    end
-
-    class Client < Rainbows::Rev::ThreadClient # :nodoc:
-      def app_dispatch
-        QUEUE << self
-      end
-    end
-
-    include Rainbows::Rev::Core
-
-    def init_worker_threads(master, queue) # :nodoc:
-      O[:pool_size].times.map do
-        Thread.new do
-          begin
-            client = queue.pop
-            master << [ client, client.app_response ]
-          rescue => e
-            Error.listen_loop(e)
-          end while true
-        end
-      end
-    end
-
-    def init_worker_process(worker) # :nodoc:
-      super
-      master = Rev::Master.new(Queue.new).attach(::Rev::Loop.default)
-      queue = Client.const_set(:QUEUE, Queue.new)
-      threads = init_worker_threads(master, queue)
-      PoolWatcher.new(threads).attach(::Rev::Loop.default)
-      logger.info "RevThreadPool pool_size=#{O[:pool_size]}"
-    end
-  end
-end
+# :stopdoc:
+Rainbows.const_set(:RevThreadPool, Rainbows::CoolioThreadPool)
+# :startdoc:
+
+# CoolioThreadPool is the new version of this, use that instead.
+#
+# A combination of the Rev and ThreadPool models. This allows Ruby
+# Thread-based concurrency for application processing. It DOES NOT
+# expose a streamable "rack.input" for upload processing within the
+# app. DevFdResponse should be used with this class to proxy
+# asynchronous responses. All network I/O between the client and
+# server are handled by the main thread and outside of the core
+# application dispatch.
+#
+# Unlike ThreadPool, Rev makes this model highly suitable for
+# slow clients and applications with medium-to-slow response times
+# (I/O bound), but less suitable for sleepy applications.
+#
+# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
+# users are NOT advised to use this due to high CPU usage.
+module Rainbows::RevThreadPool; end
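
The Rev-based hunks in this release all make the same move: the old implementation is deleted and the constant is aliased to the new Coolio-based model via const_set, so existing configurations keep loading. A hypothetical Rainbows! configuration sketch (the model name, pool size and process count below are illustrative, not taken from this diff):

    # rainbows.conf.rb -- illustrative example config, not shipped with the gem
    worker_processes 4
    Rainbows! do
      use :CoolioThreadPool    # an old "use :RevThreadPool" still resolves here
      worker_connections 128
    end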

data/lib/rainbows/rev_thread_spawn.rb
CHANGED
@@ -1,38 +1,20 @@
 # -*- encoding: binary -*-
-
+Rainbows.const_set(:RevThreadSpawn, Rainbows::CoolioThreadSpawn)
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-  module RevThreadSpawn
-
-    class Client < Rainbows::Rev::ThreadClient # :nodoc: all
-      def app_dispatch
-        Thread.new(self) { |client| MASTER << [ client, app_response ] }
-      end
-    end
-
-    include Rainbows::Rev::Core
-
-    def init_worker_process(worker) # :nodoc:
-      super
-      master = Rev::Master.new(Queue.new).attach(::Rev::Loop.default)
-      Client.const_set(:MASTER, master)
-    end
-
-  end
-end
+# CoolioThreadPool is the new version of this, use that instead.
+#
+# A combination of the Rev and ThreadSpawn models. This allows Ruby
+# Thread-based concurrency for application processing. It DOES NOT
+# expose a streamable "rack.input" for upload processing within the
+# app. DevFdResponse should be used with this class to proxy
+# asynchronous responses. All network I/O between the client and
+# server are handled by the main thread and outside of the core
+# application dispatch.
+#
+# Unlike ThreadSpawn, Rev makes this model highly suitable for
+# slow clients and applications with medium-to-slow response times
+# (I/O bound), but less suitable for sleepy applications.
+#
+# This concurrency model is designed for Ruby 1.9, and Ruby 1.8
+# users are NOT advised to use this due to high CPU usage.
+module Rainbows::RevThreadSpawn; end

data/lib/rainbows/revactor/body.rb
CHANGED
@@ -8,8 +8,9 @@ module Rainbows::Revactor::Body
 
   if IO.method_defined?(:sendfile_nonblock)
     def write_body_file(client, body, range)
+      body = body_to_io(body)
       sock = client.instance_variable_get(:@_io)
-      pfx =
+      pfx = Revactor::TCP::Socket === client ? :tcp : :unix
       write_complete = T[:"#{pfx}_write_complete", client]
       closed = T[:"#{pfx}_closed", client]
       offset, count = range ? range : [ 0, body.stat.size ]
@@ -29,6 +30,8 @@ module Rainbows::Revactor::Body
       rescue EOFError
         break
       end while (count -= n) > 0
+    ensure
+      close_if_private(body)
     end
   else
     ALIASES[:write_body] = :write_body_each

data/lib/rainbows/revactor/tee_socket.rb
@@ -0,0 +1,44 @@
+# -*- encoding: binary -*-
+# :enddoc:
+#
+# Revactor Sockets do not implement readpartial, so we emulate just
+# enough to avoid mucking with TeeInput internals. Fortunately
+# this code is not heavily used so we can usually avoid the overhead
+# of adding a userspace buffer.
+class Rainbows::Revactor::TeeSocket
+  def initialize(socket)
+    # IO::Buffer is used internally by Rev which Revactor is based on
+    # so we'll always have it available
+    @socket, @rbuf = socket, IO::Buffer.new
+  end
+
+  def leftover
+    @rbuf.read
+  end
+
+  # Revactor socket reads always return an unspecified amount,
+  # sometimes too much
+  def kgio_read(length, dst = "")
+    return dst.replace("") if length == 0
+
+    # always check and return from the userspace buffer first
+    @rbuf.size > 0 and return dst.replace(@rbuf.read(length))
+
+    # read off the socket since there was nothing in rbuf
+    tmp = @socket.read
+
+    # we didn't read too much, good, just return it straight back
+    # to avoid needlessly wasting memory bandwidth
+    tmp.size <= length and return dst.replace(tmp)
+
+    # ugh, read returned too much
+    @rbuf << tmp[length, tmp.size]
+    dst.replace(tmp[0, length])
+  rescue EOFError
+  end
+
+  # just proxy any remaining methods TeeInput may use
+  def close
+    @socket.close
+  end
+end
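
TeeSocket#kgio_read returns at most `length` bytes and parks any surplus from Revactor's unsized reads in a userspace buffer; `leftover` hands that surplus back so it can be replayed into the parser for the next keepalive request (see the revactor.rb hunk below). A standalone sketch of the same buffering idea, using a plain String in place of Rev's IO::Buffer (all names here are illustrative, not the gem's class):

    # Illustrative analogue of the TeeSocket buffering, not Rainbows code.
    class BufferedReader
      def initialize(chunks)
        @chunks = chunks   # stands in for "read whatever happens to arrive"
        @rbuf = ""
      end

      def leftover
        @rbuf.slice!(0, @rbuf.size)
      end

      def read_at_most(length, dst = "")
        return dst.replace("") if length == 0
        return dst.replace(@rbuf.slice!(0, length)) unless @rbuf.empty?
        tmp = @chunks.shift or raise EOFError
        return dst.replace(tmp) if tmp.size <= length
        @rbuf << tmp[length..-1]          # keep the surplus for later
        dst.replace(tmp[0, length])
      end
    end

    r = BufferedReader.new(["POST / HTTP/1.1\r\n\r\nbodyGET /next"])
    p r.read_at_most(23)   # => "POST / HTTP/1.1\r\n\r\nbody"
    p r.leftover           # => "GET /next" -- replayed as the next request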
data/lib/rainbows/revactor.rb
CHANGED
@@ -24,10 +24,11 @@ module Rainbows::Revactor
   RD_ARGS = {}
 
   autoload :Proxy, 'rainbows/revactor/proxy'
+  autoload :TeeSocket, 'rainbows/revactor/tee_socket'
 
   include Rainbows::Base
   LOCALHOST = Kgio::LOCALHOST
-  TCP =
+  TCP = Revactor::TCP::Socket
 
   # once a client is accepted, it is processed in its entirety here
   # in 3 easy steps: read request, call app, write app response
@@ -46,13 +47,14 @@ module Rainbows::Revactor
     alive = false
 
     begin
+      ts = nil
      until env = hp.parse
        buf << client.read(*rd_args)
      end
 
      env[CLIENT_IO] = client
      env[RACK_INPUT] = 0 == hp.content_length ?
-        NULL_IO :
+        NULL_IO : IC.new(ts = TeeSocket.new(client), hp)
      env[REMOTE_ADDR] = remote_addr
      status, headers, body = app.call(env.update(RACK_DEFAULTS))
 
@@ -68,10 +70,11 @@ module Rainbows::Revactor
        alive = hp.next? && G.alive && G.kato > 0
        headers[CONNECTION] = alive ? KEEP_ALIVE : CLOSE
        client.write(response_header(status, headers))
+        alive && ts and buf << ts.leftover
      end
      write_body(client, body, range)
    end while alive
-  rescue
+  rescue Revactor::TCP::ReadError
  rescue => e
    Rainbows::Error.write(io, e)
  ensure
@@ -85,6 +88,7 @@ module Rainbows::Revactor
    init_worker_process(worker)
    require 'rainbows/revactor/body'
    self.class.__send__(:include, Rainbows::Revactor::Body)
+    self.class.const_set(:IC, Unicorn::HttpRequest.input_class)
    RD_ARGS[:timeout] = G.kato if G.kato > 0
    nr = 0
    limit = worker_connections
@@ -133,54 +137,15 @@ module Rainbows::Revactor
    LISTENERS.map do |s|
      case s
      when TCPServer
-        l =
-        [ l, T[:tcp_closed,
-          T[:tcp_connection, l,
+        l = Revactor::TCP.listen(s, nil)
+        [ l, T[:tcp_closed, Revactor::TCP::Socket],
+          T[:tcp_connection, l, Revactor::TCP::Socket] ]
      when UNIXServer
-        l =
-        [ l, T[:unix_closed,
-          T[:unix_connection, l,
+        l = Revactor::UNIX.listen(s)
+        [ l, T[:unix_closed, Revactor::UNIX::Socket ],
+          T[:unix_connection, l, Revactor::UNIX::Socket] ]
      end
    end
  end
-
-  # Revactor Sockets do not implement readpartial, so we emulate just
-  # enough to avoid mucking with TeeInput internals. Fortunately
-  # this code is not heavily used so we can usually avoid the overhead
-  # of adding a userspace buffer.
-  class TeeSocket
-    def initialize(socket)
-      # IO::Buffer is used internally by Rev which Revactor is based on
-      # so we'll always have it available
-      @socket, @rbuf = socket, IO::Buffer.new
-    end
-
-    # Revactor socket reads always return an unspecified amount,
-    # sometimes too much
-    def kgio_read(length, dst = "")
-      return dst.replace("") if length == 0
-
-      # always check and return from the userspace buffer first
-      @rbuf.size > 0 and return dst.replace(@rbuf.read(length))
-
-      # read off the socket since there was nothing in rbuf
-      tmp = @socket.read
-
-      # we didn't read too much, good, just return it straight back
-      # to avoid needlessly wasting memory bandwidth
-      tmp.size <= length and return dst.replace(tmp)
-
-      # ugh, read returned too much
-      @rbuf << tmp[length, tmp.size]
-      dst.replace(tmp[0, length])
-    rescue EOFError
-    end
-
-    # just proxy any remaining methods TeeInput may use
-    def close
-      @socket.close
-    end
-  end
-
  # :startdoc:
 end

data/lib/rainbows/socket_proxy.rb
@@ -0,0 +1,24 @@
+# -*- encoding: binary -*-
+# :enddoc:
+#
+module Rainbows::SocketProxy
+  def kgio_addr
+    to_io.kgio_addr
+  end
+
+  def kgio_read(size, buf = "")
+    to_io.kgio_read(size, buf)
+  end
+
+  def kgio_read!(size, buf = "")
+    to_io.kgio_read!(size, buf)
+  end
+
+  def kgio_trywrite(buf)
+    to_io.kgio_trywrite(buf)
+  end
+
+  def timed_read(buf)
+    to_io.timed_read(buf)
+  end
+end
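
SocketProxy is a small delegation mixin: any wrapper that exposes the real socket through `to_io` gets the kgio read/write calls forwarded to it. A rough sketch of the same pattern using plain IO calls so it runs without kgio (the module and class names here are made up for illustration):

    # Hypothetical illustration of the to_io delegation pattern.
    module IoForwarder
      def read_nonblock(len, buf = "")
        to_io.read_nonblock(len, buf)
      end

      def write(buf)
        to_io.write(buf)
      end
    end

    class WrappedSocket
      include IoForwarder

      def initialize(io)
        @io = io
      end

      def to_io
        @io
      end
    end

    rd, wr = IO.pipe
    wr.write "hello"
    p WrappedSocket.new(rd).read_nonblock(5)   # => "hello"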

data/lib/rainbows/sync_close.rb
@@ -0,0 +1,37 @@
+# -*- encoding: binary -*-
+# :enddoc:
+require 'thread'
+class Rainbows::SyncClose
+  def initialize(body)
+    @body = body
+    @mutex = Mutex.new
+    @cv = ConditionVariable.new
+    @mutex.synchronize do
+      yield self
+      @cv.wait(@mutex)
+    end
+  end
+
+  def respond_to?(m)
+    @body.respond_to?(m)
+  end
+
+  def to_path
+    @body.to_path
+  end
+
+  def each(&block)
+    @body.each(&block)
+  end
+
+  def to_io
+    @body.to_io
+  end
+
+  # called by the writer thread to wake up the original thread (in #initialize)
+  def close
+    @body.close
+  ensure
+    @mutex.synchronize { @cv.signal }
+  end
+end
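
SyncClose lets a dispatching thread hand a response body to a writer thread and block until that thread calls #close on the wrapper; because the yield happens while the mutex is held, the writer cannot signal before the dispatcher has started waiting, which is what makes the handshake race-free. A self-contained sketch of the same handshake (class and variable names below are illustrative, not the gem's API):

    require 'thread'

    # Minimal restatement of the SyncClose handshake.
    class BlockingCloseWrapper
      def initialize(body)
        @body = body
        @mutex = Mutex.new
        @cv = ConditionVariable.new
        @mutex.synchronize do
          yield self          # hand ourselves to the writer thread
          @cv.wait(@mutex)    # sleep until the writer calls #close
        end
      end

      def each(&block)
        @body.each(&block)
      end

      def close
        @body.close if @body.respond_to?(:close)
      ensure
        @mutex.synchronize { @cv.signal }  # wake the thread stuck in #initialize
      end
    end

    body = ["hello ", "world\n"]
    BlockingCloseWrapper.new(body) do |wrapped|
      Thread.new do
        wrapped.each { |chunk| $stdout.write(chunk) }
        wrapped.close
      end
    end
    puts "body was fully written and closed before this line"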
data/lib/rainbows/thread_pool.rb
CHANGED
@@ -1,82 +1,78 @@
 # -*- encoding: binary -*-
 
-
+# Implements a worker thread pool model. This is suited for platforms
+# like Ruby 1.9, where the cost of dynamically spawning a new thread
+# for every new client connection is higher than with the ThreadSpawn
+# model.
+#
+# This model should provide a high level of compatibility with all
+# Ruby implementations, and most libraries and applications.
+# Applications running under this model should be thread-safe
+# but not necessarily reentrant.
+#
+# Applications using this model are required to be thread-safe.
+# Threads are never spawned dynamically under this model. If you're
+# connecting to external services and need to perform DNS lookups,
+# consider using the "resolv-replace" library which replaces parts of
+# the core Socket package with concurrent DNS lookup capabilities.
+#
+# This model probably less suited for many slow clients than the
+# others and thus a lower +worker_connections+ setting is recommended.
 
-
-
-  # for every new client connection is higher than with the ThreadSpawn
-  # model.
-  #
-  # This model should provide a high level of compatibility with all
-  # Ruby implementations, and most libraries and applications.
-  # Applications running under this model should be thread-safe
-  # but not necessarily reentrant.
-  #
-  # Applications using this model are required to be thread-safe.
-  # Threads are never spawned dynamically under this model. If you're
-  # connecting to external services and need to perform DNS lookups,
-  # consider using the "resolv-replace" library which replaces parts of
-  # the core Socket package with concurrent DNS lookup capabilities.
-  #
-  # This model probably less suited for many slow clients than the
-  # others and thus a lower +worker_connections+ setting is recommended.
+module Rainbows::ThreadPool
+  include Rainbows::Base
 
-
-
-
-
-    init_worker_process(worker)
-    pool = (1..worker_connections).map do
-      Thread.new { LISTENERS.size == 1 ? sync_worker : async_worker }
-    end
-
-    while G.alive
-      # if any worker dies, something is serious wrong, bail
-      pool.each do |thr|
-        G.tick or break
-        thr.join(1) and G.quit!
-      end
-    end
-    join_threads(pool)
+  def worker_loop(worker) # :nodoc:
+    init_worker_process(worker)
+    pool = (1..worker_connections).map do
+      Thread.new { LISTENERS.size == 1 ? sync_worker : async_worker }
     end
 
-
-
-
-
-
-
-    end while G.alive
+    while G.alive
+      # if any worker dies, something is serious wrong, bail
+      pool.each do |thr|
+        G.tick or break
+        thr.join(1) and G.quit!
+      end
     end
+    join_threads(pool)
+  end
 
-
-
-
-
-
-
-
-
-      s = s.kgio_tryaccept and process_client(s)
-    end
-  rescue Errno::EINTR
-  rescue => e
-    Error.listen_loop(e)
-  end while G.alive
-end
+  def sync_worker # :nodoc:
+    s = LISTENERS[0]
+    begin
+      c = s.kgio_accept and process_client(c)
+    rescue => e
+      Rainbows::Error.listen_loop(e)
+    end while G.alive
+  end
 
-
-
-
-
-
-
-
-
-
-
-
-
+  def async_worker # :nodoc:
+    begin
+      # TODO: check if select() or accept() is a problem on large
+      # SMP systems under Ruby 1.9. Hundreds of native threads
+      # all working off the same socket could be a thundering herd
+      # problem. On the other hand, a thundering herd may not
+      # even incur as much overhead as an extra Mutex#synchronize
+      ret = select(LISTENERS) and ret[0].each do |s|
+        s = s.kgio_tryaccept and process_client(s)
+      end
+    rescue Errno::EINTR
+    rescue => e
+      Rainbows::Error.listen_loop(e)
+    end while G.alive
+  end
 
+  def join_threads(threads) # :nodoc:
+    G.quit!
+    threads.delete_if do |thr|
+      G.tick
+      begin
+        thr.run
+        thr.join(0.01)
+      rescue
+        true
+      end
+    end until threads.empty?
   end
 end
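
The rewritten ThreadPool is a fixed set of threads that all block accepting on the shared listener (sync_worker) or select across several listeners (async_worker), each running process_client inline. A stripped-down, self-contained sketch of that accept model with a plain TCPServer (everything here is illustrative, not Rainbows internals):

    require 'socket'

    # Toy fixed-pool accept model: N threads share one listener and each
    # serves its accepted connection to completion.
    listener = TCPServer.new('127.0.0.1', 0)

    pool = 4.times.map do
      Thread.new do
        loop do
          client = listener.accept   # blocking accept, like sync_worker
          client.write "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
          client.close
        end
      end
    end

    # exercise it once, then shut the toy pool down
    p TCPSocket.open('127.0.0.1', listener.addr[1]) { |s| s.read }
    pool.each(&:kill)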

data/lib/rainbows/thread_spawn.rb
CHANGED
@@ -1,62 +1,52 @@
 # -*- encoding: binary -*-
 require 'thread'
-module Rainbows
 
-
-
-
-
-
-
-
-
-
-
-
-
-
+# Spawns a new thread for every client connection we accept(). This
+# model is recommended for platforms like Ruby 1.8 where spawning new
+# threads is inexpensive.
+#
+# This model should provide a high level of compatibility with all
+# Ruby implementations, and most libraries and applications.
+# Applications running under this model should be thread-safe
+# but not necessarily reentrant.
+#
+# If you're connecting to external services and need to perform DNS
+# lookups, consider using the "resolv-replace" library which replaces
+# parts of the core Socket package with concurrent DNS lookup
+# capabilities
 
-
-
+module Rainbows::ThreadSpawn
+  include Rainbows::Base
+  include Rainbows::WorkerYield
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-            # by other worker _processes_.
-            sleep(0.01)
-          elsif c = l.kgio_accept
-            klass.new(c) do |c|
-              begin
-                lock.synchronize { G.cur += 1 }
-                process_client(c)
-              ensure
-                lock.synchronize { G.cur -= 1 }
-              end
+  def accept_loop(klass) #:nodoc:
+    lock = Mutex.new
+    limit = worker_connections
+    LISTENERS.each do |l|
+      klass.new(l) do |l|
+        begin
+          if lock.synchronize { G.cur >= limit }
+            worker_yield
+          elsif c = l.kgio_accept
+            klass.new(c) do |c|
+              begin
+                lock.synchronize { G.cur += 1 }
+                process_client(c)
+              ensure
+                lock.synchronize { G.cur -= 1 }
               end
             end
-
-
-
-        end
+          end
+        rescue => e
+          Rainbows::Error.listen_loop(e)
+        end while G.alive
      end
-    sleep 1 while G.tick || lock.synchronize { G.cur > 0 }
    end
+    sleep 1 while G.tick || lock.synchronize { G.cur > 0 }
+  end
 
-
-
-
-  end
+  def worker_loop(worker) #:nodoc:
+    init_worker_process(worker)
+    accept_loop(Thread)
   end
 end
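
ThreadSpawn takes the opposite approach: accept in a per-listener thread and spawn a new thread per connection, with a Mutex-guarded counter capping concurrency at worker_connections. A comparable stand-alone sketch (again illustrative, not the gem's code):

    require 'socket'
    require 'thread'

    # Toy spawn-per-connection model with a concurrency cap.
    listener = TCPServer.new('127.0.0.1', 0)
    limit, cur, lock = 16, 0, Mutex.new

    acceptor = Thread.new do
      loop do
        Thread.pass while lock.synchronize { cur >= limit }  # crude worker_yield
        client = listener.accept
        Thread.new(client) do |c|
          begin
            lock.synchronize { cur += 1 }
            c.write "HTTP/1.1 200 OK\r\nContent-Length: 2\r\n\r\nhi"
          ensure
            lock.synchronize { cur -= 1 }
            c.close
          end
        end
      end
    end

    p TCPSocket.open('127.0.0.1', listener.addr[1]) { |s| s.read }
    acceptor.kill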