rainbows 3.2.0 → 3.3.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.document +1 -0
- data/COPYING +617 -282
- data/Documentation/comparison.haml +81 -24
- data/FAQ +3 -0
- data/GIT-VERSION-GEN +1 -1
- data/LICENSE +14 -5
- data/README +10 -9
- data/Sandbox +25 -0
- data/TODO +2 -22
- data/lib/rainbows.rb +50 -49
- data/lib/rainbows/client.rb +6 -5
- data/lib/rainbows/configurator.rb +191 -37
- data/lib/rainbows/const.rb +1 -1
- data/lib/rainbows/coolio.rb +4 -1
- data/lib/rainbows/coolio/client.rb +2 -2
- data/lib/rainbows/coolio/heartbeat.rb +2 -1
- data/lib/rainbows/coolio_fiber_spawn.rb +12 -7
- data/lib/rainbows/coolio_thread_pool.rb +19 -10
- data/lib/rainbows/coolio_thread_spawn.rb +3 -0
- data/lib/rainbows/epoll.rb +27 -5
- data/lib/rainbows/epoll/client.rb +3 -3
- data/lib/rainbows/ev_core.rb +2 -1
- data/lib/rainbows/event_machine.rb +4 -0
- data/lib/rainbows/event_machine/client.rb +2 -1
- data/lib/rainbows/fiber.rb +5 -0
- data/lib/rainbows/fiber/base.rb +1 -0
- data/lib/rainbows/fiber/coolio/methods.rb +0 -1
- data/lib/rainbows/fiber/io.rb +10 -6
- data/lib/rainbows/fiber/io/pipe.rb +6 -1
- data/lib/rainbows/fiber/io/socket.rb +6 -1
- data/lib/rainbows/fiber_pool.rb +12 -7
- data/lib/rainbows/fiber_spawn.rb +11 -6
- data/lib/rainbows/http_server.rb +55 -59
- data/lib/rainbows/join_threads.rb +4 -0
- data/lib/rainbows/max_body.rb +29 -10
- data/lib/rainbows/never_block.rb +7 -10
- data/lib/rainbows/pool_size.rb +14 -0
- data/lib/rainbows/process_client.rb +23 -1
- data/lib/rainbows/queue_pool.rb +8 -6
- data/lib/rainbows/response.rb +12 -11
- data/lib/rainbows/revactor.rb +14 -7
- data/lib/rainbows/revactor/client.rb +2 -2
- data/lib/rainbows/stream_file.rb +11 -4
- data/lib/rainbows/thread_pool.rb +12 -28
- data/lib/rainbows/thread_spawn.rb +14 -13
- data/lib/rainbows/thread_timeout.rb +118 -30
- data/lib/rainbows/writer_thread_pool/client.rb +1 -1
- data/lib/rainbows/writer_thread_spawn/client.rb +2 -2
- data/lib/rainbows/xepoll.rb +13 -5
- data/lib/rainbows/xepoll/client.rb +19 -17
- data/lib/rainbows/xepoll_thread_pool.rb +82 -0
- data/lib/rainbows/xepoll_thread_pool/client.rb +129 -0
- data/lib/rainbows/xepoll_thread_spawn.rb +58 -0
- data/lib/rainbows/xepoll_thread_spawn/client.rb +121 -0
- data/pkg.mk +4 -0
- data/rainbows.gemspec +4 -1
- data/t/GNUmakefile +5 -1
- data/t/client_header_buffer_size.ru +5 -0
- data/t/simple-http_XEpollThreadPool.ru +10 -0
- data/t/simple-http_XEpollThreadSpawn.ru +10 -0
- data/t/t0022-copy_stream-byte-range.sh +1 -15
- data/t/t0026-splice-copy_stream-byte-range.sh +25 -0
- data/t/t0027-nil-copy_stream.sh +60 -0
- data/t/t0041-optional-pool-size.sh +2 -2
- data/t/t0042-client_header_buffer_size.sh +65 -0
- data/t/t9100-thread-timeout.sh +1 -6
- data/t/t9101-thread-timeout-threshold.sh +1 -6
- data/t/test-lib.sh +58 -0
- data/t/test_isolate.rb +9 -3
- metadata +47 -16
data/lib/rainbows/never_block.rb
CHANGED
@@ -6,8 +6,11 @@
 # a streaming "rack.input" but is compatible with everything else
 # EventMachine supports.
 #
+# === :pool_size vs worker_connections
+#
 # In your Rainbows! config block, you may specify a Fiber pool size
 # to limit your application concurrency (without using Rainbows::AppPool)
+# independently of worker_connections.
 #
 #   Rainbows! do
 #     use :NeverBlock, :pool_size => 50
@@ -15,20 +18,14 @@
 #   end
 #
 module Rainbows::NeverBlock
-
   # :stopdoc:
-
-    :pool_size => 20, # same default size used by NB
-    :backend => :EventMachine, # NeverBlock doesn't support Rev yet
-  }
+  extend Rainbows::PoolSize
 
   # same pool size NB core itself uses
   def self.setup # :nodoc:
-
-
-
-      raise ArgumentError, "pool_size must a be an Integer > 0"
-    mod = Rainbows.const_get(o[:backend])
+    super
+    Rainbows::O[:backend] ||= :EventMachine # no Cool.io support, yet
+    Rainbows.const_get(Rainbows::O[:backend])
     require "never_block" # require EM first since we need a higher version
   end
 
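The updated NeverBlock documentation above already shows the intended configuration pattern. As a minimal sketch based on that documented example (the pool size of 50 and the worker_connections value are only illustrative numbers):

    # rainbows.conf.rb -- sketch based on the documented example above
    Rainbows! do
      use :NeverBlock, :pool_size => 50  # :pool_size is now independent of worker_connections
      worker_connections 100             # example value
    end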
data/lib/rainbows/pool_size.rb
ADDED
@@ -0,0 +1,14 @@
+# -*- encoding: binary -*-
+# :stopdoc:
+module Rainbows::PoolSize
+  DEFAULTS = {
+    :pool_size => 50, # same as the default worker_connections
+  }
+
+  def setup
+    o = Rainbows::O
+    DEFAULTS.each { |k,v| o[k] ||= v }
+    Integer === o[:pool_size] && o[:pool_size] > 0 or
+      raise ArgumentError, "pool_size must a be an Integer > 0"
+  end
+end
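NeverBlock now pulls its :pool_size handling from this shared module via `extend Rainbows::PoolSize` and a `super` call in its own setup. A hypothetical model could reuse it the same way; this is only a sketch and the module name below is made up:

    # sketch: reusing the shared :pool_size default and validation
    module Rainbows::MyPooledModel   # hypothetical name, for illustration only
      extend Rainbows::PoolSize      # PoolSize#setup becomes a singleton method here

      def self.setup
        super  # applies DEFAULTS and validates Rainbows::O[:pool_size]
        # model-specific setup would go here
      end
    end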
data/lib/rainbows/process_client.rb
CHANGED
@@ -7,10 +7,11 @@ module Rainbows::ProcessClient
   NULL_IO = Unicorn::HttpRequest::NULL_IO
   RACK_INPUT = Unicorn::HttpRequest::RACK_INPUT
   IC = Unicorn::HttpRequest.input_class
+  Rainbows.config!(self, :client_header_buffer_size)
 
   def process_loop
     @hp = hp = Rainbows::HttpParser.new
-    kgio_read!(
+    kgio_read!(CLIENT_HEADER_BUFFER_SIZE, buf = hp.buf) or return
 
     begin # loop
       until env = hp.parse
@@ -46,4 +47,25 @@ module Rainbows::ProcessClient
   def set_input(env, hp)
     env[RACK_INPUT] = 0 == hp.content_length ? NULL_IO : IC.new(self, hp)
   end
+
+  def process_pipeline(env, hp)
+    begin
+      set_input(env, hp)
+      env[REMOTE_ADDR] = kgio_addr
+      status, headers, body = APP.call(env.merge!(RACK_DEFAULTS))
+      if 100 == status.to_i
+        write(EXPECT_100_RESPONSE)
+        env.delete(HTTP_EXPECT)
+        status, headers, body = APP.call(env)
+      end
+      write_response(status, headers, body, alive = hp.next?)
+    end while alive && pipeline_ready(hp)
+    alive or close
+  rescue => e
+    handle_error(e)
+  end
+
+  # override this in subclass/module
+  def pipeline_ready(hp)
+  end
 end
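The new `Rainbows.config!(self, :client_header_buffer_size)` line (together with the new t/client_header_buffer_size.ru and t0042 tests) indicates the initial header read size is now tunable. Presumably it is set like other options in the Rainbows! block; a hedged sketch with an example value:

    # sketch: directive name assumed from the option symbol and test names above
    Rainbows! do
      use :ThreadSpawn
      client_header_buffer_size 4 * 1024   # example size, in bytes
    end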
data/lib/rainbows/queue_pool.rb
CHANGED
@@ -5,24 +5,26 @@ require 'thread'
 # Thread pool class based on pulling off a single Ruby Queue.
 # This is NOT used for the ThreadPool class, since that class does not
 # need a userspace Queue.
-class Rainbows::QueuePool
+class Rainbows::QueuePool
+  attr_reader :queue
+
   def initialize(size = 20)
     q = Queue.new
-
+    @threads = (1..size).map do
       Thread.new do
         while job = q.shift
           yield job
         end
       end
     end
-
+    @queue = q
   end
 
   def quit!
-    threads.each { |_| queue << nil }
-    threads.delete_if do |t|
+    @threads.each { |_| @queue << nil }
+    @threads.delete_if do |t|
       Rainbows.tick
       t.alive? ? t.join(0.01) : true
-    end until threads.empty?
+    end until @threads.empty?
   end
 end
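With the queue now exposed via attr_reader and the threads held in instance variables, QueuePool can be exercised on its own. A small illustration only: QueuePool is an internal helper, and quit! calls Rainbows.tick, which assumes a running Rainbows! worker process.

    require 'rainbows/queue_pool'

    # four worker threads, each pulling jobs off the shared queue
    pool = Rainbows::QueuePool.new(4) { |job| job.call }
    pool.queue << lambda { puts "hello from a pooled worker" }
    pool.quit!  # pushes nil sentinels, then reaps the worker threads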
data/lib/rainbows/response.rb
CHANGED
@@ -6,6 +6,7 @@ module Rainbows::Response
   KeepAlive = "keep-alive"
   Content_Length = "Content-Length".freeze
   Transfer_Encoding = "Transfer-Encoding".freeze
+  Rainbows.config!(self, :copy_stream)
 
   # private file class for IO objects opened by Rainbows! itself (and not
   # the app or middleware)
@@ -14,7 +15,7 @@ module Rainbows::Response
   # called after forking
   def self.setup(klass)
     Kgio.accept_class = Rainbows::Client
-    0 == Rainbows.keepalive_timeout and
+    0 == Rainbows.server.keepalive_timeout and
       Rainbows::HttpParser.keepalive_requests = 0
   end
 
@@ -67,7 +68,7 @@ module Rainbows::Response
   end
 
   # generic response writer, used for most dynamically-generated responses
-  # and also when
+  # and also when copy_stream and/or IO#trysendfile is unavailable
   def write_response(status, headers, body, alive)
     write_headers(status, headers, alive)
     write_body_each(body)
@@ -89,29 +90,29 @@ module Rainbows::Response
     include Sendfile
   end
 
-  if
+  if COPY_STREAM
     unless IO.method_defined?(:trysendfile)
       module CopyStream
         def write_body_file(body, range)
-          range ?
-
+          range ? COPY_STREAM.copy_stream(body, self, range[1], range[0]) :
+                  COPY_STREAM.copy_stream(body, self, nil, 0)
         end
       end
       include CopyStream
     end
 
-    # write_body_stream is an alias for write_body_each if
+    # write_body_stream is an alias for write_body_each if copy_stream
     # isn't used or available.
     def write_body_stream(body)
-
+      COPY_STREAM.copy_stream(io = body_to_io(body), self)
     ensure
       close_if_private(io)
     end
-  else # !
+  else # ! COPY_STREAM
     alias write_body_stream write_body_each
-  end # !
+  end # ! COPY_STREAM
 
-  if IO.method_defined?(:trysendfile) ||
+  if IO.method_defined?(:trysendfile) || COPY_STREAM
     HTTP_RANGE = 'HTTP_RANGE'
     Content_Range = 'Content-Range'.freeze
 
@@ -181,5 +182,5 @@ module Rainbows::Response
       end
     end
     include ToPath
-  end #
+  end # COPY_STREAM || IO.method_defined?(:trysendfile)
 end
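Response now reads a COPY_STREAM constant installed by `Rainbows.config!(self, :copy_stream)`, so the copy_stream backend appears to be selectable from the configuration file (the new t0026 splice and t0027 nil-copy_stream tests point the same way). A hedged sketch, assuming the option accepts an object responding to copy_stream, or nil to disable it:

    # sketch only: semantics assumed from the COPY_STREAM usage above
    Rainbows! do
      use :ThreadPool
      copy_stream IO.respond_to?(:copy_stream) ? IO : nil
    end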
data/lib/rainbows/revactor.rb
CHANGED
@@ -3,10 +3,14 @@ require 'revactor'
 require 'fcntl'
 Revactor::VERSION >= '0.1.5' or abort 'revactor 0.1.5 is required'
 
-# Enables use of the Actor model through
-#
-#
-#
+# Enables use of the Actor model through {Revactor}[http://revactor.org]
+# under Ruby 1.9.
+#
+# \Revactor dormant upstream, so the use of this is NOT recommended for
+# new applications.
+#
+# It spawns one long-lived Actor for every listen socket in the process
+# and spawns a new Actor for every client connection accept()-ed.
 # +worker_connections+ will limit the number of client Actors we have
 # running at any one time.
 #
@@ -18,6 +22,9 @@ Revactor::VERSION >= '0.1.5' or abort 'revactor 0.1.5 is required'
 # in the application using this model should be implemented using the
 # \Revactor library as well, to take advantage of the networking
 # concurrency features this model provides.
+#
+# === RubyGem Requirements
+# * revactor 0.1.5 or later
 module Rainbows::Revactor
   autoload :Client, 'rainbows/revactor/client'
   autoload :Proxy, 'rainbows/revactor/proxy'
@@ -34,8 +41,8 @@ module Rainbows::Revactor
     limit = worker_connections
     actor_exit = Case[:exit, Actor, Object]
 
-    revactorize_listeners.each do |l,
-      Actor.spawn
+    revactorize_listeners.each do |l,close,accept|
+      Actor.spawn do
         Actor.current.trap_exit = true
         l.controller = l.instance_variable_set(:@receiver, Actor.current)
         begin
@@ -73,7 +80,7 @@ module Rainbows::Revactor
     # ignore, let another worker process take it
   end
 
-  def revactorize_listeners
+  def revactorize_listeners #:nodoc:
     LISTENERS.map do |s|
       case s
       when TCPServer
data/lib/rainbows/revactor/client.rb
CHANGED
@@ -4,8 +4,8 @@ require 'fcntl'
 class Rainbows::Revactor::Client
   autoload :TeeSocket, 'rainbows/revactor/client/tee_socket'
   RD_ARGS = {}
-  Rainbows.keepalive_timeout > 0 and
-    RD_ARGS[:timeout] = Rainbows.keepalive_timeout
+  Rainbows.server.keepalive_timeout > 0 and
+    RD_ARGS[:timeout] = Rainbows.server.keepalive_timeout
   attr_reader :kgio_addr
 
   def initialize(client)
data/lib/rainbows/stream_file.rb
CHANGED
@@ -5,10 +5,17 @@
 # models. We always maintain our own file offsets in userspace because
 # because sendfile() implementations offer pread()-like idempotency for
 # concurrency (multiple clients can read the same underlying file handle).
-class Rainbows::StreamFile
+class Rainbows::StreamFile
+  attr_reader :to_io
+  attr_accessor :offset, :count
+
+  def initialize(offset, count, io, body)
+    @offset, @count, @to_io, @body = offset, count, io, body
+  end
+
   def close
-    body.close if body.respond_to?(:close)
-    to_io.close unless to_io.closed?
-
+    @body.close if @body.respond_to?(:close)
+    @to_io.close unless @to_io.closed?
+    @to_io = nil
   end
 end
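StreamFile moves from an apparently Struct-based definition to an explicit class with attr accessors and a four-argument constructor. A minimal sketch of constructing one by hand, with a hypothetical path:

    # sketch: wrap an already-open file for a sendfile()/copy_stream writer
    io = File.open("/path/to/some/static.file")   # hypothetical path
    sf = Rainbows::StreamFile.new(0, io.stat.size, io, nil)
    sf.offset   # => 0, bytes already sent
    sf.count    # => bytes remaining to send
    sf.close    # closes the body (if it responds to close) and the IO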
data/lib/rainbows/thread_pool.rb
CHANGED
@@ -1,24 +1,21 @@
 # -*- encoding: binary -*-
 
 # Implements a worker thread pool model. This is suited for platforms
-# like Ruby 1.9, where the cost of dynamically spawning a new thread
-#
-#
+# like Ruby 1.9, where the cost of dynamically spawning a new thread for
+# every new client connection is higher than with the ThreadSpawn model,
+# but the cost of an idle thread is low (e.g. NPTL under Linux).
 #
-# This model should provide a high level of compatibility with all
-#
-#
-#
+# This model should provide a high level of compatibility with all Ruby
+# implementations, and most libraries and applications. Applications
+# running under this model should be thread-safe but not necessarily
+# reentrant.
 #
-# Applications using this model are required to be thread-safe.
-#
-# connecting to external services and need to perform DNS lookups,
-# consider using the "resolv-replace" library which replaces parts of
-# the core Socket package with concurrent DNS lookup capabilities.
+# Applications using this model are required to be thread-safe. Threads
+# are never spawned dynamically under this model.
 #
-#
-#
-
+# If you're using green threads (MRI 1.8) and need to perform DNS lookups,
+# consider using the "resolv-replace" library which replaces parts of the
+# core Socket package with concurrent DNS lookup capabilities.
 module Rainbows::ThreadPool
   include Rainbows::Base
 
@@ -62,17 +59,4 @@ module Rainbows::ThreadPool
       Rainbows::Error.listen_loop(e)
     end while Rainbows.alive
   end
-
-  def join_threads(threads) # :nodoc:
-    Rainbows.quit!
-    threads.delete_if do |thr|
-      Rainbows.tick
-      begin
-        thr.run
-        thr.join(0.01)
-      rescue
-        true
-      end
-    end until threads.empty?
-  end
 end
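The rewritten comments make the tradeoff explicit: a fixed pool of threads, never spawned dynamically, suited to platforms where idle threads are cheap (e.g. NPTL on Ruby 1.9). Selecting the model follows the same configurator pattern shown for NeverBlock earlier; the values below are examples only:

    # sketch: a fixed worker-thread pool per worker process
    Rainbows! do
      use :ThreadPool
      worker_connections 30   # example; bounds concurrent clients per worker
    end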
data/lib/rainbows/thread_spawn.rb
CHANGED
@@ -2,18 +2,19 @@
 require 'thread'
 
 # Spawns a new thread for every client connection we accept(). This
-# model is recommended for platforms like Ruby 1.8 where spawning
-# threads is inexpensive
+# model is recommended for platforms like Ruby (MRI) 1.8 where spawning
+# new threads is inexpensive, but still seems to work well enough with
+# good native threading implementations such as NPTL under Linux on
+# Ruby (MRI/YARV) 1.9
 #
-# This model should provide a high level of compatibility with all
-#
-#
-#
+# This model should provide a high level of compatibility with all Ruby
+# implementations, and most libraries and applications. Applications
+# running under this model should be thread-safe but not necessarily
+# reentrant.
 #
-# If you're
-#
-#
-# capabilities
+# If you're using green threads (MRI 1.8) and need to perform DNS lookups,
+# consider using the "resolv-replace" library which replaces parts of the
+# core Socket package with concurrent DNS lookup capabilities.
 
 module Rainbows::ThreadSpawn
   include Rainbows::Base
@@ -24,12 +25,12 @@ module Rainbows::ThreadSpawn
     limit = worker_connections
     nr = 0
     LISTENERS.each do |l|
-      klass.new
+      klass.new do
        begin
          if lock.synchronize { nr >= limit }
            worker_yield
-          elsif
-            klass.new(
+          elsif client = l.kgio_accept
+            klass.new(client) do |c|
              begin
                lock.synchronize { nr += 1 }
                c.process_loop
data/lib/rainbows/thread_timeout.rb
CHANGED
@@ -29,14 +29,35 @@ require 'thread'
 #
 # Timed-out requests will cause this middleware to return with a
 # "408 Request Timeout" response.
-
+#
+# == Caveats
+#
+# Badly-written C extensions may not be timed out. Audit and fix
+# (or remove) those extensions before relying on this module.
+#
+# Do NOT, under any circumstances nest and load this in
+# the same middleware stack. You may load this in parallel in the
+# same process completely independent middleware stacks, but DO NOT
+# load this twice so it nests. Things will break!
+#
+# This will behave badly if system time is changed since Ruby
+# does not expose a monotonic clock for users, so don't change
+# the system time while this is running. All servers should be
+# running ntpd anyways.
 class Rainbows::ThreadTimeout
 
   # :stopdoc:
-
-
+  #
+  # we subclass Exception to get rid of normal StandardError rescues
+  # in app-level code. timeout.rb does something similar
+  ExecutionExpired = Class.new(Exception)
+
+  # The MRI 1.8 won't be usable in January 2038, we'll raise this
+  # when we eventually drop support for 1.8 (before 2038, hopefully)
+  NEVER = Time.at(0x7fffffff)
 
   def initialize(app, opts)
+    # @timeout must be Numeric since we add this to Time
     @timeout = opts[:timeout]
     Numeric === @timeout or
       raise TypeError, "timeout=#{@timeout.inspect} is not numeric"
@@ -44,56 +65,123 @@ class Rainbows::ThreadTimeout
     if @threshold = opts[:threshold]
       Integer === @threshold or
         raise TypeError, "threshold=#{@threshold.inspect} is not an integer"
-      @threshold == 0 and
-
-      @threshold < 0 and
-        @threshold += Rainbows.server.worker_connections
+      @threshold == 0 and raise ArgumentError, "threshold=0 does not make sense"
+      @threshold < 0 and @threshold += Rainbows.server.worker_connections
     end
     @app = app
+
+    # This is the main datastructure for communicating Threads eligible
+    # for expiration to the watchdog thread. If the eligible thread
+    # completes its job before its expiration time, it will delete itself
+    # @active. If the watchdog thread notices the thread is timed out,
+    # the watchdog thread will delete the thread from this hash as it
+    # raises the exception.
+    #
+    # key: Thread to be timed out
+    # value: Time of expiration
     @active = {}
+
+    # Protects all access to @active. It is important since it also limits
+    # safe points for asynchronously raising exceptions.
     @lock = Mutex.new
+
+    # There is one long-running watchdog thread that watches @active and
+    # kills threads that have been running too long
+    # see start_watchdog
+    @watchdog = nil
   end
 
+  # entry point for Rack middleware
   def call(env)
-
-
-
-
+    # Once we have this lock, we ensure two things:
+    #   1) there is only one watchdog thread started
+    #   2) we can't be killed once we have this lock, it's unlikely
+    #      to happen unless @timeout is really low and the machine
+    #      is very slow.
+    @lock.lock
+
+    # we're dead if anything in the next two lines raises, but it's
+    # highly unlikely that they will, and anything such as NoMemoryError
+    # is hopeless and we might as well just die anyways.
+    # initialize guarantees @timeout will be Numeric
+    start_watchdog(env) unless @watchdog
+    @active[Thread.current] = Time.now + @timeout
+
     begin
+      # It is important to unlock inside this begin block
+      # Mutex#unlock really can't fail here since we did a successful
+      # Mutex#lock before
+      @lock.unlock
+
+      # Once the Mutex was unlocked, we're open to Thread#raise from
+      # the watchdog process. This is the main place we expect to receive
+      # Thread#raise. @app is of course the next layer of the Rack
+      # application stack
       @app.call(env)
     ensure
+      # It's still possible to receive a Thread#raise here from
+      # the watchdog, but that's alright, the "rescue ExecutionExpired"
+      # line will catch that.
      @lock.synchronize { @active.delete(Thread.current) }
+      # Thread#raise no longer possible here
    end
  rescue ExecutionExpired
+    # If we got here, it's because the watchdog thread raised an exception
+    # here to kill us. The watchdog uses @active.delete_if with a lock,
+    # so we guaranteed it's
    [ 408, { 'Content-Type' => 'text/plain', 'Content-Length' => '0' }, [] ]
  end
 
-
-
+  # The watchdog thread is the one that does the job of killing threads
+  # that have expired.
+  def start_watchdog(env)
+    @watchdog = Thread.new(env["rack.logger"]) do |logger|
      begin
-        if
-
-
-        #
-        #
-        #
-        #
-
-          next_wake > 0 ? sleep(next_wake) : Thread.pass
-        else
-          sleep(@timeout)
+        if @threshold
+          # Hash#size is atomic in MRI 1.8 and 1.9 and we
+          # expect that from other implementations.
+          #
+          # Even without a memory barrier, sleep(@timeout) vs
+          # sleep(@timeout - time-for-SMP-to-synchronize-a-word)
+          # is too trivial to worry about here.
+          sleep(@timeout) while @active.size < @threshold
        end
 
-
-        next if @threshold && @active.size < @threshold
+        next_expiry = NEVER
 
-
+        # We always lock access to @active, so we can't kill threads
+        # that are about to release themselves from the eye of the
+        # watchdog thread.
        @lock.synchronize do
-
-
+          now = Time.now
+          @active.delete_if do |thread, expire_at|
+            # We also use this loop to get the maximum possible time to
+            # sleep for if we're not killing the thread.
+            if expire_at > now
+              next_expiry = expire_at if next_expiry > expire_at
+              false
+            else
+              # Terminate execution and delete this from the @active
+              thread.raise(ExecutionExpired)
+              true
+            end
          end
        end
-
+
+        # We always try to sleep as long as possible to avoid consuming
+        # resources from the app. So that's the user-configured @timeout
+        # value.
+        if next_expiry == NEVER
+          sleep(@timeout)
+        else
+          # sleep until the next known thread is about to expire.
+          sec = next_expiry - Time.now
+          sec > 0.0 ? sleep(sec) : Thread.pass # give other threads a chance
+        end
+      rescue => e
+        # just in case
+        logger.error e
+      end while true # we run this forever
    end
  end
  # :startdoc: