rainbows 0.97.0 → 1.0.0pre1
This diff represents the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- data/.manifest +14 -2
- data/ChangeLog +87 -118
- data/GIT-VERSION-FILE +1 -1
- data/GIT-VERSION-GEN +1 -1
- data/GNUmakefile +1 -1
- data/README +1 -1
- data/bin/rainbows +15 -20
- data/lib/rainbows/actor_spawn.rb +20 -22
- data/lib/rainbows/app_pool.rb +89 -93
- data/lib/rainbows/base.rb +4 -61
- data/lib/rainbows/client.rb +9 -0
- data/lib/rainbows/configurator.rb +37 -39
- data/lib/rainbows/const.rb +18 -18
- data/lib/rainbows/dev_fd_response.rb +2 -1
- data/lib/rainbows/error.rb +39 -37
- data/lib/rainbows/ev_core.rb +103 -109
- data/lib/rainbows/event_machine.rb +188 -196
- data/lib/rainbows/fiber/base.rb +69 -88
- data/lib/rainbows/fiber/io/compat.rb +13 -0
- data/lib/rainbows/fiber/io/methods.rb +49 -0
- data/lib/rainbows/fiber/io/pipe.rb +7 -0
- data/lib/rainbows/fiber/io/socket.rb +7 -0
- data/lib/rainbows/fiber/io.rb +125 -84
- data/lib/rainbows/fiber/rev/heartbeat.rb +8 -0
- data/lib/rainbows/fiber/rev/kato.rb +22 -0
- data/lib/rainbows/fiber/rev/methods.rb +55 -0
- data/lib/rainbows/fiber/rev/server.rb +32 -0
- data/lib/rainbows/fiber/rev/sleeper.rb +15 -0
- data/lib/rainbows/fiber/rev.rb +6 -164
- data/lib/rainbows/fiber.rb +23 -5
- data/lib/rainbows/fiber_pool.rb +31 -37
- data/lib/rainbows/fiber_spawn.rb +21 -28
- data/lib/rainbows/http_server.rb +80 -80
- data/lib/rainbows/max_body.rb +26 -28
- data/lib/rainbows/process_client.rb +61 -0
- data/lib/rainbows/queue_pool.rb +19 -22
- data/lib/rainbows/read_timeout.rb +28 -0
- data/lib/rainbows/rev/client.rb +10 -10
- data/lib/rainbows/rev/core.rb +2 -3
- data/lib/rainbows/rev/thread.rb +1 -1
- data/lib/rainbows/rev_fiber_spawn.rb +21 -24
- data/lib/rainbows/revactor.rb +18 -15
- data/lib/rainbows/thread_pool.rb +2 -4
- data/lib/rainbows/thread_spawn.rb +1 -2
- data/lib/rainbows/writer_thread_pool.rb +14 -4
- data/lib/rainbows/writer_thread_spawn.rb +14 -4
- data/lib/rainbows.rb +7 -15
- data/local.mk.sample +3 -11
- data/rainbows.gemspec +2 -4
- data/t/kgio-pipe-response.ru +10 -0
- data/t/t0035-kgio-pipe-response.sh +70 -0
- data/t/test_isolate.rb +2 -1
- metadata +46 -30
- data/lib/rainbows/acceptor.rb +0 -26
- data/lib/rainbows/byte_slice.rb +0 -17
data/lib/rainbows/app_pool.rb
CHANGED
@@ -2,105 +2,101 @@
 
 require 'thread'
 
-
+# Rack middleware to limit application-level concurrency independently
+# of network conncurrency in \Rainbows! Since the +worker_connections+
+# option in \Rainbows! is only intended to limit the number of
+# simultaneous clients, this middleware may be used to limit the
+# number of concurrent application dispatches independently of
+# concurrent clients.
+#
+# Instead of using M:N concurrency in \Rainbows!, this middleware
+# allows M:N:P concurrency where +P+ is the AppPool +:size+ while
+# +M+ remains the number of +worker_processes+ and +N+ remains the
+# number of +worker_connections+.
+#
+# rainbows master
+# \_ rainbows worker[0]
+# | \_ client[0,0]------\ ___app[0]
+# | \_ client[0,1]-------\ /___app[1]
+# | \_ client[0,2]-------->--< ...
+# | ... __/ `---app[P]
+# | \_ client[0,N]----/
+# \_ rainbows worker[1]
+# | \_ client[1,0]------\ ___app[0]
+# | \_ client[1,1]-------\ /___app[1]
+# | \_ client[1,2]-------->--< ...
+# | ... __/ `---app[P]
+# | \_ client[1,N]----/
+# \_ rainbows worker[M]
+# \_ client[M,0]------\ ___app[0]
+# \_ client[M,1]-------\ /___app[1]
+# \_ client[M,2]-------->--< ...
+# ... __/ `---app[P]
+# \_ client[M,N]----/
+#
+# AppPool should be used if you want to enforce a lower value of +P+
+# than +N+.
+#
+# AppPool has no effect on the Rev or EventMachine concurrency models
+# as those are single-threaded/single-instance as far as application
+# concurrency goes. In other words, +P+ is always +one+ when using
+# Rev or EventMachine. As of \Rainbows! 0.7.0, it is safe to use with
+# Revactor and the new FiberSpawn and FiberPool concurrency models.
+#
+# Since this is Rack middleware, you may load this in your Rack
+# config.ru file and even use it in threaded servers other than
+# \Rainbows!
+#
+# use Rainbows::AppPool, :size => 30
+# map "/lobster" do
+# run Rack::Lobster.new
+# end
+#
+# You may to load this earlier or later in your middleware chain
+# depending on the concurrency/copy-friendliness of your middleware(s).
+class Rainbows::AppPool < Struct.new(:pool, :re)
 
-#
-#
-#
-#
-#
-#
-#
-
-
-
-
-
-
-
-
-
-
-# | ... __/ `---app[P]
-# | \_ client[0,N]----/
-# \_ rainbows worker[1]
-# | \_ client[1,0]------\ ___app[0]
-# | \_ client[1,1]-------\ /___app[1]
-# | \_ client[1,2]-------->--< ...
-# | ... __/ `---app[P]
-# | \_ client[1,N]----/
-# \_ rainbows worker[M]
-# \_ client[M,0]------\ ___app[0]
-# \_ client[M,1]-------\ /___app[1]
-# \_ client[M,2]-------->--< ...
-# ... __/ `---app[P]
-# \_ client[M,N]----/
-#
-# AppPool should be used if you want to enforce a lower value of +P+
-# than +N+.
-#
-# AppPool has no effect on the Rev or EventMachine concurrency models
-# as those are single-threaded/single-instance as far as application
-# concurrency goes. In other words, +P+ is always +one+ when using
-# Rev or EventMachine. As of \Rainbows! 0.7.0, it is safe to use with
-# Revactor and the new FiberSpawn and FiberPool concurrency models.
-#
-# Since this is Rack middleware, you may load this in your Rack
-# config.ru file and even use it in threaded servers other than
-# \Rainbows!
-#
-# use Rainbows::AppPool, :size => 30
-# map "/lobster" do
-# run Rack::Lobster.new
-# end
-#
-# You may to load this earlier or later in your middleware chain
-# depending on the concurrency/copy-friendliness of your middleware(s).
-
-class AppPool < Struct.new(:pool, :re)
-
-# +opt+ is a hash, +:size+ is the size of the pool (default: 6)
-# meaning you can have up to 6 concurrent instances of +app+
-# within one \Rainbows! worker process. We support various
-# methods of the +:copy+ option: +dup+, +clone+, +deep+ and +none+.
-# Depending on your +app+, one of these options should be set.
-# The default +:copy+ is +:dup+ as is commonly seen in existing
-# Rack middleware.
-def initialize(app, opt = {})
-self.pool = Queue.new
-(1...(opt[:size] || 6)).each do
-pool << case (opt[:copy] || :dup)
-when :none then app
-when :dup then app.dup
-when :clone then app.clone
-when :deep then Marshal.load(Marshal.dump(app)) # unlikely...
-else
-raise ArgumentError, "unsupported copy method: #{opt[:copy].inspect}"
-end
+# +opt+ is a hash, +:size+ is the size of the pool (default: 6)
+# meaning you can have up to 6 concurrent instances of +app+
+# within one \Rainbows! worker process. We support various
+# methods of the +:copy+ option: +dup+, +clone+, +deep+ and +none+.
+# Depending on your +app+, one of these options should be set.
+# The default +:copy+ is +:dup+ as is commonly seen in existing
+# Rack middleware.
+def initialize(app, opt = {})
+self.pool = Queue.new
+(1...(opt[:size] || 6)).each do
+pool << case (opt[:copy] || :dup)
+when :none then app
+when :dup then app.dup
+when :clone then app.clone
+when :deep then Marshal.load(Marshal.dump(app)) # unlikely...
+else
+raise ArgumentError, "unsupported copy method: #{opt[:copy].inspect}"
 end
-pool << app # the original
 end
+pool << app # the original
+end
 
-
-
+# Rack application endpoint, +env+ is the Rack environment
+def call(env) # :nodoc:
 
-
-
-
-
-
-
-
-
-
-end
-true
+# we have to do this check at call time (and not initialize)
+# because of preload_app=true and models being changeable with SIGHUP
+# fortunately this is safe for all the reentrant (but not multithreaded)
+# classes that depend on it and a safe no-op for multithreaded
+# concurrency models
+self.re ||= begin
+case env["rainbows.model"]
+when :FiberSpawn, :FiberPool, :Revactor, :NeverBlock, :RevFiberSpawn
+self.pool = Rainbows::Fiber::Queue.new(pool)
 end
-
-app = pool.shift
-app.call(env)
-ensure
-pool << app
+true
 end
+
+app = pool.shift
+app.call(env)
+ensure
+pool << app
 end
 end
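The rdoc example embedded in the new comment block maps directly onto a config.ru. A minimal sketch, assuming the app runs under \Rainbows! so that Rainbows::AppPool is already loadable; the :size value and the /lobster endpoint are simply the illustrative values from the comment, not recommendations:

    # config.ru -- illustrative only; values taken from the AppPool rdoc above
    require 'rack/lobster'

    # allow at most 30 concurrent dispatches into the app per worker process,
    # independently of worker_connections
    use Rainbows::AppPool, :size => 30
    map "/lobster" do
      run Rack::Lobster.new
    end

With worker_connections set higher than 30, at most 30 requests per worker run application code at any instant; the rest wait for an app instance to come back to the pool.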
data/lib/rainbows/base.rb
CHANGED
@@ -8,14 +8,10 @@
 module Rainbows::Base
 
 # :stopdoc:
-include Rainbows::
-include Rainbows::Response
+include Rainbows::ProcessClient
 
 # shortcuts...
 G = Rainbows::G
-NULL_IO = Unicorn::HttpRequest::NULL_IO
-TeeInput = Rainbows::TeeInput
-HttpParser = Unicorn::HttpParser
 
 # this method is called by all current concurrency models
 def init_worker_process(worker) # :nodoc:
@@ -24,11 +20,8 @@ module Rainbows::Base
 Rainbows::MaxBody.setup
 G.tmp = worker.tmp
 
-
-
-require "io/nonblock"
-Rainbows::HttpServer::LISTENERS.each { |l| l.nonblock = true }
-end
+listeners = Rainbows::HttpServer::LISTENERS
+Rainbows::HttpServer::IO_PURGATORY.concat(listeners)
 
 # we're don't use the self-pipe mechanism in the Rainbows! worker
 # since we don't defer reopening logs
@@ -36,60 +29,10 @@ module Rainbows::Base
 trap(:USR1) { reopen_worker_logs(worker.nr) }
 trap(:QUIT) { G.quit! }
 [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+Rainbows::ProcessClient.const_set(:APP, G.server.app)
 logger.info "Rainbows! #@use worker_connections=#@worker_connections"
 end
 
-def wait_headers_readable(client) # :nodoc:
-IO.select([client], nil, nil, G.kato)
-end
-
-# once a client is accepted, it is processed in its entirety here
-# in 3 easy steps: read request, call app, write app response
-# this is used by synchronous concurrency models
-# Base, ThreadSpawn, ThreadPool
-def process_client(client) # :nodoc:
-buf = client.readpartial(CHUNK_SIZE) # accept filters protect us here
-hp = HttpParser.new
-env = {}
-remote_addr = Rainbows.addr(client)
-
-begin # loop
-until hp.headers(env, buf)
-wait_headers_readable(client) or return
-buf << client.readpartial(CHUNK_SIZE)
-end
-
-env[CLIENT_IO] = client
-env[RACK_INPUT] = 0 == hp.content_length ?
-NULL_IO : TeeInput.new(client, env, hp, buf)
-env[REMOTE_ADDR] = remote_addr
-status, headers, body = app.call(env.update(RACK_DEFAULTS))
-
-if 100 == status.to_i
-client.write(EXPECT_100_RESPONSE)
-env.delete(HTTP_EXPECT)
-status, headers, body = app.call(env)
-end
-
-if hp.headers?
-headers = HH.new(headers)
-range = make_range!(env, status, headers) and status = range.shift
-env = false unless hp.keepalive? && G.alive
-headers[CONNECTION] = env ? KEEP_ALIVE : CLOSE
-client.write(response_header(status, headers))
-end
-write_body(client, body, range)
-end while env && env.clear && hp.reset.nil?
-# if we get any error, try to write something back to the client
-# assuming we haven't closed the socket, but don't get hung up
-# if the socket is already closed or broken. We'll always ensure
-# the socket is closed at the end of this function
-rescue => e
-Rainbows::Error.write(client, e)
-ensure
-client.close unless client.closed?
-end
-
 def self.included(klass) # :nodoc:
 klass.const_set :LISTENERS, Rainbows::HttpServer::LISTENERS
 klass.const_set :G, Rainbows::G
data/lib/rainbows/configurator.rb
CHANGED
@@ -1,46 +1,44 @@
 # -*- encoding: binary -*-
-module Rainbows
 
-
-
-
-
-# configures \Rainbows! with a given concurrency model to +use+ and
-# a +worker_connections+ upper-bound. This method may be called
-# inside a Unicorn/\Rainbows! configuration file:
-#
-# Rainbows! do
-# use :ThreadSpawn # concurrency model to use
-# worker_connections 400
-# keepalive_timeout 0 # zero disables keepalives entirely
-# client_max_body_size 5*1024*1024 # 5 megabytes
-# end
-#
-# # the rest of the Unicorn configuration
-# worker_processes 8
-#
-# See the documentation for the respective Revactor, ThreadSpawn,
-# and ThreadPool classes for descriptions and recommendations for
-# each of them. The total number of clients we're able to serve is
-# +worker_processes+ * +worker_connections+, so in the above example
-# we can serve 8 * 400 = 3200 clients concurrently.
-#
-# The default is +keepalive_timeout+ is 5 seconds, which should be
-# enough under most conditions for browsers to render the page and
-# start retrieving extra elements for. Increasing this beyond 5
-# seconds is not recommended. Zero disables keepalive entirely
-# (but pipelining fully-formed requests is still works).
-#
-# The default +client_max_body_size+ is 1 megabyte (1024 * 1024 bytes),
-# setting this to +nil+ will disable body size checks and allow any
-# size to be specified.
-def Rainbows!(&block)
-block_given? or raise ArgumentError, "Rainbows! requires a block"
-HttpServer.setup(block)
-end
+# This module adds \Rainbows! to the
+# {Unicorn::Configurator}[http://unicorn.bogomips.org/Unicorn/Configurator.html]
+module Rainbows::Configurator
 
+# configures \Rainbows! with a given concurrency model to +use+ and
+# a +worker_connections+ upper-bound. This method may be called
+# inside a Unicorn/\Rainbows! configuration file:
+#
+# Rainbows! do
+# use :ThreadSpawn # concurrency model to use
+# worker_connections 400
+# keepalive_timeout 0 # zero disables keepalives entirely
+# client_max_body_size 5*1024*1024 # 5 megabytes
+# end
+#
+# # the rest of the Unicorn configuration
+# worker_processes 8
+#
+# See the documentation for the respective Revactor, ThreadSpawn,
+# and ThreadPool classes for descriptions and recommendations for
+# each of them. The total number of clients we're able to serve is
+# +worker_processes+ * +worker_connections+, so in the above example
+# we can serve 8 * 400 = 3200 clients concurrently.
+#
+# The default is +keepalive_timeout+ is 5 seconds, which should be
+# enough under most conditions for browsers to render the page and
+# start retrieving extra elements for. Increasing this beyond 5
+# seconds is not recommended. Zero disables keepalive entirely
+# (but pipelining fully-formed requests is still works).
+#
+# The default +client_max_body_size+ is 1 megabyte (1024 * 1024 bytes),
+# setting this to +nil+ will disable body size checks and allow any
+# size to be specified.
+def Rainbows!(&block)
+block_given? or raise ArgumentError, "Rainbows! requires a block"
+Rainbows::HttpServer.setup(block)
 end
 end
 
+# :enddoc:
 # inject the Rainbows! method into Unicorn::Configurator
-Unicorn::Configurator.
+Unicorn::Configurator.__send__(:include, Rainbows::Configurator)
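The Rainbows! block documented above sits in the same file as the rest of the Unicorn configuration. A minimal sketch assembled only from the values in the rdoc comment (the numbers are the documented examples, not recommendations):

    # rainbows configuration file, passed to the rainbows launcher the
    # same way a configuration file is passed to unicorn
    Rainbows! do
      use :ThreadSpawn                 # concurrency model to use
      worker_connections 400
      keepalive_timeout 0              # zero disables keepalives entirely
      client_max_body_size 5*1024*1024 # 5 megabytes
    end

    # the rest of the Unicorn configuration
    worker_processes 8

As the comment notes, capacity is worker_processes * worker_connections, so this example can serve 8 * 400 = 3200 clients concurrently.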
data/lib/rainbows/const.rb
CHANGED
@@ -1,28 +1,28 @@
 # -*- encoding: binary -*-
 # :enddoc:
-module Rainbows
+module Rainbows::Const
 
-
-RAINBOWS_VERSION = '0.97.0'
+RAINBOWS_VERSION = '1.0.0pre1'
 
-
+include Unicorn::Const
 
-
-
+RACK_DEFAULTS = Unicorn::HttpRequest::DEFAULTS.update({
+"SERVER_SOFTWARE" => "Rainbows! #{RAINBOWS_VERSION}",
 
-
-
-
-
+# using the Rev model, we'll automatically chunk pipe and socket objects
+# if they're the response body. Unset by default.
+# "rainbows.autochunk" => false,
+})
 
-
-
-
-
-
+# client IO object that supports reading and writing directly
+# without filtering it through the HTTP chunk parser.
+# Maybe we can get this renamed to "rack.io" if it becomes part
+# of the official spec, but for now it is "hack.io"
+CLIENT_IO = "hack.io".freeze
 
-
-
+ERROR_413_RESPONSE = "HTTP/1.1 413 Request Entity Too Large\r\n\r\n"
+ERROR_416_RESPONSE = "HTTP/1.1 416 Requested Range Not Satisfiable\r\n\r\n"
 
-
+RACK_INPUT = Unicorn::HttpRequest::RACK_INPUT
+REMOTE_ADDR = Unicorn::HttpRequest::REMOTE_ADDR
 end
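RACK_DEFAULTS above is merged into each request's env before the app is called, so the SERVER_SOFTWARE string (and "hack.io", where a concurrency model sets it) is visible to the application. A trivial config.ru sketch that just echoes it back; this is an illustration, not part of the package:

    run lambda { |env|
      body = "served by #{env['SERVER_SOFTWARE']}\n"  # e.g. "Rainbows! 1.0.0pre1"
      [ 200,
        { 'Content-Type' => 'text/plain',
          'Content-Length' => body.bytesize.to_s },
        [ body ] ]
    }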
data/lib/rainbows/dev_fd_response.rb
CHANGED
@@ -54,7 +54,8 @@ class Rainbows::DevFdResponse < Struct.new(:app)
 # we need to make sure our pipe output is Fiber-compatible
 case env["rainbows.model"]
 when :FiberSpawn, :FiberPool, :RevFiberSpawn
-io
+io.respond_to?(:wait_readable) or
+io = Rainbows::Fiber::IO.new(io)
 when :Revactor
 io = Rainbows::Revactor::Proxy.new(io)
 end
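This is the path exercised by the new t/kgio-pipe-response.ru and t/t0035-kgio-pipe-response.sh files in the manifest (their contents are not shown in this diff): a response body that is a pipe rather than an array of strings. A hypothetical config.ru in that spirit; the command is arbitrary, and the wrapping shown above is what keeps such an IO from blocking the fiber-based models:

    # illustrative only -- return a pipe as the Rack response body
    run lambda { |env|
      io = IO.popen('date', 'rb')   # any command writing to stdout
      [ 200, { 'Content-Type' => 'text/plain' }, io ]
    }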
data/lib/rainbows/error.rb
CHANGED
@@ -1,48 +1,50 @@
 # -*- encoding: binary -*-
 # :enddoc:
-module Rainbows
+module Rainbows::Error
 
-
-class << self
+G = Rainbows::G
 
-
-
-
-
-
-
-
-
-
-
-G.server.logger.error "app error: #{e.inspect}"
-G.server.logger.error e.backtrace.join("\n")
-rescue
+# if we get any error, try to write something back to the client
+# assuming we haven't closed the socket, but don't get hung up
+# if the socket is already closed or broken. We'll always ensure
+# the socket is closed at the end of this function
+def self.write(io, e)
+if msg = response(e)
+if io.respond_to?(:kgio_trywrite)
+io.kgio_trywrite(msg)
+else
+io.write_nonblock(msg)
 end
+end
+rescue
+end
 
-
-
-
-
-
-end
+def self.app(e)
+G.server.logger.error "app error: #{e.inspect}"
+G.server.logger.error e.backtrace.join("\n")
+rescue
+end
 
-
-
-
-
-
-
-Const::ERROR_416_RESPONSE
-when Unicorn::HttpParserError
-Const::ERROR_400_RESPONSE # try to tell the client they're bad
-when IOError # HttpParserError is an IOError
-else
-app(e)
-Const::ERROR_500_RESPONSE
-end
-end
+def self.listen_loop(e)
+G.alive or return
+G.server.logger.error "listen loop error: #{e.inspect}."
+G.server.logger.error e.backtrace.join("\n")
+rescue
+end
 
+def self.response(e)
+case e
+when EOFError, Errno::ECONNRESET, Errno::EPIPE, Errno::EINVAL,
+Errno::EBADF, Errno::ENOTCONN
+# swallow error if client shuts down one end or disconnects
+when Rainbows::Response416
+Rainbows::Const::ERROR_416_RESPONSE
+when Unicorn::HttpParserError
+Rainbows::Const::ERROR_400_RESPONSE # try to tell the client they're bad
+when IOError # HttpParserError is an IOError
+else
+app(e)
+Rainbows::Const::ERROR_500_RESPONSE
 end
 end
 end
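The write/app/listen_loop/response split above mirrors how the synchronous concurrency models drive this module (compare the rescue/ensure block removed from base.rb earlier in this diff). A minimal sketch, assuming the rainbows gem is loaded, `client` is a connected socket, and `handle_request` is a hypothetical stand-in for the real request loop:

    begin
      handle_request(client)             # hypothetical; may raise parser or IO errors
    rescue => e
      Rainbows::Error.write(client, e)   # best-effort error response; never raises
    ensure
      client.close unless client.closed?
    end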