rainbows 0.6.0 → 0.7.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (55)
  1. data/.document +1 -0
  2. data/Documentation/GNUmakefile +4 -1
  3. data/Documentation/comparison.css +6 -0
  4. data/Documentation/comparison.haml +297 -0
  5. data/GIT-VERSION-GEN +1 -1
  6. data/GNUmakefile +24 -17
  7. data/README +32 -28
  8. data/Summary +7 -0
  9. data/TODO +4 -6
  10. data/bin/rainbows +2 -2
  11. data/lib/rainbows.rb +33 -3
  12. data/lib/rainbows/actor_spawn.rb +29 -0
  13. data/lib/rainbows/app_pool.rb +17 -6
  14. data/lib/rainbows/base.rb +10 -13
  15. data/lib/rainbows/const.rb +1 -1
  16. data/lib/rainbows/dev_fd_response.rb +6 -0
  17. data/lib/rainbows/error.rb +34 -0
  18. data/lib/rainbows/ev_core.rb +3 -12
  19. data/lib/rainbows/event_machine.rb +7 -9
  20. data/lib/rainbows/fiber.rb +15 -0
  21. data/lib/rainbows/fiber/base.rb +112 -0
  22. data/lib/rainbows/fiber/io.rb +65 -0
  23. data/lib/rainbows/fiber/queue.rb +35 -0
  24. data/lib/rainbows/fiber_pool.rb +44 -0
  25. data/lib/rainbows/fiber_spawn.rb +34 -0
  26. data/lib/rainbows/http_server.rb +14 -1
  27. data/lib/rainbows/never_block.rb +69 -0
  28. data/lib/rainbows/rev.rb +7 -0
  29. data/lib/rainbows/rev/client.rb +9 -3
  30. data/lib/rainbows/rev/core.rb +2 -5
  31. data/lib/rainbows/rev/heartbeat.rb +5 -1
  32. data/lib/rainbows/rev_thread_spawn.rb +62 -60
  33. data/lib/rainbows/revactor.rb +22 -23
  34. data/lib/rainbows/thread_pool.rb +28 -26
  35. data/lib/rainbows/thread_spawn.rb +33 -33
  36. data/local.mk.sample +9 -7
  37. data/rainbows.gemspec +8 -2
  38. data/t/GNUmakefile +14 -7
  39. data/t/fork-sleep.ru +10 -0
  40. data/t/simple-http_FiberPool.ru +9 -0
  41. data/t/simple-http_FiberSpawn.ru +9 -0
  42. data/t/simple-http_NeverBlock.ru +11 -0
  43. data/t/sleep.ru +2 -0
  44. data/t/t0000-simple-http.sh +12 -1
  45. data/t/t0001-unix-http.sh +12 -1
  46. data/t/t0009-broken-app.sh +56 -0
  47. data/t/t0009.ru +13 -0
  48. data/t/t0010-keepalive-timeout-effective.sh +42 -0
  49. data/t/t0011-close-on-exec-set.sh +54 -0
  50. data/t/t0300-async_sinatra.sh +1 -1
  51. data/t/t9000-rack-app-pool.sh +1 -1
  52. data/t/t9000.ru +8 -5
  53. data/t/test-lib.sh +14 -4
  54. metadata +33 -5
  55. data/lib/rainbows/ev_thread_core.rb +0 -80
data/Summary ADDED
@@ -0,0 +1,7 @@
+ = \Rainbows! at a glance
+
+ Confused by all the options we give you? So are we! Here's some tables
+ to help keep your head straight. Remember, engineering is all about
+ trade-offs.
+
+ INCLUDE
data/TODO CHANGED
@@ -7,15 +7,13 @@ care about.
  unit tests, only integration tests that exercise externally
  visible parts.

- * Rev + Thread - current Rev model with threading, which will give
- us a streaming (but rewindable) "rack.input".
-
  * EventMachine.spawn - should be like Revactor, maybe?

- * Rev + callcc - current Rev model with callcc (should work with MBARI)
+ * {Packet,Rev,EventMachine}+Fibers

- * Fiber support - Revactor already uses these with Ruby 1.9, also not
- sure how TeeInput can be done with this.
+ * {Packet,Rev}ThreadPool
+
+ * Rev + callcc - current Rev model with callcc (should work with MBARI)

  * Omnibus - haven't looked into it, probably like Revactor with 1.8?

data/bin/rainbows CHANGED
@@ -69,7 +69,7 @@ opts = OptionParser.new("", 24, ' ') do |opts|
  opts.on("-P", "--pid FILE", "DEPRECATED") do |f|
  warn %q{Use of --pid/-P is strongly discouraged}
  warn %q{Use the 'pid' directive in the Unicorn config file instead}
- options[:pid] = File.expand_path(f)
+ options[:pid] = f
  end

  opts.on("-s", "--server SERVER",
@@ -86,7 +86,7 @@ opts = OptionParser.new("", 24, ' ') do |opts|
  end

  opts.on("-c", "--config-file FILE", "Unicorn-specific config file") do |f|
- options[:config_file] = File.expand_path(f)
+ options[:config_file] = f
  end

  # I'm avoiding Unicorn-specific config options on the command-line.
data/lib/rainbows.rb CHANGED
@@ -1,12 +1,14 @@
  # -*- encoding: binary -*-
  require 'unicorn'
+ require 'rainbows/error'
+ require 'fcntl'

  module Rainbows

  # global vars because class/instance variables are confusing me :<
  # this struct is only accessed inside workers and thus private to each
- # G.cur may not be used the network concurrency model
- class State < Struct.new(:alive,:m,:cur,:server,:tmp)
+ # G.cur may not be used in the network concurrency model
+ class State < Struct.new(:alive,:m,:cur,:kato,:server,:tmp)
  def tick
  tmp.chmod(self.m = m == 0 ? 1 : 0)
  alive && server.master_pid == Process.ppid or quit!
@@ -18,7 +20,8 @@ module Rainbows
  false
  end
  end
- G = State.new(true, 0, 0)
+ G = State.new(true, 0, 0, 2)
+ O = {}

  require 'rainbows/const'
  require 'rainbows/http_server'
@@ -43,6 +46,7 @@ module Rainbows
  # Rainbows! do
  # use :Revactor # this may also be :ThreadSpawn or :ThreadPool
  # worker_connections 400
+ # keepalive_timeout 0 # zero disables keepalives entirely
  # end
  #
  # # the rest of the Unicorn configuration
@@ -53,6 +57,12 @@ module Rainbows
  # each of them. The total number of clients we're able to serve is
  # +worker_processes+ * +worker_connections+, so in the above example
  # we can serve 8 * 400 = 3200 clients concurrently.
+ #
+ # The default +keepalive_timeout+ is 2 seconds, which should be
+ # enough under most conditions for browsers to render the page and
+ # start retrieving extra elements. Increasing this beyond 5
+ # seconds is not recommended. Zero disables keepalive entirely
+ # (but pipelining fully-formed requests still works).
  def Rainbows!(&block)
  block_given? or raise ArgumentError, "Rainbows! requires a block"
  HttpServer.setup(block)
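Putting the documented directives together, a complete configuration might look like the sketch below; the 8-worker / 400-connection numbers are simply the example values from the comment above, and :Revactor stands in for whichever concurrency model you actually pick.

  # rainbows.conf -- illustrative sketch only
  Rainbows! do
    use :Revactor            # or :ThreadSpawn, :ThreadPool, :FiberSpawn, ...
    worker_connections 400   # concurrent clients per worker process
    keepalive_timeout 2      # seconds; 0 disables keepalives entirely
  end

  # the rest is ordinary Unicorn configuration
  worker_processes 8         # 8 * 400 = 3200 concurrent clients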
@@ -69,10 +79,30 @@ module Rainbows
  :Rev => 50,
  :RevThreadSpawn => 50,
  :EventMachine => 50,
+ :FiberSpawn => 50,
+ :FiberPool => 50,
+ :ActorSpawn => 50,
+ :NeverBlock => 50,
  }.each do |model, _|
  u = model.to_s.gsub(/([a-z0-9])([A-Z0-9])/) { "#{$1}_#{$2.downcase!}" }
  autoload model, "rainbows/#{u.downcase!}"
  end
+ autoload :Fiber, 'rainbows/fiber' # core class
+
+ # returns nil if accept fails
+ if defined?(Fcntl::FD_CLOEXEC)
+ def self.accept(sock)
+ rv = sock.accept_nonblock
+ rv.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+ rv
+ rescue Errno::EAGAIN, Errno::ECONNABORTED
+ end
+ else
+ def self.accept(sock)
+ sock.accept_nonblock
+ rescue Errno::EAGAIN, Errno::ECONNABORTED
+ end
+ end

  end

data/lib/rainbows/actor_spawn.rb ADDED
@@ -0,0 +1,29 @@
+ # -*- encoding: binary -*-
+
+ require 'actor'
+ module Rainbows
+
+ # Actor concurrency model for Rubinius. We can't seem to get message
+ # passing working right, so we're throwing a Mutex into the mix for
+ # now. Hopefully somebody can fix things for us. Currently, this is
+ # exactly the same as the ThreadSpawn model since we don't use the
+ # message passing capabilities of the Actor model (and even then
+ # it wouldn't really make sense since Actors in Rubinius are just
+ # Threads underneath and our ThreadSpawn model is one layer of
+ # complexity less).
+ #
+ # This is different from the Revactor one which is not prone to race
+ # conditions within the same process at all (since it uses Fibers).
+ module ActorSpawn
+ include ThreadSpawn
+
+ # runs inside each forked worker, this sits around and waits
+ # for connections and doesn't die until the parent dies (or is
+ # given an INT, QUIT, or TERM signal)
+ def worker_loop(worker)
+ Const::RACK_DEFAULTS["rack.multithread"] = true # :(
+ init_worker_process(worker)
+ accept_loop(Actor)
+ end
+ end
+ end
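Selecting the new model is just another use directive in the Rainbows! block; a hypothetical Rubinius configuration (the connection count is only a placeholder):

  Rainbows! do
    use :ActorSpawn          # Rubinius only; currently behaves like ThreadSpawn
    worker_connections 100   # placeholder value, tune as you would ThreadSpawn
  end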
data/lib/rainbows/app_pool.rb CHANGED
@@ -42,11 +42,8 @@ module Rainbows
  # AppPool has no effect on the Rev or EventMachine concurrency models
  # as those are single-threaded/single-instance as far as application
  # concurrency goes. In other words, +P+ is always +one+ when using
- # Rev or EventMachine. AppPool currently only works with the
- # ThreadSpawn and ThreadPool models. It does not yet work reliably
- # with the Revactor model, but actors are far more lightweight and
- # probably better suited for lightweight applications that would
- # not benefit from AppPool.
+ # Rev or EventMachine. As of \Rainbows! 0.7.0, it is safe to use with
+ # Revactor and the new FiberSpawn and FiberPool concurrency models.
  #
  # Since this is Rack middleware, you may load this in your Rack
  # config.ru file and even use it in threaded servers other than
@@ -60,7 +57,7 @@ module Rainbows
  # You may want to load this earlier or later in your middleware chain
  # depending on the concurrency/copy-friendliness of your middleware(s).

- class AppPool < Struct.new(:pool)
+ class AppPool < Struct.new(:pool, :re)

  # +opt+ is a hash, +:size+ is the size of the pool (default: 6)
  # meaning you can have up to 6 concurrent instances of +app+
@@ -86,6 +83,20 @@

  # Rack application endpoint, +env+ is the Rack environment
  def call(env)
+
+ # we have to do this check at call time (and not initialize)
+ # because of preload_app=true and models being changeable with SIGHUP
+ # fortunately this is safe for all the reentrant (but not multithreaded)
+ # classes that depend on it and a safe no-op for multithreaded
+ # concurrency models
+ self.re ||= begin
+ case env["rainbows.model"]
+ when :FiberSpawn, :FiberPool, :Revactor, :NeverBlock
+ self.pool = Rainbows::Fiber::Queue.new(pool)
+ end
+ true
+ end
+
  app = pool.shift
  app.call(env)
  ensure
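As the comments above note, AppPool is plain Rack middleware, so enabling it is a one-liner in config.ru; a minimal sketch, with :size left at the documented default of 6 and a throwaway endpoint standing in for a real application:

  # config.ru -- illustrative only; the server has already loaded Rainbows
  use Rainbows::AppPool, :size => 6   # up to 6 concurrent app instances
  run lambda { |env| [ 200, { 'Content-Type' => 'text/plain' }, [ "hi\n" ] ] }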
data/lib/rainbows/base.rb CHANGED
@@ -10,10 +10,9 @@ module Rainbows
  include Rainbows::Const
  G = Rainbows::G

- def listen_loop_error(e)
- G.alive or return
- logger.error "Unhandled listen loop exception #{e.inspect}."
- logger.error e.backtrace.join("\n")
+ def handle_error(client, e)
+ msg = Error.response(e) and client.write_nonblock(msg)
+ rescue
  end

  def init_worker_process(worker)
@@ -32,7 +31,7 @@ module Rainbows
  # once a client is accepted, it is processed in its entirety here
  # in 3 easy steps: read request, call app, write app response
  def process_client(client)
- buf = client.readpartial(CHUNK_SIZE)
+ buf = client.readpartial(CHUNK_SIZE) # accept filters protect us here
  hp = HttpParser.new
  env = {}
  alive = true
@@ -40,6 +39,7 @@

  begin # loop
  while ! hp.headers(env, buf)
+ IO.select([client], nil, nil, G.kato) or return
  buf << client.readpartial(CHUNK_SIZE)
  end

@@ -59,25 +59,22 @@ module Rainbows
  out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
  HttpResponse.write(client, response, out)
  end while alive and hp.reset.nil? and env.clear
- client.close
  # if we get any error, try to write something back to the client
  # assuming we haven't closed the socket, but don't get hung up
  # if the socket is already closed or broken. We'll always ensure
  # the socket is closed at the end of this function
  rescue => e
  handle_error(client, e)
+ ensure
+ client.close
  end

  def join_threads(threads)
- G.quit!
  expire = Time.now + (timeout * 2.0)
- until (threads.delete_if { |thr| ! thr.alive? }).empty?
- threads.each { |thr|
- G.tick
- thr.join(1)
- break if Time.now >= expire
- }
+ until threads.empty? || Time.now >= expire
+ threads.delete_if { |thr| thr.alive? ? thr.join(0.01) : true }
  end
+ exit!(0) unless threads.empty?
  end

  def self.included(klass)
data/lib/rainbows/const.rb CHANGED
@@ -3,7 +3,7 @@
  module Rainbows

  module Const
- RAINBOWS_VERSION = '0.6.0'
+ RAINBOWS_VERSION = '0.7.0'

  include Unicorn::Const

data/lib/rainbows/dev_fd_response.rb CHANGED
@@ -36,6 +36,12 @@ module Rainbows
  else
  headers['X-Rainbows-Autochunk'] = 'no'
  end
+
+ # we need to make sure our pipe output is Fiber-compatible
+ case env["rainbows.model"]
+ when :FiberSpawn, :FiberPool
+ return [ status, headers.to_hash, Fiber::IO.new(io,::Fiber.current) ]
+ end
  else # unlikely, char/block device file, directory, ...
  return response
  end
data/lib/rainbows/error.rb ADDED
@@ -0,0 +1,34 @@
+ # -*- encoding: binary -*-
+ module Rainbows
+
+ class Error
+ class << self
+
+ def app(e)
+ G.server.logger.error "app error: #{e.inspect}"
+ G.server.logger.error e.backtrace.join("\n")
+ rescue
+ end
+
+ def listen_loop(e)
+ G.alive or return
+ G.server.logger.error "listen loop error: #{e.inspect}."
+ G.server.logger.error e.backtrace.join("\n")
+ rescue
+ end
+
+ def response(e)
+ case e
+ when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+ # swallow error if client shuts down one end or disconnects
+ when Unicorn::HttpParserError
+ Const::ERROR_400_RESPONSE # try to tell the client they're bad
+ else
+ app(e)
+ Const::ERROR_500_RESPONSE
+ end
+ end
+
+ end
+ end
+ end
data/lib/rainbows/ev_core.rb CHANGED
@@ -26,17 +26,7 @@
  end

  def handle_error(e)
- msg = case e
- when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
- ERROR_500_RESPONSE
- when HttpParserError # try to tell the client they're bad
- ERROR_400_RESPONSE
- else
- G.server.logger.error "Read error: #{e.inspect}"
- G.server.logger.error e.backtrace.join("\n")
- ERROR_500_RESPONSE
- end
- write(msg)
+ msg = Error.response(e) and write(msg)
  ensure
  quit
  end
@@ -74,11 +64,12 @@
  end
  when :trailers
  if @hp.trailers(@env, @buf << data)
+ @input.rewind
  app_call
  @input.close if File === @input
  end
  end
- rescue Object => e
+ rescue => e
  handle_error(e)
  end

data/lib/rainbows/event_machine.rb CHANGED
@@ -51,8 +51,9 @@
  end

  def app_call
+ set_comm_inactivity_timeout 0
  begin
- (@env[RACK_INPUT] = @input).rewind
+ @env[RACK_INPUT] = @input
  @env[REMOTE_ADDR] = @remote_addr
  @env[ASYNC_CALLBACK] = method(:response_write)

@@ -73,6 +74,7 @@
  @state = :headers
  # keepalive requests are always body-less, so @input is unchanged
  @hp.headers(@env, @buf) and next
+ set_comm_inactivity_timeout G.kato
  end
  return
  end while true
@@ -172,12 +174,9 @@

  def notify_readable
  return if CUR.size >= MAX
- begin
- io = @io.accept_nonblock
- sig = EM.attach_fd(io.fileno, false)
- CUR[sig] = Client.new(sig, io)
- rescue Errno::EAGAIN, Errno::ECONNABORTED
- end
+ io = Rainbows.accept(@io) or return
+ sig = EM.attach_fd(io.fileno, false)
+ CUR[sig] = Client.new(sig, io)
  end
  end

@@ -191,8 +190,7 @@
  EM.epoll
  EM.kqueue
  logger.info "EventMachine: epoll=#{EM.epoll?} kqueue=#{EM.kqueue?}"
- Server.const_set(:MAX, G.server.worker_connections +
- HttpServer::LISTENERS.size)
+ Server.const_set(:MAX, worker_connections + LISTENERS.size)
  EvCore.setup(Client)
  EM.run {
  conns = EM.instance_variable_get(:@conns) or
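notify_readable above now delegates to the Rainbows.accept helper added in lib/rainbows.rb: it wraps accept_nonblock, marks the accepted socket close-on-exec when Fcntl::FD_CLOEXEC is available, and returns nil when the accept would block or the connection was already aborted. Callers therefore only have to test the return value; a rough sketch of the pattern (the listener variable and process_client handler are illustrative, not taken from any one model):

  # illustrative only
  if client = Rainbows.accept(listener)   # nil simply means nothing to do yet
    process_client(client)
  end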
data/lib/rainbows/fiber.rb ADDED
@@ -0,0 +1,15 @@
+ # -*- encoding: binary -*-
+ begin
+ require 'fiber'
+ rescue LoadError
+ defined?(NeverBlock) or raise
+ end
+
+ module Rainbows
+
+ # core module for all things that use Fibers in Rainbows!
+ module Fiber
+ autoload :Base, 'rainbows/fiber/base'
+ autoload :Queue, 'rainbows/fiber/queue'
+ end
+ end
data/lib/rainbows/fiber/base.rb ADDED
@@ -0,0 +1,112 @@
+ # -*- encoding: binary -*-
+ require 'rainbows/fiber/io'
+
+ module Rainbows
+ module Fiber
+
+ # blocked readers (key: Rainbows::Fiber::IO object, value is irrelevant)
+ RD = {}
+
+ # blocked writers (key: Rainbows::Fiber::IO object, value is irrelevant)
+ WR = {}
+
+ # sleeping fibers go here (key: Fiber object, value: wakeup time)
+ ZZ = {}
+
+ # puts the current Fiber into uninterruptible sleep for at least
+ # +seconds+. Unlike Kernel#sleep, it is not possible to sleep
+ # indefinitely and be woken up later (nobody wants that in a web server,
+ # right?).
+ def self.sleep(seconds)
+ ZZ[::Fiber.current] = Time.now + seconds
+ ::Fiber.yield
+ end
+
+ # base module used by FiberSpawn and FiberPool
+ module Base
+ include Rainbows::Base
+
+ # the scheduler method that powers both FiberSpawn and FiberPool
+ # concurrency models. It times out idle clients and attempts to
+ # schedule ones that were blocked on I/O. At most it'll sleep
+ # for one second (the interval returned by the schedule_sleepers
+ # method).
+ def schedule(&block)
+ ret = begin
+ G.tick
+ RD.keys.each { |c| c.f.resume } # attempt to time out idle clients
+ t = schedule_sleepers
+ Kernel.select(RD.keys.concat(LISTENERS), WR.keys, nil, t) or return
+ rescue Errno::EINTR
+ retry
+ rescue Errno::EBADF, TypeError
+ LISTENERS.compact!
+ raise
+ end or return
+
+ # active writers first, then _all_ readers for keepalive timeout
+ ret[1].concat(RD.keys).each { |c| c.f.resume }
+
+ # accept is an expensive syscall, filter out listeners we don't want
+ (ret.first & LISTENERS).each(&block)
+ end
+
+ # wakes up any sleepers that need to be woken and
+ # returns an interval to IO.select on
+ def schedule_sleepers
+ max = nil
+ now = Time.now
+ ZZ.delete_if { |fib, time|
+ if now >= time
+ fib.resume
+ now = Time.now
+ else
+ max = time
+ false
+ end
+ }
+ max.nil? || max > (now + 1) ? 1 : max - now
+ end
+
+ def process_client(client)
+ G.cur += 1
+ io = client.to_io
+ buf = client.read_timeout or return
+ hp = HttpParser.new
+ env = {}
+ alive = true
+ remote_addr = TCPSocket === io ? io.peeraddr.last : LOCALHOST
+
+ begin # loop
+ while ! hp.headers(env, buf)
+ buf << (client.read_timeout or return)
+ end
+
+ env[RACK_INPUT] = 0 == hp.content_length ?
+ HttpRequest::NULL_IO : TeeInput.new(client, env, hp, buf)
+ env[REMOTE_ADDR] = remote_addr
+ response = APP.call(env.update(RACK_DEFAULTS))
+
+ if 100 == response.first.to_i
+ client.write(EXPECT_100_RESPONSE)
+ env.delete(HTTP_EXPECT)
+ response = APP.call(env)
+ end
+
+ alive = hp.keepalive? && G.alive
+ out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
+ HttpResponse.write(client, response, out)
+ end while alive and hp.reset.nil? and env.clear
+ rescue => e
+ handle_error(io, e)
+ ensure
+ G.cur -= 1
+ RD.delete(client)
+ WR.delete(client)
+ ZZ.delete(client.f)
+ io.close unless io.closed?
+ end
+
+ end
+ end
+ end
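Since Rainbows::Fiber.sleep parks the current Fiber in ZZ and yields back to the scheduler above, an application running under FiberSpawn or FiberPool can pause a single request without stalling the rest of the worker; a hypothetical rackup endpoint:

  # sleepy.ru -- illustrative only
  run lambda { |env|
    Rainbows::Fiber.sleep(1)   # other clients keep being served meanwhile
    [ 200, { 'Content-Type' => 'text/plain' }, [ "slept ~1s\n" ] ]
  }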