rainbows 3.2.0 → 3.3.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (70)
  1. data/.document +1 -0
  2. data/COPYING +617 -282
  3. data/Documentation/comparison.haml +81 -24
  4. data/FAQ +3 -0
  5. data/GIT-VERSION-GEN +1 -1
  6. data/LICENSE +14 -5
  7. data/README +10 -9
  8. data/Sandbox +25 -0
  9. data/TODO +2 -22
  10. data/lib/rainbows.rb +50 -49
  11. data/lib/rainbows/client.rb +6 -5
  12. data/lib/rainbows/configurator.rb +191 -37
  13. data/lib/rainbows/const.rb +1 -1
  14. data/lib/rainbows/coolio.rb +4 -1
  15. data/lib/rainbows/coolio/client.rb +2 -2
  16. data/lib/rainbows/coolio/heartbeat.rb +2 -1
  17. data/lib/rainbows/coolio_fiber_spawn.rb +12 -7
  18. data/lib/rainbows/coolio_thread_pool.rb +19 -10
  19. data/lib/rainbows/coolio_thread_spawn.rb +3 -0
  20. data/lib/rainbows/epoll.rb +27 -5
  21. data/lib/rainbows/epoll/client.rb +3 -3
  22. data/lib/rainbows/ev_core.rb +2 -1
  23. data/lib/rainbows/event_machine.rb +4 -0
  24. data/lib/rainbows/event_machine/client.rb +2 -1
  25. data/lib/rainbows/fiber.rb +5 -0
  26. data/lib/rainbows/fiber/base.rb +1 -0
  27. data/lib/rainbows/fiber/coolio/methods.rb +0 -1
  28. data/lib/rainbows/fiber/io.rb +10 -6
  29. data/lib/rainbows/fiber/io/pipe.rb +6 -1
  30. data/lib/rainbows/fiber/io/socket.rb +6 -1
  31. data/lib/rainbows/fiber_pool.rb +12 -7
  32. data/lib/rainbows/fiber_spawn.rb +11 -6
  33. data/lib/rainbows/http_server.rb +55 -59
  34. data/lib/rainbows/join_threads.rb +4 -0
  35. data/lib/rainbows/max_body.rb +29 -10
  36. data/lib/rainbows/never_block.rb +7 -10
  37. data/lib/rainbows/pool_size.rb +14 -0
  38. data/lib/rainbows/process_client.rb +23 -1
  39. data/lib/rainbows/queue_pool.rb +8 -6
  40. data/lib/rainbows/response.rb +12 -11
  41. data/lib/rainbows/revactor.rb +14 -7
  42. data/lib/rainbows/revactor/client.rb +2 -2
  43. data/lib/rainbows/stream_file.rb +11 -4
  44. data/lib/rainbows/thread_pool.rb +12 -28
  45. data/lib/rainbows/thread_spawn.rb +14 -13
  46. data/lib/rainbows/thread_timeout.rb +118 -30
  47. data/lib/rainbows/writer_thread_pool/client.rb +1 -1
  48. data/lib/rainbows/writer_thread_spawn/client.rb +2 -2
  49. data/lib/rainbows/xepoll.rb +13 -5
  50. data/lib/rainbows/xepoll/client.rb +19 -17
  51. data/lib/rainbows/xepoll_thread_pool.rb +82 -0
  52. data/lib/rainbows/xepoll_thread_pool/client.rb +129 -0
  53. data/lib/rainbows/xepoll_thread_spawn.rb +58 -0
  54. data/lib/rainbows/xepoll_thread_spawn/client.rb +121 -0
  55. data/pkg.mk +4 -0
  56. data/rainbows.gemspec +4 -1
  57. data/t/GNUmakefile +5 -1
  58. data/t/client_header_buffer_size.ru +5 -0
  59. data/t/simple-http_XEpollThreadPool.ru +10 -0
  60. data/t/simple-http_XEpollThreadSpawn.ru +10 -0
  61. data/t/t0022-copy_stream-byte-range.sh +1 -15
  62. data/t/t0026-splice-copy_stream-byte-range.sh +25 -0
  63. data/t/t0027-nil-copy_stream.sh +60 -0
  64. data/t/t0041-optional-pool-size.sh +2 -2
  65. data/t/t0042-client_header_buffer_size.sh +65 -0
  66. data/t/t9100-thread-timeout.sh +1 -6
  67. data/t/t9101-thread-timeout-threshold.sh +1 -6
  68. data/t/test-lib.sh +58 -0
  69. data/t/test_isolate.rb +9 -3
  70. metadata +47 -16
data/lib/rainbows/writer_thread_pool/client.rb
@@ -18,7 +18,7 @@ class Rainbows::WriterThreadPool::Client < Struct.new(:to_io, :q)
     }
   end
 
-  if IO.respond_to?(:copy_stream) || IO.method_defined?(:trysendfile)
+  if Rainbows::Response::COPY_STREAM || IO.method_defined?(:trysendfile)
     def write_response(status, headers, body, alive)
       if body.respond_to?(:close)
         write_response_close(status, headers, body, alive)
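The change above (repeated in the next file) swaps a per-file IO.respond_to?(:copy_stream) probe for the shared Rainbows::Response::COPY_STREAM constant. As a rough sketch of what such a load-time capability flag typically looks like (this is an assumption for illustration, not the verbatim definition from data/lib/rainbows/response.rb):

    # sketch only -- assumed shape of a load-time capability flag
    COPY_STREAM = IO.respond_to?(:copy_stream) ? IO : false

    # callers then branch on the constant instead of re-probing IO:
    if COPY_STREAM || IO.method_defined?(:trysendfile)
      # define the copy_stream/sendfile-aware write_response variants here
    end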
data/lib/rainbows/writer_thread_spawn/client.rb
@@ -21,7 +21,7 @@ class Rainbows::WriterThreadSpawn::Client < Struct.new(:to_io, :q, :thr)
     }
   end
 
-  if IO.respond_to?(:copy_stream) || IO.method_defined?(:trysendfile)
+  if Rainbows::Response::COPY_STREAM || IO.method_defined?(:trysendfile)
    def write_response(status, headers, body, alive)
      self.q ||= queue_writer
      if body.respond_to?(:close)
@@ -69,7 +69,7 @@ class Rainbows::WriterThreadSpawn::Client < Struct.new(:to_io, :q, :thr)
     end
 
     q = Queue.new
-    self.thr = Thread.new(to_io, q) do |io, q|
+    self.thr = Thread.new(to_io) do |io|
      while op = q.shift
        begin
          op, *rest = op
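Dropping q from the Thread.new argument list works because the block already closes over the q local; only to_io still needs to be passed in. A minimal standalone sketch of the same closure pattern (illustrative only, not package code):

    require "thread"

    q = Queue.new
    # the block captures +q+ by closure; passing it as a Thread.new
    # argument, as the old code did, was redundant
    thr = Thread.new($stdout) do |io|
      while op = q.shift   # a falsy item ends the writer loop
        io.write("#{op.inspect}\n")
      end
    end

    q << [:write, "hello"]
    q << nil
    thr.join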
data/lib/rainbows/xepoll.rb
@@ -1,19 +1,26 @@
 # -*- encoding: binary -*-
-# :enddoc:
 require 'raindrops'
 require 'rainbows/epoll'
 
-# Edge-triggered epoll concurrency model with blocking accept() in
-# a (hopefully) native thread. This is recommended over Epoll for
-# Ruby 1.9 users as it can workaround accept()-scalability issues
-# on multicore machines.
+# Edge-triggered epoll concurrency model with blocking accept() in a
+# (hopefully) native thread. This is just like Epoll, but recommended
+# for Ruby 1.9 users as it can avoid accept()-scalability issues on
+# multicore machines with many worker processes.
+#
+# === RubyGem Requirements
+#
+# * raindrops 0.6.0 or later
+# * sleepy_penguin 2.0.0 or later
+# * sendfile 1.1.0 or later
 module Rainbows::XEpoll
+  # :stopdoc:
   include Rainbows::Base
   autoload :Client, 'rainbows/xepoll/client'
 
   def init_worker_process(worker)
     super
     Rainbows::Epoll.const_set :EP, SleepyPenguin::Epoll.new
+    Rainbows.at_quit { Rainbows::Epoll::EP.close }
     Rainbows::Client.__send__ :include, Client
   end
 
@@ -21,4 +28,5 @@ module Rainbows::XEpoll
     init_worker_process(worker)
     Client.run
   end
+  # :startdoc:
 end
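For reference, selecting this model is an ordinary Rainbows! configurator call; the block below is a hypothetical example (use and worker_connections are documented configurator directives, the numbers are arbitrary), not a file shipped in this release:

    # rainbows.conf.rb -- hypothetical example
    Rainbows! do
      use :XEpoll              # needs raindrops, sleepy_penguin and sendfile
      worker_connections 400   # per-worker connection cap
    end
    worker_processes 4         # plain Unicorn-style directive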
data/lib/rainbows/xepoll/client.rb
@@ -5,28 +5,30 @@ module Rainbows::XEpoll::Client
   N = Raindrops.new(1)
   Rainbows::Epoll.nr_clients = lambda { N[0] }
   include Rainbows::Epoll::Client
-  MAX = Rainbows.server.worker_connections
-  THRESH = MAX - 1
   EP = Rainbows::Epoll::EP
-  THREADS = Rainbows::HttpServer::LISTENERS.map do |sock|
-    Thread.new(sock) do |sock|
-      sleep
-      begin
-        if io = sock.kgio_accept
-          N.incr(0, 1)
-          io.epoll_once
-        end
-        sleep while N[0] >= MAX
-      rescue => e
-        Rainbows::Error.listen_loop(e)
-      end while Rainbows.alive
+  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
+  extend Rainbows::WorkerYield
+
+  def self.included(klass)
+    max = Rainbows.server.worker_connections
+    ACCEPTORS.map! do |sock|
+      Thread.new do
+        begin
+          if io = sock.kgio_accept(klass)
+            N.incr(0, 1)
+            io.epoll_once
+          end
+          worker_yield while N[0] >= max
+        rescue => e
+          Rainbows::Error.listen_loop(e)
+        end while Rainbows.alive
+      end
     end
   end
 
   def self.run
-    THREADS.each { |t| t.run }
     Rainbows::Epoll.loop
-    Rainbows::JoinThreads.acceptors(THREADS)
+    Rainbows::JoinThreads.acceptors(ACCEPTORS)
   end
 
   # only call this once
@@ -40,6 +42,6 @@ module Rainbows::XEpoll::Client
 
   def on_close
     KATO.delete(self)
-    N.decr(0, 1) == THRESH and THREADS.each { |t| t.run }
+    N.decr(0, 1)
   end
 end
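Both the removed and the added acceptor loops count in-flight clients with a raindrops counter, which is safe to update from any of the native acceptor threads. A small standalone illustration of the raindrops calls used here (outside Rainbows):

    require "raindrops"

    counters = Raindrops.new(1)   # one shared counter slot
    counters.incr(0, 1)           # a client was accepted
    counters[0]                   # => 1, readable from any thread
    counters.decr(0, 1)           # the client closed
    counters[0]                   # => 0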
data/lib/rainbows/xepoll_thread_pool.rb (new file)
@@ -0,0 +1,82 @@
+# -*- encoding: binary -*-
+require "thread"
+require "sleepy_penguin"
+require "raindrops"
+
+# This is an edge-triggered epoll concurrency model with blocking
+# accept() in a (hopefully) native thread. This is comparable to
+# ThreadPool and CoolioThreadPool, but is Linux-only and able to exploit
+# "wake one" accept() behavior of a blocking accept() call when used
+# with native threads.
+#
+# This supports streaming "rack.input" and allows +:pool_size+ tuning
+# independently of +worker_connections+
+#
+# === Disadvantages
+#
+# This is only supported under Linux 2.6 kernels.
+#
+# === Compared to CoolioThreadPool
+#
+# This does not buffer outgoing responses in userspace at all, meaning
+# it can lower response latency to fast clients and also prevent
+# starvation of other clients when reading slow disks for responses
+# (when combined with native threads).
+#
+# CoolioThreadPool is likely better for trickling large static files or
+# proxying responses to slow clients, but this is likely better for fast
+# clients.
+#
+# Unlikely CoolioThreadPool, this supports streaming "rack.input" which
+# is useful for reading large uploads from fast clients.
+#
+# This exposes no special API or extensions on top of Rack.
+#
+# === Compared to ThreadPool
+#
+# This can maintain idle connections without the memory overhead of an
+# idle Thread. The cost of handling/dispatching active connections is
+# exactly the same for an equivalent number of active connections
+# (but independently tunable).
+#
+# === :pool_size vs worker_connections
+#
+# Since +:pool_size+ and +worker_connections+ are independently tunable,
+# it is possible to get into situations where active connections need
+# to wait for an idle thread in the thread pool before being processed
+#
+# In your Rainbows! config block, you may specify a Thread pool size
+# to limit your application concurrency independently of
+# worker_connections.
+#
+#   Rainbows! do
+#     use :XEpollThreadPool, :pool_size => 50
+#     worker_connections 100
+#   end
+#
+# In extremely rare cases, this may be combined with Rainbows::AppPool
+# if you have different concurrency capabilities for different parts of
+# your Rack application.
+#
+# === RubyGem Requirements
+#
+# * raindrops 0.6.0 or later
+# * sleepy_penguin 2.0.0 or later
+module Rainbows::XEpollThreadPool
+  extend Rainbows::PoolSize
+
+  # :stopdoc:
+  include Rainbows::Base
+
+  def init_worker_process(worker)
+    super
+    require "rainbows/xepoll_thread_pool/client"
+    Rainbows::Client.__send__ :include, Client
+  end
+
+  def worker_loop(worker) # :nodoc:
+    init_worker_process(worker)
+    Client.loop
+  end
+  # :startdoc:
+end
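The RDoc example above only shows the Rainbows! block; in practice it sits next to an ordinary Rack application, since XEpollThreadPool exposes no Rack extensions. A hypothetical pairing (the file name and handler are made up for illustration):

    # config.ru -- any plain Rack app works unchanged
    run lambda { |env|
      [200, { "Content-Type" => "text/plain" }, ["hello from the pool\n"]]
    }

which would then be started with something like rainbows -c rainbows.conf.rb config.ru.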
data/lib/rainbows/xepoll_thread_pool/client.rb (new file)
@@ -0,0 +1,129 @@
+# -*- encoding: binary -*-
+# :enddoc:
+# FIXME: lots of duplication from xepolll_thread_spawn/client
+
+module Rainbows::XEpollThreadPool::Client
+  Rainbows.config!(self, :keepalive_timeout, :client_header_buffer_size)
+  N = Raindrops.new(1)
+  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
+  extend Rainbows::WorkerYield
+
+  def self.included(klass) # included in Rainbows::Client
+    max = Rainbows.server.worker_connections
+    ACCEPTORS.map! do |sock|
+      Thread.new do
+        buf = ""
+        begin
+          if io = sock.kgio_accept(klass)
+            N.incr(0, 1)
+            io.epoll_once(buf)
+          end
+          worker_yield while N[0] >= max
+        rescue => e
+          Rainbows::Error.listen_loop(e)
+        end while Rainbows.alive
+      end
+    end
+  end
+
+  def self.app_run(queue)
+    while client = queue.pop
+      client.run
+    end
+  end
+
+  QUEUE = Queue.new
+  Rainbows::O[:pool_size].times { Thread.new { app_run(QUEUE) } }
+
+  ep = SleepyPenguin::Epoll
+  EP = ep.new
+  Rainbows.at_quit { EP.close }
+  IN = ep::IN | ep::ET | ep::ONESHOT
+  KATO = {}
+  KATO.compare_by_identity if KATO.respond_to?(:compare_by_identity)
+  LOCK = Mutex.new
+  @@last_expire = Time.now
+
+  def kato_set
+    LOCK.synchronize { KATO[self] = @@last_expire }
+    EP.set(self, IN)
+  end
+
+  def kato_delete
+    LOCK.synchronize { KATO.delete self }
+  end
+
+  def self.loop
+    buf = ""
+    begin
+      EP.wait(nil, 1000) { |_, obj| obj.epoll_run(buf) }
+      expire
+    rescue Errno::EINTR
+    rescue => e
+      Rainbows::Error.listen_loop(e)
+    end while Rainbows.tick || N[0] > 0
+    Rainbows::JoinThreads.acceptors(ACCEPTORS)
+  end
+
+  def self.expire
+    return if ((now = Time.now) - @@last_expire) < 1.0
+    if (ot = KEEPALIVE_TIMEOUT) >= 0
+      ot = now - ot
+      defer = []
+      LOCK.synchronize do
+        KATO.delete_if { |client, time| time < ot and defer << client }
+      end
+      defer.each { |io| io.closed? or io.close }
+    end
+    @@last_expire = now
+  end
+
+  def epoll_once(buf)
+    @hp = Rainbows::HttpParser.new
+    epoll_run(buf)
+  end
+
+  def close
+    super
+    kato_delete
+    N.decr(0, 1)
+    nil
+  end
+
+  def handle_error(e)
+    super
+  ensure
+    closed? or close
+  end
+
+  def queue!
+    QUEUE << self
+    false
+  end
+
+  def epoll_run(buf)
+    case kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, buf)
+    when :wait_readable
+      return kato_set
+    when String
+      kato_delete
+      @hp.buf << buf
+      @hp.parse and return queue!
+    else
+      return close
+    end while true
+  rescue => e
+    handle_error(e)
+  end
+
+  def run
+    process_pipeline(@hp.env, @hp)
+  end
+
+  def pipeline_ready(hp)
+    # be fair to other clients, let others run first
+    hp.parse and return queue!
+    kato_set
+    false
+  end
+end
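The dispatch in this file is a conventional Queue-backed thread pool: the epoll loop pushes parsed clients onto QUEUE and :pool_size threads block in Queue#pop waiting for work. A self-contained sketch of the same pattern with illustrative names:

    require "thread"

    pool_size = 4
    queue = Queue.new

    workers = pool_size.times.map do
      Thread.new do
        # Queue#pop blocks, so idle workers consume no CPU;
        # a nil job tells one worker to exit
        while job = queue.pop
          job.call
        end
      end
    end

    10.times { |i| queue << lambda { puts "job #{i}" } }
    pool_size.times { queue << nil }   # one shutdown sentinel per worker
    workers.each(&:join)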
data/lib/rainbows/xepoll_thread_spawn.rb (new file)
@@ -0,0 +1,58 @@
+# -*- encoding: binary -*-
+require "thread"
+require "sleepy_penguin"
+require "raindrops"
+
+# This is an edge-triggered epoll concurrency model with blocking
+# accept() in a (hopefully) native thread. This is comparable to
+# ThreadSpawn and CoolioThreadSpawn, but is Linux-only and able to exploit
+# "wake one" accept() behavior of a blocking accept() call when used
+# with native threads.
+#
+# This supports streaming "rack.input" and allows +:pool_size+ tuning
+# independently of +worker_connections+
+#
+# === Disadvantages
+#
+# This is only supported under Linux 2.6 kernels.
+#
+# === Compared to CoolioThreadSpawn
+#
+# This does not buffer outgoing responses in userspace at all, meaning
+# it can lower response latency to fast clients and also prevent
+# starvation of other clients when reading slow disks for responses
+# (when combined with native threads).
+#
+# CoolioThreadSpawn is likely better for trickling large static files or
+# proxying responses to slow clients, but this is likely better for fast
+# clients.
+#
+# Unlikely CoolioThreadSpawn, this supports streaming "rack.input" which
+# is useful for reading large uploads from fast clients.
+#
+# === Compared to ThreadSpawn
+#
+# This can maintain idle connections without the memory overhead of an
+# idle Thread. The cost of handling/dispatching active connections is
+# exactly the same for an equivalent number of active connections.
+#
+# === RubyGem Requirements
+#
+# * raindrops 0.6.0 or later
+# * sleepy_penguin 2.0.0 or later
+module Rainbows::XEpollThreadSpawn
+  # :stopdoc:
+  include Rainbows::Base
+
+  def init_worker_process(worker)
+    super
+    require "rainbows/xepoll_thread_spawn/client"
+    Rainbows::Client.__send__ :include, Client
+  end
+
+  def worker_loop(worker) # :nodoc:
+    init_worker_process(worker)
+    Client.loop
+  end
+  # :startdoc:
+end
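As with the other concurrency models, enabling this one is a single use call in the Rainbows! block; the snippet below is a hypothetical example, not a file from this release:

    Rainbows! do
      use :XEpollThreadSpawn       # needs raindrops and sleepy_penguin
      worker_connections 200
    end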
data/lib/rainbows/xepoll_thread_spawn/client.rb (new file)
@@ -0,0 +1,121 @@
+# -*- encoding: binary -*-
+# :stopdoc:
+module Rainbows::XEpollThreadSpawn::Client
+  Rainbows.config!(self, :keepalive_timeout, :client_header_buffer_size)
+  N = Raindrops.new(1)
+  ACCEPTORS = Rainbows::HttpServer::LISTENERS.dup
+  extend Rainbows::WorkerYield
+
+  def self.included(klass) # included in Rainbows::Client
+    max = Rainbows.server.worker_connections
+    ACCEPTORS.map! do |sock|
+      Thread.new do
+        buf = ""
+        begin
+          if io = sock.kgio_accept(klass)
+            N.incr(0, 1)
+            io.epoll_once(buf)
+          end
+          worker_yield while N[0] >= max
+        rescue => e
+          Rainbows::Error.listen_loop(e)
+        end while Rainbows.alive
+      end
+    end
+  end
+
+  ep = SleepyPenguin::Epoll
+  EP = ep.new
+  Rainbows.at_quit { EP.close }
+  IN = ep::IN | ep::ET | ep::ONESHOT
+  KATO = {}
+  KATO.compare_by_identity if KATO.respond_to?(:compare_by_identity)
+  LOCK = Mutex.new
+  @@last_expire = Time.now
+
+  def kato_set
+    LOCK.synchronize { KATO[self] = @@last_expire }
+    EP.set(self, IN)
+  end
+
+  def kato_delete
+    LOCK.synchronize { KATO.delete self }
+  end
+
+  def self.loop
+    buf = ""
+    begin
+      EP.wait(nil, 1000) { |_, obj| obj.epoll_run(buf) }
+      expire
+    rescue Errno::EINTR
+    rescue => e
+      Rainbows::Error.listen_loop(e)
+    end while Rainbows.tick || N[0] > 0
+    Rainbows::JoinThreads.acceptors(ACCEPTORS)
+  end
+
+  def self.expire
+    return if ((now = Time.now) - @@last_expire) < 1.0
+    if (ot = KEEPALIVE_TIMEOUT) >= 0
+      ot = now - ot
+      defer = []
+      LOCK.synchronize do
+        KATO.delete_if { |client, time| time < ot and defer << client }
+      end
+      defer.each { |io| io.closed? or io.close }
+    end
+    @@last_expire = now
+  end
+
+  def epoll_once(buf)
+    @hp = Rainbows::HttpParser.new
+    epoll_run(buf)
+  end
+
+  def close
+    super
+    kato_delete
+    N.decr(0, 1)
+    nil
+  end
+
+  def handle_error(e)
+    super
+  ensure
+    closed? or close
+  end
+
+  def epoll_run(buf)
+    case kgio_tryread(CLIENT_HEADER_BUFFER_SIZE, buf)
+    when :wait_readable
+      return kato_set
+    when String
+      kato_delete
+      @hp.buf << buf
+      env = @hp.parse and return spawn(env, @hp)
+    else
+      return close
+    end while true
+  rescue => e
+    handle_error(e)
+  end
+
+  def spawn(env, hp)
+    Thread.new { process_pipeline(env, hp) }
+  end
+
+  def pipeline_ready(hp)
+    hp.parse and return true
+    case buf = kgio_tryread(CLIENT_HEADER_BUFFER_SIZE)
+    when :wait_readable
+      kato_set
+      return false
+    when String
+      hp.buf << buf
+      hp.parse and return true
+      # continue loop
+    else
+      return close
+    end while true
+  end
+end
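Both new client modules expire idle keepalive sockets the same way: KATO timestamps are mutated only under LOCK, and sockets selected for closing are collected inside the lock but actually closed after it is released, keeping the critical section short. A reduced standalone sketch of that expire step (names are illustrative):

    require "thread"

    LOCK = Mutex.new
    KATO = {}          # client IO -> last-activity Time
    TIMEOUT = 5.0      # seconds; arbitrary for this sketch

    def expire_idle(now = Time.now)
      cutoff = now - TIMEOUT
      defer = []
      LOCK.synchronize do
        # record victims while holding the lock ...
        KATO.delete_if { |client, time| time < cutoff and defer << client }
      end
      # ... but close them only after releasing it
      defer.each { |io| io.close unless io.closed? }
    end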