rainbows 0.94.0 → 0.95.0

Files changed (84)
  1. data/.document +1 -0
  2. data/.manifest +18 -0
  3. data/ChangeLog +394 -226
  4. data/GIT-VERSION-FILE +1 -1
  5. data/GIT-VERSION-GEN +1 -1
  6. data/GNUmakefile +6 -4
  7. data/NEWS +18 -0
  8. data/README +13 -5
  9. data/Static_Files +71 -0
  10. data/TODO +12 -0
  11. data/Test_Suite +1 -1
  12. data/bin/rainbows +1 -4
  13. data/lib/rainbows/actor_spawn.rb +1 -1
  14. data/lib/rainbows/app_pool.rb +1 -1
  15. data/lib/rainbows/base.rb +79 -89
  16. data/lib/rainbows/byte_slice.rb +17 -0
  17. data/lib/rainbows/configurator.rb +46 -0
  18. data/lib/rainbows/const.rb +2 -2
  19. data/lib/rainbows/dev_fd_response.rb +52 -44
  20. data/lib/rainbows/error.rb +1 -0
  21. data/lib/rainbows/ev_core.rb +3 -2
  22. data/lib/rainbows/event_machine.rb +26 -24
  23. data/lib/rainbows/fiber/base.rb +30 -40
  24. data/lib/rainbows/fiber/body.rb +34 -0
  25. data/lib/rainbows/fiber/io.rb +28 -8
  26. data/lib/rainbows/fiber/queue.rb +1 -0
  27. data/lib/rainbows/fiber/rev.rb +4 -2
  28. data/lib/rainbows/fiber.rb +1 -0
  29. data/lib/rainbows/fiber_pool.rb +2 -2
  30. data/lib/rainbows/fiber_spawn.rb +2 -2
  31. data/lib/rainbows/http_response.rb +20 -31
  32. data/lib/rainbows/http_server.rb +3 -4
  33. data/lib/rainbows/max_body.rb +1 -0
  34. data/lib/rainbows/never_block/event_machine.rb +2 -0
  35. data/lib/rainbows/never_block.rb +5 -4
  36. data/lib/rainbows/queue_pool.rb +1 -0
  37. data/lib/rainbows/response/body.rb +119 -0
  38. data/lib/rainbows/response.rb +43 -0
  39. data/lib/rainbows/rev/client.rb +79 -9
  40. data/lib/rainbows/rev/core.rb +4 -0
  41. data/lib/rainbows/rev/deferred_response.rb +1 -44
  42. data/lib/rainbows/rev/heartbeat.rb +1 -0
  43. data/lib/rainbows/rev/master.rb +1 -0
  44. data/lib/rainbows/rev/sendfile.rb +26 -0
  45. data/lib/rainbows/rev/thread.rb +2 -1
  46. data/lib/rainbows/rev.rb +2 -0
  47. data/lib/rainbows/rev_fiber_spawn.rb +3 -1
  48. data/lib/rainbows/rev_thread_pool.rb +7 -5
  49. data/lib/rainbows/rev_thread_spawn.rb +2 -2
  50. data/lib/rainbows/revactor.rb +146 -146
  51. data/lib/rainbows/sendfile.rb +10 -21
  52. data/lib/rainbows/server_token.rb +39 -0
  53. data/lib/rainbows/stream_file.rb +14 -0
  54. data/lib/rainbows/tee_input.rb +1 -0
  55. data/lib/rainbows/thread_pool.rb +12 -7
  56. data/lib/rainbows/thread_spawn.rb +2 -3
  57. data/lib/rainbows/writer_thread_pool.rb +13 -7
  58. data/lib/rainbows/writer_thread_spawn.rb +12 -9
  59. data/lib/rainbows.rb +16 -45
  60. data/rainbows.gemspec +8 -8
  61. data/t/.gitignore +1 -1
  62. data/t/GNUmakefile +26 -16
  63. data/t/README +1 -1
  64. data/t/async-response-no-autochunk.ru +0 -1
  65. data/t/async-response.ru +0 -1
  66. data/t/cramp/rainsocket.ru +26 -0
  67. data/t/fork-sleep.ru +0 -1
  68. data/t/my-tap-lib.sh +3 -2
  69. data/t/simple-http_ActorSpawn.ru +9 -0
  70. data/t/t0009-broken-app.sh +1 -1
  71. data/t/t0009.ru +0 -1
  72. data/t/t0011-close-on-exec-set.sh +1 -1
  73. data/t/t0015-working_directory.sh +56 -0
  74. data/t/t0016-onenine-encoding-is-tricky.sh +28 -0
  75. data/t/t0016.rb +15 -0
  76. data/t/t0020-large-sendfile-response.sh +141 -0
  77. data/t/t0300-async_sinatra.sh +0 -6
  78. data/t/t0501-cramp-rainsocket.sh +38 -0
  79. data/t/t9001-sendfile-to-path.sh +5 -4
  80. data/t/t9002-server-token.sh +37 -0
  81. data/t/t9002.ru +4 -0
  82. data/t/test-lib.sh +1 -1
  83. data/t/test_isolate.rb +14 -11
  84. metadata +87 -18
data/lib/rainbows/revactor.rb
@@ -2,174 +2,174 @@
 require 'revactor'
 Revactor::VERSION >= '0.1.5' or abort 'revactor 0.1.5 is required'

-module Rainbows
-
-  # Enables use of the Actor model through
-  # {Revactor}[http://revactor.org] under Ruby 1.9. It spawns one
-  # long-lived Actor for every listen socket in the process and spawns a
-  # new Actor for every client connection accept()-ed.
-  # +worker_connections+ will limit the number of client Actors we have
-  # running at any one time.
-  #
-  # Applications using this model are required to be reentrant, but do
-  # not have to worry about race conditions unless they use threads
-  # internally. \Rainbows! does not spawn threads under this model.
-  # Multiple instances of the same app may run in the same address space
-  # sequentially (but at interleaved points). Any network dependencies
-  # in the application using this model should be implemented using the
-  # \Revactor library as well, to take advantage of the networking
-  # concurrency features this model provides.
-
-  module Revactor
-    RD_ARGS = {}
-
-    include Base
-
-    # once a client is accepted, it is processed in its entirety here
-    # in 3 easy steps: read request, call app, write app response
-    def process_client(client)
-      io = client.instance_variable_get(:@_io)
-      io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
-      rd_args = [ nil ]
-      remote_addr = if ::Revactor::TCP::Socket === client
-        rd_args << RD_ARGS
-        client.remote_addr
-      else
-        Unicorn::HttpRequest::LOCALHOST
-      end
-      buf = client.read(*rd_args)
-      hp = HttpParser.new
-      env = {}
-      alive = true
-
-      begin
-        while ! hp.headers(env, buf)
-          buf << client.read(*rd_args)
-        end
-
-        env[Const::CLIENT_IO] = client
-        env[Const::RACK_INPUT] = 0 == hp.content_length ?
-                                 HttpRequest::NULL_IO :
-                                 TeeInput.new(PartialSocket.new(client), env, hp, buf)
-        env[Const::REMOTE_ADDR] = remote_addr
-        response = app.call(env.update(RACK_DEFAULTS))
-
-        if 100 == response.first.to_i
-          client.write(Const::EXPECT_100_RESPONSE)
-          env.delete(Const::HTTP_EXPECT)
-          response = app.call(env)
-        end
-
-        alive = hp.keepalive? && G.alive
-        out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
-        HttpResponse.write(client, response, out)
-      end while alive and hp.reset.nil? and env.clear
-    rescue ::Revactor::TCP::ReadError
-    rescue => e
-      Error.write(io, e)
-    ensure
-      client.close
+# Enables use of the Actor model through
+# {Revactor}[http://revactor.org] under Ruby 1.9. It spawns one
+# long-lived Actor for every listen socket in the process and spawns a
+# new Actor for every client connection accept()-ed.
+# +worker_connections+ will limit the number of client Actors we have
+# running at any one time.
+#
+# Applications using this model are required to be reentrant, but do
+# not have to worry about race conditions unless they use threads
+# internally. \Rainbows! does not spawn threads under this model.
+# Multiple instances of the same app may run in the same address space
+# sequentially (but at interleaved points). Any network dependencies
+# in the application using this model should be implemented using the
+# \Revactor library as well, to take advantage of the networking
+# concurrency features this model provides.
+module Rainbows::Revactor
+
+  # :stopdoc:
+  RD_ARGS = {}
+
+  include Rainbows::Base
+  LOCALHOST = Unicorn::HttpRequest::LOCALHOST
+  TCP = ::Revactor::TCP::Socket
+
+  # once a client is accepted, it is processed in its entirety here
+  # in 3 easy steps: read request, call app, write app response
+  def process_client(client) # :nodoc:
+    io = client.instance_variable_get(:@_io)
+    io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    rd_args = [ nil ]
+    remote_addr = if TCP === client
+      rd_args << RD_ARGS
+      client.remote_addr
+    else
+      LOCALHOST
     end
+    buf = client.read(*rd_args)
+    hp = HttpParser.new
+    env = {}
+    alive = true
+
+    begin
+      buf << client.read(*rd_args) until hp.headers(env, buf)
+
+      env[CLIENT_IO] = client
+      env[RACK_INPUT] = 0 == hp.content_length ?
+                        NULL_IO : TeeInput.new(PartialSocket.new(client), env, hp, buf)
+      env[REMOTE_ADDR] = remote_addr
+      response = app.call(env.update(RACK_DEFAULTS))
+
+      if 100 == response[0].to_i
+        client.write(EXPECT_100_RESPONSE)
+        env.delete(HTTP_EXPECT)
+        response = app.call(env)
+      end

-    # runs inside each forked worker, this sits around and waits
-    # for connections and doesn't die until the parent dies (or is
-    # given a INT, QUIT, or TERM signal)
-    def worker_loop(worker)
-      init_worker_process(worker)
-      RD_ARGS[:timeout] = G.kato if G.kato > 0
-      nr = 0
-      limit = worker_connections
-      actor_exit = Case[:exit, Actor, Object]
-
-      revactorize_listeners.each do |l, close, accept|
-        Actor.spawn(l, close, accept) do |l, close, accept|
-          Actor.current.trap_exit = true
-          l.controller = l.instance_variable_set(:@receiver, Actor.current)
-          begin
-            while nr >= limit
-              l.disable if l.enabled?
-              logger.info "busy: clients=#{nr} >= limit=#{limit}"
-              Actor.receive do |f|
-                f.when(close) {}
-                f.when(actor_exit) { nr -= 1 }
-                f.after(0.01) {} # another listener could've gotten an exit
-              end
-            end
+      alive = hp.keepalive? && G.alive
+      out = [ alive ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
+      write_response(client, response, out)
+    end while alive and hp.reset.nil? and env.clear
+  rescue ::Revactor::TCP::ReadError
+  rescue => e
+    Rainbows::Error.write(io, e)
+  ensure
+    client.close
+  end

-            l.enable unless l.enabled?
+  # runs inside each forked worker, this sits around and waits
+  # for connections and doesn't die until the parent dies (or is
+  # given a INT, QUIT, or TERM signal)
+  def worker_loop(worker) #:nodoc:
+    init_worker_process(worker)
+    self.class.__send__(:alias_method, :write_body, :write_body_each)
+    RD_ARGS[:timeout] = G.kato if G.kato > 0
+    nr = 0
+    limit = worker_connections
+    actor_exit = Case[:exit, Actor, Object]
+
+    revactorize_listeners.each do |l, close, accept|
+      Actor.spawn(l, close, accept) do |l, close, accept|
+        Actor.current.trap_exit = true
+        l.controller = l.instance_variable_set(:@receiver, Actor.current)
+        begin
+          while nr >= limit
+            l.disable if l.enabled?
+            logger.info "busy: clients=#{nr} >= limit=#{limit}"
             Actor.receive do |f|
               f.when(close) {}
               f.when(actor_exit) { nr -= 1 }
-              f.when(accept) do |_, _, s|
-                nr += 1
-                Actor.spawn_link(s) { |c| process_client(c) }
-              end
+              f.after(0.01) {} # another listener could've gotten an exit
             end
-          rescue => e
-            Error.listen_loop(e)
-          end while G.alive
+          end
+
+          l.enable unless l.enabled?
           Actor.receive do |f|
             f.when(close) {}
             f.when(actor_exit) { nr -= 1 }
-          end while nr > 0
-        end
+            f.when(accept) do |_, _, s|
+              nr += 1
+              Actor.spawn_link(s) { |c| process_client(c) }
+            end
+          end
+        rescue => e
+          Rainbows::Error.listen_loop(e)
+        end while G.alive
+        Actor.receive do |f|
+          f.when(close) {}
+          f.when(actor_exit) { nr -= 1 }
+        end while nr > 0
       end
-
-      Actor.sleep 1 while G.tick || nr > 0
-    rescue Errno::EMFILE
-      # ignore, let another worker process take it
     end

-    def revactorize_listeners
-      LISTENERS.map do |s|
-        case s
-        when TCPServer
-          l = ::Revactor::TCP.listen(s, nil)
-          [ l, T[:tcp_closed, ::Revactor::TCP::Socket],
-            T[:tcp_connection, l, ::Revactor::TCP::Socket] ]
-        when UNIXServer
-          l = ::Revactor::UNIX.listen(s)
-          [ l, T[:unix_closed, ::Revactor::UNIX::Socket ],
-            T[:unix_connection, l, ::Revactor::UNIX::Socket] ]
-        end
+    Actor.sleep 1 while G.tick || nr > 0
+  rescue Errno::EMFILE
+    # ignore, let another worker process take it
+  end
+
+  def revactorize_listeners
+    LISTENERS.map do |s|
+      case s
+      when TCPServer
+        l = ::Revactor::TCP.listen(s, nil)
+        [ l, T[:tcp_closed, ::Revactor::TCP::Socket],
+          T[:tcp_connection, l, ::Revactor::TCP::Socket] ]
+      when UNIXServer
+        l = ::Revactor::UNIX.listen(s)
+        [ l, T[:unix_closed, ::Revactor::UNIX::Socket ],
+          T[:unix_connection, l, ::Revactor::UNIX::Socket] ]
       end
     end
+  end

-    # Revactor Sockets do not implement readpartial, so we emulate just
-    # enough to avoid mucking with TeeInput internals. Fortunately
-    # this code is not heavily used so we can usually avoid the overhead
-    # of adding a userspace buffer.
-    class PartialSocket < Struct.new(:socket, :rbuf)
-      def initialize(socket)
-        # IO::Buffer is used internally by Rev which Revactor is based on
-        # so we'll always have it available
-        super(socket, IO::Buffer.new)
-      end
+  # Revactor Sockets do not implement readpartial, so we emulate just
+  # enough to avoid mucking with TeeInput internals. Fortunately
+  # this code is not heavily used so we can usually avoid the overhead
+  # of adding a userspace buffer.
+  class PartialSocket < Struct.new(:socket, :rbuf)
+    def initialize(socket)
+      # IO::Buffer is used internally by Rev which Revactor is based on
+      # so we'll always have it available
+      super(socket, IO::Buffer.new)
+    end

-      # Revactor socket reads always return an unspecified amount,
-      # sometimes too much
-      def readpartial(length, dst = "")
-        return dst if length == 0
-        # always check and return from the userspace buffer first
-        rbuf.size > 0 and return dst.replace(rbuf.read(length))
+    # Revactor socket reads always return an unspecified amount,
+    # sometimes too much
+    def readpartial(length, dst = "")
+      return dst.replace("") if length == 0

-        # read off the socket since there was nothing in rbuf
-        tmp = socket.read
+      # always check and return from the userspace buffer first
+      rbuf.size > 0 and return dst.replace(rbuf.read(length))

-        # we didn't read too much, good, just return it straight back
-        # to avoid needlessly wasting memory bandwidth
-        tmp.size <= length and return dst.replace(tmp)
+      # read off the socket since there was nothing in rbuf
+      tmp = socket.read

-        # ugh, read returned too much, copy + reread to avoid slicing
-        rbuf << tmp[length, tmp.size]
-        dst.replace(tmp[0, length])
-      end
+      # we didn't read too much, good, just return it straight back
+      # to avoid needlessly wasting memory bandwidth
+      tmp.size <= length and return dst.replace(tmp)

-      # just proxy any remaining methods TeeInput may use
-      def close
-        socket.close
-      end
+      # ugh, read returned too much
+      rbuf << tmp[length, tmp.size]
+      dst.replace(tmp[0, length])
     end

+    # just proxy any remaining methods TeeInput may use
+    def close
+      socket.close
+    end
   end
+
+  # :startdoc:
 end
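For orientation (not part of the diff above): the Actor model documented in the comments is selected through the \Rainbows! configurator in the server config file. A minimal sketch, assuming the Rainbows!/use/worker_connections options described in the project README; the numbers are only illustrative:

  # rainbows.conf.rb -- loaded via "rainbows -c rainbows.conf.rb config.ru"
  Rainbows! do
    use :Revactor            # the Actor-based concurrency model shown in this file
    worker_connections 128   # illustrative cap on client Actors per worker
  end
  worker_processes 4         # regular Unicorn option; each worker runs its own Actors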
data/lib/rainbows/sendfile.rb
@@ -11,8 +11,8 @@ module Rainbows
 # efficiently using sendfile() or similar. With multithreaded models
 # under Ruby 1.9, IO.copy_stream will be used.
 #
-# This middleware is the opposite of Rack::Contrib::Sendfile as it
-# reverses the effect of Rack::Contrib::Sendfile. Unlike many Ruby
+# This middleware is the opposite of Rack::Sendfile as it
+# reverses the effect of Rack::Sendfile. Unlike many Ruby
 # web servers, some configurations of \Rainbows! are capable of
 # serving static files efficiently.
 #
@@ -57,37 +57,26 @@ class Sendfile < Struct.new(:app)
   # Body wrapper, this allows us to fall back gracefully to
   # +each+ in case a given concurrency model does not optimize
   # +to_path+ calls.
-  class Body < Struct.new(:to_io)
-
-    def initialize(path, headers)
-      # Rainbows! will try #to_io if #to_path exists to avoid unnecessary
-      # open() calls.
-      self.to_io = File.open(path, 'rb')
+  class Body < Struct.new(:to_path) # :nodoc: all

+    def self.new(path, headers)
       unless headers['Content-Length']
-        stat = to_io.stat
+        stat = File.stat(path)
         headers['Content-Length'] = stat.size.to_s if stat.file?
       end
-    end
-
-    def to_path
-      to_io.path
+      super(path)
     end

     # fallback in case our +to_path+ doesn't get handled for whatever reason
     def each(&block)
-      buf = ''
-      while to_io.read(0x4000, buf)
-        yield buf
+      File.open(to_path, 'rb') do |fp|
+        buf = ''
+        yield buf while fp.read(0x4000, buf)
       end
     end
-
-    def close
-      to_io.close
-    end
   end

-  def call(env)
+  def call(env) # :nodoc:
     status, headers, body = app.call(env)
     headers = HH.new(headers)
     if path = headers.delete('X-Sendfile')
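A usage sketch (not part of the diff): an application hands a path to Rainbows::Sendfile through the X-Sendfile header the middleware deletes above; the file name here is purely illustrative.

  # config.ru
  require 'rainbows/sendfile'
  use Rainbows::Sendfile
  run lambda { |env|
    path = "#{Dir.pwd}/random_blob"   # hypothetical large file on disk
    [ 200,
      { 'Content-Type' => 'application/octet-stream', 'X-Sendfile' => path },
      [] ]                            # body stays empty; the server streams the file
  }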
data/lib/rainbows/server_token.rb
@@ -0,0 +1,39 @@
+# -*- encoding: binary -*-
+module Rainbows
+
+  # An optional middleware to proudly display your usage of \Rainbows! in
+  # the "Server:" response header. This means you can help tell the world
+  # you're using \Rainbows! and spread fun and joy all over the Internet!
+  #
+  #   ------ in your config.ru ------
+  #   require 'rainbows/server_token'
+  #   require 'rack/lobster'
+  #   use Rainbows::ServerToken
+  #   run Rack::Lobster.new
+  #
+  # If you're nervous about the exact version of \Rainbows! you're running,
+  # then you can actually specify anything you want:
+  #
+  #   use Rainbows::ServerToken, "netcat 1.0"
+  #
+
+  class ServerToken < Struct.new(:app, :token)
+
+    # :stopdoc:
+    #
+    # Freeze constants as they're slightly faster when setting hashes
+    SERVER = "Server".freeze
+
+    def initialize(app, token = Const::RACK_DEFAULTS['SERVER_SOFTWARE'])
+      super
+    end
+
+    def call(env)
+      status, headers, body = app.call(env)
+      headers = Rack::Utils::HeaderHash.new(headers)
+      headers[SERVER] = token
+      [ status, headers, body ]
+    end
+    # :startdoc:
+  end
+end
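A quick sanity check of the middleware outside the server, assuming Rack::MockRequest from the rack gem; the token string is illustrative and not tied to any real deployment:

  require 'rack'
  require 'rainbows/server_token'

  # wrap a trivial app and confirm the Server header gets rewritten
  app = Rainbows::ServerToken.new(lambda { |env| [ 200, {}, [] ] }, "netcat 1.0")
  res = Rack::MockRequest.new(app).get("/")
  res.headers["Server"]   # => "netcat 1.0"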
data/lib/rainbows/stream_file.rb
@@ -0,0 +1,14 @@
+# -*- encoding: binary -*-
+# :enddoc:
+
+# Used to keep track of file offsets in IO#sendfile_nonblock + evented
+# models. We always maintain our own file offsets in userspace because
+# sendfile() implementations offer pread()-like idempotency for
+# concurrency (multiple clients can read the same underlying file handle).
+class Rainbows::StreamFile < Struct.new(:offset, :to_io)
+
+  def close
+    to_io.close
+    self.to_io = nil
+  end
+end
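A rough sketch (not from the gem) of how an evented writer might drive Rainbows::StreamFile. It assumes the IO#sendfile_nonblock(src, offset, count) call from the sendfile gem that the comment refers to; the helper name and chunk size are hypothetical:

  def stream_chunk(client_io, sf, chunk = 0x10000)
    sent = client_io.sendfile_nonblock(sf.to_io, sf.offset, chunk)
    sf.offset += sent   # advance our private offset, never the shared fd position
  rescue Errno::EAGAIN
    # kernel socket buffer is full: leave sf.offset untouched and retry
    # once the event loop reports the socket writable again
  end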
data/lib/rainbows/tee_input.rb
@@ -1,4 +1,5 @@
 # -*- encoding: binary -*-
+# :enddoc:
 module Rainbows

   # acts like tee(1) on an input input to provide a input-like stream
data/lib/rainbows/thread_pool.rb
@@ -25,7 +25,7 @@ module Rainbows

     include Base

-    def worker_loop(worker)
+    def worker_loop(worker) # :nodoc:
       init_worker_process(worker)
       pool = (1..worker_connections).map do
         Thread.new { LISTENERS.size == 1 ? sync_worker : async_worker }
@@ -41,8 +41,8 @@ module Rainbows
       join_threads(pool)
     end

-    def sync_worker
-      s = LISTENERS.first
+    def sync_worker # :nodoc:
+      s = LISTENERS[0]
       begin
         c = Rainbows.sync_accept(s) and process_client(c)
       rescue => e
@@ -50,14 +50,14 @@ module Rainbows
       end while G.alive
     end

-    def async_worker
+    def async_worker # :nodoc:
       begin
         # TODO: check if select() or accept() is a problem on large
         # SMP systems under Ruby 1.9. Hundreds of native threads
         # all working off the same socket could be a thundering herd
         # problem. On the other hand, a thundering herd may not
         # even incur as much overhead as an extra Mutex#synchronize
-        ret = IO.select(LISTENERS, nil, nil, 1) and ret.first.each do |s|
+        ret = IO.select(LISTENERS, nil, nil, 1) and ret[0].each do |s|
           s = Rainbows.accept(s) and process_client(s)
         end
       rescue Errno::EINTR
@@ -66,11 +66,16 @@ module Rainbows
       end while G.alive
     end

-    def join_threads(threads)
+    def join_threads(threads) # :nodoc:
       G.quit!
       threads.delete_if do |thr|
         G.tick
-        thr.alive? ? thr.join(0.01) : true
+        begin
+          thr.run
+          thr.join(0.01)
+        rescue
+          true
+        end
       end until threads.empty?
     end

data/lib/rainbows/thread_spawn.rb
@@ -17,10 +17,9 @@ module Rainbows
   # capabilities

   module ThreadSpawn
-
     include Base

-    def accept_loop(klass)
+    def accept_loop(klass) #:nodoc:
       lock = Mutex.new
       limit = worker_connections
       LISTENERS.each do |l|
@@ -55,7 +54,7 @@ module Rainbows
       sleep 1 while G.tick || lock.synchronize { G.cur > 0 }
     end

-    def worker_loop(worker)
+    def worker_loop(worker) #:nodoc:
       init_worker_process(worker)
       accept_loop(Thread)
     end
data/lib/rainbows/writer_thread_pool.rb
@@ -24,7 +24,7 @@ module Rainbows

     # used to wrap a BasicSocket to use with +q+ for all writes
     # this is compatible with IO.select
-    class QueueSocket < Struct.new(:to_io, :q)
+    class QueueSocket < Struct.new(:to_io, :q) # :nodoc:
       def readpartial(size, buf = "")
         to_io.readpartial(size, buf)
       end
@@ -46,19 +46,25 @@ module Rainbows
       end
     end

-    def write_body(qclient, body)
-      qclient.q << [ qclient.to_io, :body, body ]
+    module Response # :nodoc:
+      def write_body(qclient, body)
+        qclient.q << [ qclient.to_io, :body, body ]
+      end
     end

     @@nr = 0
     @@q = nil

-    def process_client(client)
+    def process_client(client) # :nodoc:
       @@nr += 1
       super(QueueSocket[client, @@q[@@nr %= @@q.size]])
     end

-    def worker_loop(worker)
+    def worker_loop(worker) # :nodoc:
+      Rainbows::Response.setup(self.class)
+      self.class.__send__(:alias_method, :sync_write_body, :write_body)
+      self.class.__send__(:include, Response)
+
       # we have multiple, single-thread queues since we don't want to
       # interleave writes from the same client
       qp = (1..worker_connections).map do |n|
@@ -66,13 +72,13 @@ module Rainbows
          begin
            io, arg1, arg2 = response
            case arg1
-           when :body then Base.write_body(io, arg2)
+           when :body then sync_write_body(io, arg2)
            when :close then io.close unless io.closed?
            else
              io.write(arg1)
            end
          rescue => err
-           Error.app(err)
+           Error.write(io, err)
          end
        end
      end
data/lib/rainbows/writer_thread_spawn.rb
@@ -23,11 +23,13 @@ module Rainbows
   module WriterThreadSpawn
     include Base

-    CUR = {}
+    CUR = {} # :nodoc:

     # used to wrap a BasicSocket to use with +q+ for all writes
     # this is compatible with IO.select
-    class MySocket < Struct.new(:to_io, :q, :thr)
+    class MySocket < Struct.new(:to_io, :q, :thr) # :nodoc: all
+      include Rainbows::Response
+
       def readpartial(size, buf = "")
         to_io.readpartial(size, buf)
       end
@@ -51,7 +53,7 @@ module Rainbows
            begin
              arg1, arg2 = response
              case arg1
-             when :body then Base.write_body(io, arg2)
+             when :body then write_body(io, arg2)
              when :close
                io.close unless io.closed?
                break
@@ -59,7 +61,7 @@ module Rainbows
                io.write(arg1)
              end
            rescue => e
-             Error.app(e)
+             Error.write(io, e)
            end
          end
          CUR.delete(Thread.current)
@@ -71,7 +73,7 @@ module Rainbows
        (self.q ||= queue_writer) << buf
      end

-     def write_body(body)
+     def queue_body(body)
        (self.q ||= queue_writer) << [ :body, body ]
      end

@@ -88,16 +90,17 @@ module Rainbows
       end
     end

-    def write_body(my_sock, body)
-      my_sock.write_body(body)
+    def write_body(my_sock, body) # :nodoc:
+      my_sock.queue_body(body)
     end

-    def process_client(client)
+    def process_client(client) # :nodoc:
       super(MySocket[client])
     end

-    def worker_loop(worker)
+    def worker_loop(worker) # :nodoc:
       MySocket.const_set(:MAX, worker_connections)
+      Rainbows::Response.setup(MySocket)
       super(worker) # accept loop from Unicorn
       CUR.delete_if do |t,q|
         q << nil