rainbows 0.1.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,35 @@
+# -*- encoding: binary -*-
+require 'time'
+require 'rainbows'
+
+module Rainbows
+
+  class HttpResponse < ::Unicorn::HttpResponse
+
+    def self.write(socket, rack_response, out = [])
+      status, headers, body = rack_response
+
+      if Array === out
+        status = CODES[status.to_i] || status
+
+        headers.each do |key, value|
+          next if SKIP.include?(key.downcase)
+          if value =~ /\n/
+            out.concat(value.split(/\n/).map! { |v| "#{key}: #{v}\r\n" })
+          else
+            out << "#{key}: #{value}\r\n"
+          end
+        end
+
+        socket.write("HTTP/1.1 #{status}\r\n" \
+                     "Date: #{Time.now.httpdate}\r\n" \
+                     "Status: #{status}\r\n" \
+                     "#{out.join('')}\r\n")
+      end
+
+      body.each { |chunk| socket.write(chunk) }
+    ensure
+      body.respond_to?(:close) and body.close rescue nil
+    end
+  end
+end
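
The write class method above takes the socket, the raw Rack response triplet, and an optional array of pre-formatted header lines; it emits the status line, Date and Status headers, the remaining headers (skipping those in SKIP and splitting multi-line values), then streams the body chunk by chunk and closes it in the ensure clause. A minimal usage sketch, with a hypothetical client_socket standing in for any IO-like object that responds to write:

    # illustrative only: client_socket is whatever the concurrency model accepted
    rack_response = [200, { 'Content-Type' => 'text/plain' }, ['hello world']]
    Rainbows::HttpResponse.write(client_socket, rack_response)
    # passing a non-Array (e.g. nil) as the third argument skips the status
    # line and headers entirely and writes only the body, per the
    # "Array === out" guard above
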
@@ -0,0 +1,47 @@
+# -*- encoding: binary -*-
+require 'rainbows'
+module Rainbows
+
+  class HttpServer < ::Unicorn::HttpServer
+    include Rainbows
+
+    @@instance = nil
+
+    class << self
+      def setup(block)
+        @@instance.instance_eval(&block)
+      end
+    end
+
+    def initialize(app, options)
+      @@instance = self
+      rv = super(app, options)
+      defined?(@use) or use(:Base)
+      @worker_connections ||= MODEL_WORKER_CONNECTIONS[@use]
+    end
+
+    def use(*args)
+      model = args.shift or return @use
+      mod = begin
+        Rainbows.const_get(model)
+      rescue NameError
+        raise ArgumentError, "concurrency model #{model.inspect} not supported"
+      end
+
+      Module === mod or
+        raise ArgumentError, "concurrency model #{model.inspect} not supported"
+      extend(mod)
+      @use = model
+    end
+
+    def worker_connections(*args)
+      return @worker_connections if args.empty?
+      nr = args.first
+      (Integer === nr && nr > 0) or
+        raise ArgumentError, "worker_connections must be a positive Integer"
+      @worker_connections = nr
+    end
+
+  end
+
+end
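
The use and worker_connections methods form the configuration DSL for choosing a concurrency model: use looks the model name up as a constant under Rainbows and extends the server with it (raising ArgumentError for unknown models), while worker_connections validates a positive Integer cap, otherwise falling back to the model's entry in MODEL_WORKER_CONNECTIONS. HttpServer.setup instance_evals a configuration block on the running instance. A hedged configuration sketch, assuming the gem's top-level Rainbows! helper (defined elsewhere in the gem, not in this diff) forwards its block to HttpServer.setup:

    # in a Unicorn-style config file; model name and limit are example values
    Rainbows! do
      use :Revactor            # any Module under Rainbows::, e.g. :ThreadPool
      worker_connections 128   # must be a positive Integer
    end
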
@@ -0,0 +1,158 @@
+# -*- encoding: binary -*-
+require 'revactor'
+
+# workaround revactor 0.1.4 still using the old Rev::Buffer
+# ref: http://rubyforge.org/pipermail/revactor-talk/2009-October/000034.html
+defined?(Rev::Buffer) or Rev::Buffer = IO::Buffer
+
+module Rainbows
+
+  # Enables use of the Actor model through
+  # {Revactor}[http://revactor.org] under Ruby 1.9. It spawns one
+  # long-lived Actor for every listen socket in the process and spawns a
+  # new Actor for every client connection accept()-ed.
+  # +worker_connections+ will limit the number of client Actors we have
+  # running at any one time.
+  #
+  # Applications using this model are required to be reentrant, but
+  # generally do not have to worry about race conditions. Multiple
+  # instances of the same app may run in the same address space
+  # sequentially (but at interleaved points). Any network dependencies
+  # in the application using this model should be implemented using the
+  # \Revactor library as well.
+
+  module Revactor
+    require 'rainbows/revactor/tee_input'
+
+    include Base
+
+    # once a client is accepted, it is processed in its entirety here
+    # in 3 easy steps: read request, call app, write app response
+    def process_client(client)
+      buf = client.read or return # this probably does not happen...
+      hp = HttpParser.new
+      env = {}
+      remote_addr = ::Revactor::TCP::Socket === client ?
+                    client.remote_addr : LOCALHOST
+
+      begin
+        while ! hp.headers(env, buf)
+          buf << client.read
+        end
+
+        env[Const::RACK_INPUT] = 0 == hp.content_length ?
+                 HttpRequest::NULL_IO :
+                 Rainbows::Revactor::TeeInput.new(client, env, hp, buf)
+        env[Const::REMOTE_ADDR] = remote_addr
+        response = app.call(env.update(RACK_DEFAULTS))
+
+        if 100 == response.first.to_i
+          client.write(Const::EXPECT_100_RESPONSE)
+          env.delete(Const::HTTP_EXPECT)
+          response = app.call(env)
+        end
+
+        out = [ hp.keepalive? ? CONN_ALIVE : CONN_CLOSE ] if hp.headers?
+        HttpResponse.write(client, response, out)
+      end while hp.keepalive? and hp.reset.nil? and env.clear
+      client.close
+    # if we get any error, try to write something back to the client
+    # assuming we haven't closed the socket, but don't get hung up
+    # if the socket is already closed or broken. We'll always ensure
+    # the socket is closed at the end of this function
+    rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+      emergency_response(client, Const::ERROR_500_RESPONSE)
+    rescue HttpParserError # try to tell the client they're bad
+      buf.empty? or emergency_response(client, Const::ERROR_400_RESPONSE)
+    rescue Object => e
+      emergency_response(client, Const::ERROR_500_RESPONSE)
+      logger.error "Read error: #{e.inspect}"
+      logger.error e.backtrace.join("\n")
+    end
+
+    # runs inside each forked worker, this sits around and waits
+    # for connections and doesn't die until the parent dies (or is
+    # given an INT, QUIT, or TERM signal)
+    def worker_loop(worker)
+      ppid = master_pid
+      init_worker_process(worker)
+      alive = worker.tmp # tmp is our lifeline to the master process
+
+      trap(:USR1) { reopen_worker_logs(worker.nr) }
+      trap(:QUIT) { alive = false; LISTENERS.each { |s| s.close rescue nil } }
+      [:TERM, :INT].each { |sig| trap(sig) { exit!(0) } } # instant shutdown
+
+      root = Actor.current
+      root.trap_exit = true
+
+      limit = worker_connections
+      listeners = revactorize_listeners
+      logger.info "worker=#{worker.nr} ready with Revactor"
+      clients = 0
+
+      listeners.map! do |s|
+        Actor.spawn(s) do |l|
+          begin
+            while clients >= limit
+              logger.info "busy: clients=#{clients} >= limit=#{limit}"
+              Actor.receive { |filter| filter.when(:resume) {} }
+            end
+            actor = Actor.spawn(l.accept) { |c| process_client(c) }
+            clients += 1
+            root.link(actor)
+          rescue Errno::EAGAIN, Errno::ECONNABORTED
+          rescue Object => e
+            if alive
+              logger.error "Unhandled listen loop exception #{e.inspect}."
+              logger.error e.backtrace.join("\n")
+            end
+          end while alive
+        end
+      end
+
+      nr = 0
+      begin
+        Actor.receive do |filter|
+          filter.after(1) do
+            if alive
+              alive.chmod(nr = 0 == nr ? 1 : 0)
+              listeners.each { |l| alive = false if l.dead? }
+              ppid == Process.ppid or alive = false
+            end
+          end
+          filter.when(Case[:exit, Actor, Object]) do |_,actor,_|
+            orig = clients
+            clients -= 1
+            orig >= limit and listeners.each { |l| l << :resume }
+          end
+        end
+      end while alive || clients > 0
+    end
+
+    private
+
+    # write a response without caring if it went out or not
+    # This is in the case of untrappable errors
+    def emergency_response(client, response_str)
+      client.instance_eval do
+        # this is Revactor implementation dependent
+        @_io.write_nonblock(response_str) rescue nil
+      end
+      client.close rescue nil
+    end
+
+    def revactorize_listeners
+      LISTENERS.map do |s|
+        if TCPServer === s
+          ::Revactor::TCP.listen(s, nil)
+        elsif defined?(::Revactor::UNIX) && UNIXServer === s
+          ::Revactor::UNIX.listen(s)
+        else
+          logger.error "your version of Revactor can't handle #{s.inspect}"
+          nil
+        end
+      end.compact
+    end
+
+  end
+end
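
The module documentation above asks that an application's own network dependencies also go through Revactor, so that waiting on an upstream service yields to other Actors instead of blocking the whole worker. A hedged sketch of such an outbound call from a Rack endpoint, assuming a Revactor::TCP.connect call from the same revactor library whose listen/read/write calls appear above (the class and upstream address are illustrative):

    # illustrative Rack app making a cooperative outbound call under Revactor
    class UpstreamPing
      def call(env)
        sock = ::Revactor::TCP.connect('127.0.0.1', 8080) # hypothetical upstream
        sock.write("GET / HTTP/1.0\r\n\r\n")
        reply = sock.read   # yields to other Actors while waiting for data
        sock.close
        [200, { 'Content-Type' => 'text/plain' }, [reply]]
      end
    end
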
@@ -0,0 +1,44 @@
+# -*- encoding: binary -*-
+require 'rainbows/revactor'
+
+module Rainbows
+  module Revactor
+
+    # acts like tee(1) on the input to provide an input-like stream
+    # while providing rewindable semantics through a File/StringIO
+    # backing store. On the first pass, the input is only read on demand
+    # so your Rack application can use input notification (upload progress
+    # and the like). This should fully conform to the Rack::InputWrapper
+    # specification on the public API. This class is intended to be a
+    # strict interpretation of Rack::InputWrapper functionality and will
+    # not support any deviations from it.
+    class TeeInput < ::Unicorn::TeeInput
+
+      private
+
+      # tees off a +length+ chunk of data from the input into the IO
+      # backing store as well as returning it. +dst+ must be specified.
+      # returns nil if reading from the input returns nil
+      def tee(length, dst)
+        unless parser.body_eof?
+          begin
+            if parser.filter_body(dst, buf << socket.read).nil?
+              @tmp.write(dst)
+              return dst
+            end
+          rescue EOFError
+          end
+        end
+        finalize_input
+      end
+
+      def finalize_input
+        while parser.trailers(req, buf).nil?
+          buf << socket.read
+        end
+        self.socket = nil
+      end
+
+    end
+  end
+end
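
Because TeeInput only pulls body data off the client socket as the application asks for it, teeing it into the File/StringIO backing store as it goes, a Rack application can stream large uploads and still rewind afterwards. An illustrative handler that reads env['rack.input'] incrementally, written against the generic Rack input API rather than anything specific to this gem:

    # counts upload bytes in 16 KB chunks; each read() triggers tee() above
    class UploadCounter
      def call(env)
        input = env['rack.input']
        bytes = 0
        while chunk = input.read(16384)   # returns nil at end of body
          bytes += chunk.size
        end
        input.rewind   # possible because of the rewindable backing store
        [200, { 'Content-Type' => 'text/plain' }, ["#{bytes} bytes received\n"]]
      end
    end
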
@@ -0,0 +1,96 @@
+# -*- encoding: binary -*-
+
+module Rainbows
+
+  # Implements a worker thread pool model. This is suited for platforms
+  # where the cost of dynamically spawning a new thread for every new
+  # client connection is too high.
+  #
+  # Applications using this model are required to be thread-safe.
+  # Threads are never spawned dynamically under this model. If you're
+  # connecting to external services and need to perform DNS lookups,
+  # consider using the "resolv-replace" library which replaces parts of
+  # the core Socket package with concurrent DNS lookup capabilities.
+  #
+  # This model is less suited for many slow clients than the others and
+  # thus a lower +worker_connections+ setting is recommended.
+  module ThreadPool
+
+    include Base
+
+    def worker_loop(worker)
+      init_worker_process(worker)
+      threads = ThreadGroup.new
+      alive = worker.tmp
+      nr = 0
+
+      # closing anything we IO.select on will raise EBADF
+      trap(:USR1) { reopen_worker_logs(worker.nr) rescue nil }
+      trap(:QUIT) { alive = false; LISTENERS.map! { |s| s.close rescue nil } }
+      [:TERM, :INT].each { |sig| trap(sig) { exit(0) } } # instant shutdown
+      logger.info "worker=#{worker.nr} ready with ThreadPool"
+
+      while alive && master_pid == Process.ppid
+        maintain_thread_count(threads)
+        threads.list.each do |thr|
+          alive.chmod(nr += 1)
+          thr.join(timeout / 2.0) and break
+        end
+      end
+      join_worker_threads(threads)
+    end
+
+    def join_worker_threads(threads)
+      logger.info "Joining worker threads..."
+      t0 = Time.now
+      timeleft = timeout
+      threads.list.each { |thr|
+        thr.join(timeleft)
+        timeleft -= (Time.now - t0)
+      }
+      logger.info "Done joining worker threads."
+    end
+
+    def maintain_thread_count(threads)
+      threads.list.each do |thr|
+        next if (Time.now - (thr[:t] || next)) < timeout
+        thr.kill
+        logger.error "killed #{thr.inspect} for being too old"
+      end
+
+      while threads.list.size < worker_connections
+        threads.add(new_worker_thread)
+      end
+    end
+
+    def new_worker_thread
+      Thread.new {
+        alive = true
+        thr = Thread.current
+        begin
+          ret = begin
+            thr[:t] = Time.now
+            IO.select(LISTENERS, nil, nil, timeout/2.0) or next
+          rescue Errno::EINTR
+            retry
+          rescue Errno::EBADF
+            return
+          end
+          ret.first.each do |sock|
+            begin
+              process_client(sock.accept_nonblock)
+              thr[:t] = Time.now
+            rescue Errno::EAGAIN, Errno::ECONNABORTED
+            end
+          end
+        rescue Object => e
+          if alive
+            logger.error "Unhandled listen loop exception #{e.inspect}."
+            logger.error e.backtrace.join("\n")
+          end
+        end while alive = LISTENERS.first
+      }
+    end
+
+  end
+end
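
Following the module's own advice, a ThreadPool deployment would typically pair a deliberately low worker_connections (each connection occupies a pooled thread) with resolv-replace for concurrent DNS lookups. A hedged configuration sketch using the same Rainbows! block assumed earlier; the numbers are examples, not defaults from this release:

    require 'resolv-replace'   # concurrent DNS lookups, as recommended above

    Rainbows! do
      use :ThreadPool
      worker_connections 30    # keep this low: every connection pins a thread
    end
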
@@ -0,0 +1,79 @@
+# -*- encoding: binary -*-
+module Rainbows
+
+  # Spawns a new thread for every client connection we accept(). This
+  # model is recommended for platforms where spawning threads is
+  # inexpensive.
+  #
+  # If you're connecting to external services and need to perform DNS
+  # lookups, consider using the "resolv-replace" library which replaces
+  # parts of the core Socket package with concurrent DNS lookup
+  # capabilities
+  module ThreadSpawn
+
+    include Base
+
+    def worker_loop(worker)
+      init_worker_process(worker)
+      threads = ThreadGroup.new
+      alive = worker.tmp
+      nr = 0
+      limit = worker_connections
+
+      # closing anything we IO.select on will raise EBADF
+      trap(:USR1) { reopen_worker_logs(worker.nr) rescue nil }
+      trap(:QUIT) { alive = false; LISTENERS.map! { |s| s.close rescue nil } }
+      [:TERM, :INT].each { |sig| trap(sig) { exit(0) } } # instant shutdown
+      logger.info "worker=#{worker.nr} ready with ThreadSpawn"
+
+      while alive && master_pid == Process.ppid
+        ret = begin
+          IO.select(LISTENERS, nil, nil, timeout/2.0) or next
+        rescue Errno::EINTR
+          retry
+        rescue Errno::EBADF
+          alive = false
+        end
+
+        ret.first.each do |l|
+          while threads.list.size >= limit
+            nuke_old_thread(threads)
+          end
+          c = begin
+            l.accept_nonblock
+          rescue Errno::EINTR, Errno::ECONNABORTED
+            next
+          end
+          threads.add(Thread.new(c) { |c|
+            Thread.current[:t] = Time.now
+            process_client(c)
+          })
+        end
+      end
+      join_spawned_threads(threads)
+    end
+
+    def nuke_old_thread(threads)
+      threads.list.each do |thr|
+        next if (Time.now - (thr[:t] || next)) < timeout
+        thr.kill
+        logger.error "killed #{thr.inspect} for being too old"
+        return
+      end
+      # nothing to kill, yield to another thread
+      Thread.pass
+    end
+
+    def join_spawned_threads(threads)
+      logger.info "Joining spawned threads..."
+      t0 = Time.now
+      timeleft = timeout
+      threads.list.each { |thr|
+        thr.join(timeleft)
+        timeleft -= (Time.now - t0)
+      }
+      logger.info "Done joining spawned threads."
+    end
+
+  end
+end
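
Like the ThreadPool model above, this model runs process_client for each connection on its own Ruby thread, so the application itself must be thread-safe. An illustrative app guarding shared state with a Mutex (the class name and counter are made up for the example):

    # hypothetical thread-safe Rack app suitable for the threaded models
    class HitCounter
      def initialize
        @lock = Mutex.new
        @hits = 0
      end

      def call(env)
        count = @lock.synchronize { @hits += 1 }   # serialize access to @hits
        [200, { 'Content-Type' => 'text/plain' }, ["hit #{count}\n"]]
      end
    end
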