rainbows 0.5.0 → 0.6.0

@@ -57,18 +57,8 @@ module Rainbows
  HttpResponse.write(client, response, out)
  end while alive and hp.reset.nil? and env.clear
  client.close
- # if we get any error, try to write something back to the client
- # assuming we haven't closed the socket, but don't get hung up
- # if the socket is already closed or broken. We'll always ensure
- # the socket is closed at the end of this function
- rescue EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
- emergency_response(client, Const::ERROR_500_RESPONSE)
- rescue HttpParserError # try to tell the client they're bad
- buf.empty? or emergency_response(client, Const::ERROR_400_RESPONSE)
- rescue Object => e
- emergency_response(client, Const::ERROR_500_RESPONSE)
- logger.error "Read error: #{e.inspect}"
- logger.error e.backtrace.join("\n")
+ rescue => e
+ handle_error(client, e)
  end

  # runs inside each forked worker, this sits around and waits
@@ -101,35 +91,41 @@ module Rainbows
  end
  end

- m = 0
- check_quit = lambda do
- worker.tmp.chmod(m = 0 == m ? 1 : 0)
- G.alive = false if master_pid != Process.ppid
- end
-
  begin
  Actor.receive do |filter|
- filter.after(1, &check_quit)
+ filter.after(1) { G.tick }
  filter.when(Case[:exit, Actor, Object]) do |_,actor,_|
  orig = clients.size
  clients.delete(actor.object_id)
  orig >= limit and listeners.each { |l| l << :resume }
- check_quit.call
+ G.tick
  end
  end
  end while G.alive || clients.size > 0
  end

- private
-
- # write a response without caring if it went out or not
- # This is in the case of untrappable errors
- def emergency_response(client, response_str)
+ # if we get any error, try to write something back to the client
+ # assuming we haven't closed the socket, but don't get hung up
+ # if the socket is already closed or broken. We'll always ensure
+ # the socket is closed at the end of this function
+ def handle_error(client, e)
+ msg = case e
+ when EOFError,Errno::ECONNRESET,Errno::EPIPE,Errno::EINVAL,Errno::EBADF
+ Const::ERROR_500_RESPONSE
+ when HttpParserError # try to tell the client they're bad
+ Const::ERROR_400_RESPONSE
+ else
+ logger.error "Read error: #{e.inspect}"
+ logger.error e.backtrace.join("\n")
+ Const::ERROR_500_RESPONSE
+ end
  client.instance_eval do
  # this is Revactor implementation dependent
- @_io.write_nonblock(response_str) rescue nil
+ @_io.write_nonblock(msg)
+ close
  end
- client.close rescue nil
+ rescue
+ nil
  end

  def revactorize_listeners!
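The old emergency_response helper is folded into handle_error above: the exception class picks the canned response, and the trailing bare rescue makes the write-and-close strictly best effort. A minimal standalone sketch of that last pattern, where io is any raw Ruby IO rather than the Revactor-wrapped client in the diff:

    # Best-effort error reply: push a canned response out and close, but
    # never let an already-dead client raise out of the error path.
    # Illustrative only, not the Rainbows implementation.
    def emergency_write(io, response)
      io.write_nonblock(response)
      io.close
    rescue
      nil  # EPIPE/ECONNRESET/EBADF etc.: the client is already gone
    end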
@@ -21,19 +21,27 @@ module Rainbows
  # returns nil if reading from the input returns nil
  def tee(length, dst)
  unless parser.body_eof?
- begin
- if parser.filter_body(dst, buf << socket.read).nil?
- @tmp.write(dst)
- return dst
+ if parser.filter_body(dst, buf << socket.read).nil?
+ @tmp.write(dst)
+ diff = dst.size - length
+ if diff > 0
+ dst.replace(dst[0,length])
+ @tmp.seek(-diff, IO::SEEK_CUR)
  end
- rescue EOFError
+ return dst
  end
  end
  finalize_input
+ rescue => e
+ client_error(e)
  end

  def finalize_input
  while parser.trailers(req, buf).nil?
+ # Don't worry about raising ClientShutdown here on EOFError, tee()
+ # will catch EOFError when app is processing it, otherwise in
+ # initialize we never get any chance to enter the app so the
+ # EOFError will just get trapped by Unicorn and not the Rack app
  buf << socket.read
  end
  self.socket = nil
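The new branch in tee() handles filter_body decoding more than `length` bytes in one pass: the chunk returned to the caller is trimmed back to `length`, and the spool file position is rewound by the overflow so those bytes are picked up by the next read. A small standalone illustration of that bookkeeping with made-up sizes, not the real TeeInput object:

    require 'tempfile'

    tmp    = Tempfile.new('tee-demo')
    length = 4           # what the caller asked for
    dst    = "abcdefg"   # what the body filter actually produced (7 bytes)

    tmp.write(dst)                    # spool everything that was decoded
    diff = dst.size - length          # => 3 bytes of overflow
    if diff > 0
      dst.replace(dst[0, length])     # only hand back the requested amount
      tmp.seek(-diff, IO::SEEK_CUR)   # next read from the spool resumes at "efg"
    end

    p dst       # => "abcd"
    p tmp.read  # => "efg", the overflow is not lost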
@@ -28,16 +28,15 @@ module Rainbows
  def worker_loop(worker)
  init_worker_process(worker)
  pool = (1..worker_connections).map { new_worker_thread }
- m = 0

- while G.alive && master_pid == Process.ppid
+ while G.alive
+ # if any worker dies, something is serious wrong, bail
  pool.each do |thr|
- worker.tmp.chmod(m = 0 == m ? 1 : 0)
- # if any worker dies, something is serious wrong, bail
- thr.join(1) and break
+ G.tick
+ thr.join(1) and G.quit!
  end
  end
- join_threads(pool, worker)
+ join_threads(pool)
  end

  def new_worker_thread
@@ -22,21 +22,18 @@ module Rainbows
  def worker_loop(worker)
  init_worker_process(worker)
  threads = ThreadGroup.new
- alive = worker.tmp
- m = 0
  limit = worker_connections

  begin
- G.alive && master_pid == Process.ppid or break
  ret = begin
- alive.chmod(m = 0 == m ? 1 : 0)
+ G.tick or break
  IO.select(LISTENERS, nil, nil, 1) or next
  rescue Errno::EINTR
  retry
  rescue Errno::EBADF, TypeError
  break
  end
- alive.chmod(m = 0 == m ? 1 : 0)
+ G.tick

  ret.first.each do |l|
  # Sleep if we're busy, another less busy worker process may
@@ -57,7 +54,7 @@ module Rainbows
  rescue Object => e
  listen_loop_error(e)
  end while true
- join_threads(threads.list, worker)
+ join_threads(threads.list)
  end

  end
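All three concurrency models above replace the inlined heartbeat (toggling the mode bits of worker.tmp plus the master_pid == Process.ppid check) with calls to G.tick and G.quit!. The G module itself is not part of this diff; the sketch below only illustrates, under that assumption, what such a helper would have to do to be a drop-in for the removed lines:

    # Hypothetical sketch of the G helper called above; the real
    # Rainbows implementation is not shown in this diff.
    module G
      class << self
        attr_accessor :alive, :tmp, :master_pid

        def quit!
          self.alive = false
        end

        # heartbeat: flip the chmod bit on the shared temp file so the
        # master sees us as alive, and give up if we have been orphaned
        def tick
          tmp.chmod(@m = 0 == @m ? 1 : 0)
          quit! if Process.ppid != master_pid
          alive
        end
      end
    end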
data/local.mk.sample CHANGED
@@ -6,7 +6,7 @@

  DLEXT := so
  gems := rack-1.0.1
- # gems += unicorn-0.93.5 # installed via setup.rb
+ # gems += unicorn-0.95.0 # installed via setup.rb
  gems += rev-0.3.1 iobuffer-0.1.1
  gems += eventmachine-0.12.10
  gems += async_sinatra-0.1.5 sinatra-0.9.4
data/rainbows.gemspec CHANGED
@@ -14,7 +14,7 @@ Gem::Specification.new do |s|
  s.name = %q{rainbows}
  s.version = ENV["VERSION"]

- s.authors = ["Rainbows! developers"]
+ s.authors = ["Rainbows! hackers"]
  s.date = Time.now.utc.strftime('%Y-%m-%d')
  s.description = File.read("README").split(/\n\n/)[1]
  s.email = %q{rainbows-talk@rubyforge.org}
@@ -41,7 +41,8 @@ Gem::Specification.new do |s|
  s.test_files = test_files

  # we need Unicorn for the HTTP parser and process management
- s.add_dependency(%q<unicorn>, ["~> 0.94.0"])
+ # Unicorn 0.95.0 should be released on or around Nov 13/14/15, 2009
+ s.add_dependency(%q<unicorn>, ["~> 0.95.0"])

  # Unicorn already depends on Rack
  # s.add_dependency(%q<rack>)
data/t/GNUmakefile CHANGED
@@ -19,6 +19,7 @@ endif
  export RUBYLIB RUBY_VERSION

  models := ThreadPool ThreadSpawn Rev EventMachine
+ models += RevThreadSpawn
  ifeq ($(RUBY_VERSION),1.9.1) # 1.9.2-preview1 was broken
  models += Revactor
  endif
data/t/bin/sha1sum.rb ADDED
@@ -0,0 +1,23 @@
+ #!/usr/bin/env ruby
+ # -*- encoding: binary -*-
+
+ # Reads from stdin and outputs the SHA1 hex digest of the input this is
+ # ONLY used as a last resort, our test code will try to use sha1sum(1),
+ # openssl(1), or gsha1sum(1) before falling back to using this. We try
+ # all options first because we have a strong and healthy distrust of our
+ # Ruby abilities in general, and *especially* when it comes to
+ # understanding (and trusting the implementation of) Ruby 1.9 encoding.
+
+ require 'digest/sha1'
+ $stdout.sync = $stderr.sync = true
+ $stdout.binmode
+ $stdin.binmode
+ bs = 16384
+ digest = Digest::SHA1.new
+ if buf = $stdin.read(bs)
+ begin
+ digest.update(buf)
+ end while $stdin.read(bs, buf)
+ end
+
+ $stdout.syswrite("#{digest.hexdigest}\n")
@@ -1,12 +1,10 @@
  use Rack::ContentLength
- fifo = ENV['FIFO_PATH'] or abort "FIFO_PATH not defined"
  headers = { 'Content-Type' => 'text/plain' }
  run lambda { |env|
  case env['PATH_INFO']
  when "/block-forever"
- # one of these should block forever
  Process.kill(:STOP, $$)
- ::File.open(fifo, "rb") { |fp| fp.syswrite("NEVER\n") }
+ sleep # in case STOP signal is not received in time
  [ 500, headers, [ "Should never get here\n" ] ]
  else
  [ 200, headers, [ "#$$\n" ] ]
@@ -0,0 +1,19 @@
+ # SHA1 checksum generator
+ require 'digest/sha1'
+ use Rack::ContentLength
+ cap = 16384
+ app = lambda do |env|
+ /\A100-continue\z/i =~ env['HTTP_EXPECT'] and
+ return [ 100, {}, [] ]
+ digest = Digest::SHA1.new
+ input = env['rack.input']
+ if buf = input.read(rand(cap))
+ begin
+ raise "#{buf.size} > #{cap}" if buf.size > cap
+ digest.update(buf)
+ end while input.read(rand(cap), buf)
+ end
+
+ [ 200, {'Content-Type' => 'text/plain'}, [ digest.hexdigest << "\n" ] ]
+ end
+ run app
data/t/sha1.ru CHANGED
@@ -1,5 +1,5 @@
  # SHA1 checksum generator
- bs = ENV['bs'] ? ENV['bs'].to_i : 4096
+ bs = ENV['bs'] ? ENV['bs'].to_i : 16384
  require 'digest/sha1'
  use Rack::ContentLength
  app = lambda do |env|
@@ -7,10 +7,11 @@ app = lambda do |env|
  return [ 100, {}, [] ]
  digest = Digest::SHA1.new
  input = env['rack.input']
- buf = input.read(bs)
- begin
- digest.update(buf)
- end while input.read(bs, buf)
+ if buf = input.read(bs)
+ begin
+ digest.update(buf)
+ end while input.read(bs, buf)
+ end

  [ 200, {'Content-Type' => 'text/plain'}, [ digest.hexdigest << "\n" ] ]
  end
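The sha1.ru change mirrors the other two checksum scripts in this diff: `input.read(bs)` returns nil rather than "" for an empty body, so the unguarded loop would have handed nil to Digest::SHA1#update. The two-argument `read(bs, buf)` form refills `buf` in place and also returns nil at EOF, which is what ends the loop. A standalone check of the idiom against an empty body, using StringIO in place of env['rack.input']:

    require 'digest/sha1'
    require 'stringio'

    bs     = 16384
    input  = StringIO.new("")   # empty request body, the case being fixed
    digest = Digest::SHA1.new

    if buf = input.read(bs)     # nil for an empty body, so the loop is skipped
      begin
        digest.update(buf)
      end while input.read(bs, buf)  # reuses buf, nil at EOF ends the loop
    end

    puts digest.hexdigest
    # da39a3ee5e6b4b0d3255bfef95601890afd80709, the SHA-1 of an empty body
    # (the null_sha1 value checked by the test scripts in this change)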
@@ -0,0 +1,9 @@
+ use Rack::ContentLength
+ use Rack::ContentType
+ run lambda { |env|
+ if env['rack.multithread'] && env['rainbows.model'] == :RevThreadSpawn
+ [ 200, {}, [ env.inspect << "\n" ] ]
+ else
+ raise "rack.multithread is false"
+ end
+ }
@@ -3,7 +3,7 @@
  nr_client=${nr_client-2}
  . ./test-lib.sh

- t_plan 18 "reopen rotated logs"
+ t_plan 19 "reopen rotated logs"

  t_begin "setup and startup" && {
  rtmpfiles curl_out curl_err r_rot
@@ -44,6 +44,16 @@ t_begin "wait for rotated log to reappear" && {
  done
  }

+ t_begin "wait for worker to reopen logs" && {
+ nr=60
+ re="worker=.* done reopening logs"
+ while ! grep "$re" < $r_err >/dev/null && test $nr -ge 0
+ do
+ sleep 1
+ nr=$(( $nr - 1 ))
+ done
+ }
+
  dbgcat r_rot
  dbgcat r_err

@@ -7,7 +7,7 @@ t_begin "setup and startup" && {
  rainbows_setup $model
  echo timeout 3 >> $unicorn_config
  echo preload_app true >> $unicorn_config
- FIFO_PATH=$fifo rainbows -D heartbeat-timeout.ru -c $unicorn_config
+ rainbows -D heartbeat-timeout.ru -c $unicorn_config
  rainbows_wait_start
  }

@@ -37,7 +37,7 @@ t_begin "ensure timeout took 3-6 seconds" && {
  }

  t_begin "wait for new worker to start up" && {
- test x = x"$(cat $fifo)"
+ test xSTART = x"$(cat $fifo)"
  }

  t_begin "we get a fresh new worker process" && {
@@ -0,0 +1,50 @@
+ #!/bin/sh
+ . ./test-lib.sh
+ t_plan 7 "ensure worker follows master to death"
+
+ t_begin "setup" && {
+ rtmpfiles curl_err curl_out
+ rainbows_setup
+ echo timeout 3 >> $unicorn_config
+ rainbows -D -c $unicorn_config worker-follows-master-to-death.ru
+ rainbows_wait_start
+ }
+
+ t_begin "read worker PID" && {
+ worker_pid=$(curl -sSf http://$listen/pid)
+ t_info "worker_pid=$worker_pid"
+ }
+
+ t_begin "start a long sleeping request" && {
+ curl -sSfv -T- </dev/null http://$listen/sleep/2 >$curl_out 2> $fifo &
+ curl_pid=$!
+ t_info "curl_pid=$curl_pid"
+ }
+
+ t_begin "nuke the master once we're connected" && {
+ awk -v rainbows_pid=$rainbows_pid '
+ { print $0 }
+ /100 Continue/ {
+ print "awk: sending SIGKILL to", rainbows_pid
+ system("kill -9 "rainbows_pid)
+ }' < $fifo > $curl_err
+ wait
+ }
+
+ t_begin "worker is no longer running" && {
+ sleep 6
+ kill -0 $worker_pid 2> $tmp && false
+ test -s $tmp
+ }
+
+ t_begin "sleepy curl request is no longer running" && {
+ kill -0 $curl_pid 2> $tmp && false
+ test -s $tmp
+ }
+
+ t_begin "sleepy curl request completed gracefully" && {
+ test x$(cat $curl_out) = x$worker_pid
+ dbgcat curl_err
+ }
+
+ t_done
@@ -0,0 +1,181 @@
+ #!/bin/sh
+ . ./test-lib.sh
+ test -r random_blob || die "random_blob required, run with 'make $0'"
+
+ t_plan 14 "ensure we're accounting worker_connections properly"
+ nr=2
+
+ t_begin "setup" && {
+ rtmpfiles a b c d
+ rainbows_setup $model $nr
+ rainbows -D sha1.ru -c $unicorn_config
+ rainbows_wait_start
+ }
+
+ null_sha1=da39a3ee5e6b4b0d3255bfef95601890afd80709
+
+ t_begin "fire off concurrent processes" && {
+
+ req='POST / HTTP/1.1\r\n'
+ req="$req"'Host: example.com\r\n'
+ req="$req"'Transfer-Encoding: chunked\r\n\r\n'
+
+ for i in a b c d
+ do
+ rtmpfiles ${i}_fifo ${i}_tmp
+ eval 'i_fifo=$'${i}_fifo
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ (
+ (
+ cat $i_fifo > $i_tmp &
+ # need a full HTTP request to get around
+ # httpready
+ printf "$req"
+ sleep 5
+ printf '0\r\n\r\n'
+ wait
+ echo ok > $i
+ ) | socat - TCP:$listen > $i_fifo
+ ) &
+ done
+ wait
+ }
+
+ t_begin "check results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test xok = x$(cat $i)
+ test x$null_sha1 = x$(tail -1 $i_tmp)
+ done
+ }
+
+ t_begin "repeat concurrent tests with faster clients" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
+ done
+ wait
+ }
+
+ t_begin "check results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test ! -s $i_tmp
+ test x$null_sha1 = x$(cat $i)
+ done
+ }
+
+ t_begin "fire off truncated concurrent requests" && {
+
+ req='POST / HTTP/1.1\r\n'
+ req="$req"'Host: example.com\r\n'
+ req="$req"'Transfer-Encoding: chunked\r\n'
+
+ for i in a b c d
+ do
+ rtmpfiles ${i}_tmp
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ (
+ (
+ # need a full HTTP request to get around
+ # httpready
+ printf "$req"
+ echo ok > $i
+ ) | socat - TCP:$listen > $i_tmp
+ ) &
+ done
+ wait
+ }
+
+ t_begin "check broken results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test xok = x$(cat $i)
+ dbgcat i_tmp
+ done
+ }
+
+ t_begin "repeat concurrent tests with faster clients" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
+ done
+ wait
+ }
+
+ t_begin "check results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test ! -s $i_tmp
+ test x$null_sha1 = x$(cat $i)
+ done
+ }
+
+ t_begin "fire off garbage" && {
+ for i in a b c d
+ do
+ rtmpfiles ${i}_fifo ${i}_tmp
+ eval 'i_fifo=$'${i}_fifo
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ (
+ (
+ cat $i_fifo > $i_tmp &
+ dd if=random_blob bs=4096 count=1
+ wait
+ echo ok > $i
+ ) | socat - TCP:$listen > $i_fifo
+ ) &
+ done
+ wait
+ }
+
+ t_begin "check broken results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test xok = x$(cat $i)
+ grep -F 'HTTP/1.1 400 Bad Request' $i_tmp
+ done
+ }
+
+ t_begin "repeat concurrent tests with faster clients" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ curl -sSf -T- </dev/null http://$listen/ > $i 2> $i_tmp &
+ done
+ wait
+ }
+
+ t_begin "check results" && {
+ for i in a b c d
+ do
+ eval 'i_tmp=$'${i}_tmp
+ eval "i=$"$i
+ test ! -s $i_tmp
+ test x$null_sha1 = x$(cat $i)
+ done
+ }
+
+ t_begin "teardown" && {
+ kill $rainbows_pid
+ }
+
+ t_done