pipemaster 0.3.1

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
@@ -0,0 +1,164 @@
+ require "unicorn"
+ require "pipemaster/worker"
+ require "pipemaster/configurator"
+
+ module Pipemaster
+
+   class << self
+     def run(options = {})
+       Server.new(options).start.join
+     end
+   end
+
+   class Server < Unicorn::HttpServer
+
+     def initialize(options = {})
+       self.reexec_pid = 0
+       self.ready_pipe = options.delete(:ready_pipe)
+       self.init_listeners = options[:listeners] ? options[:listeners].dup : []
+       self.config = Configurator.new(options.merge(:use_defaults => true))
+       config.commit!(self, :skip => [:listeners, :pid])
+     end
+
+     attr_accessor :commands
+
+     def start
+       trap(:QUIT) { stop }
+       [:TERM, :INT].each { |sig| trap(sig) { stop false } }
+       self.pid = config[:pid]
+
+       proc_name "pipemaster"
+       logger.info "master process ready" # test_exec.rb relies on this message
+       if ready_pipe
+         ready_pipe.syswrite($$.to_s)
+         ready_pipe.close rescue nil
+         self.ready_pipe = nil
+       end
+
+       trap(:CHLD) { reap_all_workers }
+       trap :USR1 do
+         logger.info "master reopening logs..."
+         Unicorn::Util.reopen_logs
+         logger.info "master done reopening logs"
+       end
+       reloaded = nil
+       trap(:HUP) { reloaded = true ; load_config! }
+       trap(:USR2) { reexec }
+
+       begin
+         config_listeners = config[:listeners].dup
+         if config_listeners.empty? && LISTENERS.empty?
+           config_listeners << DEFAULT_LISTEN
+           init_listeners << DEFAULT_LISTEN
+           START_CTX[:argv] << "-s#{DEFAULT_LISTEN}"
+         end
+         config_listeners.each { |addr| listen(addr) }
+
+         reloaded = false
+         while selected = Kernel.select(LISTENERS)
+           selected.first.each do |socket|
+             client = socket.accept_nonblock
+             worker = Worker.new
+             before_fork.call(self, worker)
+             WORKERS[fork { process_request client, worker }] = worker
+           end
+         end
+       rescue Errno::EINTR
+         retry
+       rescue Errno::EBADF # Shutdown
+         retry if reloaded
+       rescue => ex
+         logger.error "Unhandled master loop exception #{ex.inspect}."
+         logger.error ex.backtrace.join("\n")
+         retry
+       end
+       self
+     end
+
+     def join
+       stop # gracefully shutdown all workers on our way out
+       logger.info "master complete"
+       unlink_pid_safe(pid) if pid
+     end
+
+     def kill_worker(signal, wpid)
+       begin
+         Process.kill(signal, wpid)
+       rescue Errno::ESRCH
+         worker = WORKERS.delete(wpid)
+       end
+     end
+
+     def reap_all_workers
+       begin
+         loop do
+           wpid, status = Process.waitpid2(-1, Process::WNOHANG)
+           wpid or break
+           #if reexec_pid == wpid
+           #  logger.error "reaped #{status.inspect} exec()-ed"
+           #  self.reexec_pid = 0
+           #  self.pid = pid.chomp('.oldbin') if pid
+           #  proc_name 'master'
+           #else
+             worker = WORKERS.delete(wpid) rescue nil
+             logger.info "reaped #{status.inspect} "
+           #end
+         end
+       rescue Errno::ECHILD
+       end
+     end
+
+     def load_config!
+       begin
+         logger.info "reloading pipefile=#{config.config_file}"
+         config[:listeners].replace(init_listeners)
+         config.reload
+         config.commit!(self)
+         Unicorn::Util.reopen_logs
+         logger.info "done reloading pipefile=#{config.config_file}"
+       rescue => e
+         logger.error "error reloading pipefile=#{config.config_file}: " \
+                      "#{e.class} #{e.message}"
+       end
+     end
+
+     def process_request(socket, worker)
+       trap(:QUIT) { exit }
+       [:TERM, :INT].each { |sig| trap(sig) { exit! } }
+       [:USR1, :USR2].each { |sig| trap(sig, nil) }
+       trap(:CHLD, 'DEFAULT')
+
+       WORKERS.clear
+       LISTENERS.each { |sock| sock.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+       after_fork.call(self, worker)
+       $stdout.reopen socket
+       $stdin.reopen socket
+       begin
+         length = socket.readpartial(4).unpack("N")[0]
+         name, *args = socket.read(length).split("\0")
+
+         proc_name "pipemaster: #{name}"
+         logger.info "#{Process.pid} #{name} #{args.join(' ')}"
+
+         ARGV.replace args
+         command = commands[name.to_sym] or raise ArgumentError, "No command #{name}"
+         command.call *args
+         logger.info "#{Process.pid} completed"
+         socket.write 0.chr
+       rescue SystemExit => ex
+         logger.info "#{Process.pid} completed"
+         socket.write ex.status.chr
+       rescue Exception => ex
+         logger.info "#{Process.pid} failed: #{ex.message}"
+         socket.write "#{ex.class.name}: #{ex.message}\n"
+         socket.write 1.chr
+       ensure
+         socket.close_write
+         socket.close
+         exit
+       end
+     end
+
+   end
+ end
+
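The process_request method above defines a small wire protocol: the client sends a 4-byte big-endian length followed by the command name and its arguments joined with NUL bytes; the server streams the command's output back over the same socket and ends the response with a single exit-status byte. The raw-socket sketch below illustrates that exchange. It is not part of the gem; the helper name and the host/port shown are assumptions (when no listener is configured, the server falls back to Unicorn's DEFAULT_LISTEN).

    require "socket"

    # Hypothetical helper illustrating the framing process_request expects:
    # 4-byte big-endian payload length, then "name\0arg1\0arg2...".
    # The response is the command's output followed by one status byte.
    def pipemaster_call(host, port, name, *args)
      payload = ([name.to_s] + args).join("\0")
      sock = TCPSocket.new(host, port)
      sock.write([payload.bytesize].pack("N") + payload)
      response = sock.read                     # server closes the socket when done
      status = response[-1, 1].unpack("C").first
      [status, response[0...-1]]
    ensure
      sock.close if sock
    end

    # e.g. status, output = pipemaster_call("127.0.0.1", 8080, :echo, "hello")

The bundled Pipemaster::Client, used by the test helpers later in this diff, presumably wraps the same framing.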
@@ -0,0 +1,4 @@
+ module Pipemaster
+   class Worker
+   end
+ end
@@ -0,0 +1,18 @@
+ Gem::Specification.new do |spec|
+   spec.name = "pipemaster"
+   spec.version = "0.3.1"
+   spec.author = "Assaf Arkin"
+   spec.email = "assaf@labnotes.org"
+   spec.homepage = "http://labnotes.org"
+   spec.summary = "Use the fork"
+   spec.post_install_message = "To get started run pipemaster --help"
+
+   spec.files = Dir["{bin,lib,test}/**/*", "CHANGELOG", "LICENSE", "README.rdoc", "Rakefile", "Gemfile", "pipemaster.gemspec"]
+   spec.executable = "pipemaster"
+
+   spec.has_rdoc = true
+   spec.extra_rdoc_files = "README.rdoc", "CHANGELOG"
+   spec.rdoc_options = "--title", "Pipemaster #{spec.version}", "--main", "README.rdoc",
+                       "--webcvs", "http://github.com/assaf/#{spec.name}"
+   spec.add_dependency("unicorn", ["~> 0.96"])
+ end
@@ -0,0 +1,268 @@
+ # -*- encoding: binary -*-
+
+ # Copyright (c) 2005 Zed A. Shaw
+ # You can redistribute it and/or modify it under the same terms as Ruby.
+ #
+ # Additional work donated by contributors. See http://mongrel.rubyforge.org/attributions.html
+ # for more information.
+
+ STDIN.sync = STDOUT.sync = STDERR.sync = true # buffering makes debugging hard
+
+ # Some tests watch a log file or a pid file to spring up to check state
+ # Can't rely on inotify on non-Linux and logging to a pipe makes things
+ # more complicated
+ DEFAULT_TRIES = 5
+ DEFAULT_RES = 0.2
+
+ HERE = File.dirname(__FILE__) unless defined?(HERE)
+ %w(lib ext).each do |dir|
+   $LOAD_PATH.unshift "#{HERE}/../#{dir}"
+ end
+
+ require 'test/unit'
+ require 'net/http'
+ require 'digest/sha1'
+ require 'uri'
+ require 'stringio'
+ require 'pathname'
+ require 'tempfile'
+ require 'fileutils'
+ require 'logger'
+ require 'pipemaster'
+ require 'pipemaster/server'
+ require 'pipemaster/client'
+
+ if ENV['DEBUG']
+   require 'ruby-debug'
+   Debugger.start
+ end
+
+ def redirect_test_io
+   orig_err = STDERR.dup
+   orig_out = STDOUT.dup
+   STDERR.reopen("test_stderr.#{$$}.log", "a")
+   STDOUT.reopen("test_stdout.#{$$}.log", "a")
+   STDERR.sync = STDOUT.sync = true
+
+   at_exit do
+     File.unlink("test_stderr.#{$$}.log") rescue nil
+     File.unlink("test_stdout.#{$$}.log") rescue nil
+   end
+
+   begin
+     yield
+   ensure
+     STDERR.reopen(orig_err)
+     STDOUT.reopen(orig_out)
+   end
+ end
+
+ # which(1) exit codes cannot be trusted on some systems
+ # We use UNIX shell utilities in some tests because we don't trust
+ # ourselves to write Ruby 100% correctly :)
+ def which(bin)
+   ex = ENV['PATH'].split(/:/).detect do |x|
+     x << "/#{bin}"
+     File.executable?(x)
+   end or warn "`#{bin}' not found in PATH=#{ENV['PATH']}"
+   ex
+ end
+
+ def hit(address, *args)
+   client = Pipemaster::Client.new(address)
+   code = client.request(*args)
+   [code, client.output.string]
+ end
+
+ # unused_port provides an unused port on +addr+ usable for TCP that is
+ # guaranteed to be unused across all unicorn builds on that system. It
+ # prevents race conditions by using a lock file other unicorn builds
+ # will see. This is required if you perform several builds in parallel
+ # with a continuous integration system or run tests in parallel via
+ # gmake. This is NOT guaranteed to be race-free if you run other
+ # processes that bind to random ports for testing (but the window
+ # for a race condition is very small). You may also set UNICORN_TEST_ADDR
+ # to override the default test address (127.0.0.1).
+ def unused_port(addr = '127.0.0.1')
+   retries = 100
+   base = 5000
+   port = sock = nil
+   begin
+     begin
+       port = base + rand(32768 - base)
+       while port == Unicorn::Const::DEFAULT_PORT
+         port = base + rand(32768 - base)
+       end
+
+       sock = Socket.new(Socket::AF_INET, Socket::SOCK_STREAM, 0)
+       sock.bind(Socket.pack_sockaddr_in(port, addr))
+       sock.listen(5)
+     rescue Errno::EADDRINUSE, Errno::EACCES
+       sock.close rescue nil
+       retry if (retries -= 1) >= 0
+     end
+
+     # since we'll end up closing the random port we just got, there's a race
+     # condition could allow the random port we just chose to reselect itself
+     # when running tests in parallel with gmake. Create a lock file while
+     # we have the port here to ensure that does not happen .
+     lock_path = "#{Dir::tmpdir}/unicorn_test.#{addr}:#{port}.lock"
+     lock = File.open(lock_path, File::WRONLY|File::CREAT|File::EXCL, 0600)
+     at_exit { File.unlink(lock_path) rescue nil }
+   rescue Errno::EEXIST
+     sock.close rescue nil
+     retry
+   end
+   sock.close rescue nil
+   port
+ end
+
+ def try_require(lib)
+   begin
+     require lib
+     true
+   rescue LoadError
+     false
+   end
+ end
+
+ # sometimes the server may not come up right away
+ def retry_hit(uris = [])
+   tries = DEFAULT_TRIES
+   begin
+     hit(uris)
+   rescue Errno::EINVAL, Errno::ECONNREFUSED => err
+     if (tries -= 1) > 0
+       sleep DEFAULT_RES
+       retry
+     end
+     raise err
+   end
+ end
+
+ def assert_shutdown(pid)
+   wait_master_ready("test_stderr.#{pid}.log")
+   assert_nothing_raised { Process.kill(:QUIT, pid) }
+   status = nil
+   assert_nothing_raised { pid, status = Process.waitpid2(pid) }
+   assert status.success?, "exited successfully"
+ end
+
+ def wait_master_ready(master_log)
+   tries = DEFAULT_TRIES
+   while (tries -= 1) > 0
+     begin
+       File.readlines(master_log).grep(/master process ready/)[0] and return
+     rescue Errno::ENOENT
+     end
+     sleep DEFAULT_RES
+   end
+   raise "master process never became ready"
+ end
+
+ def reexec_usr2_quit_test(pid, pid_file)
+   assert File.exist?(pid_file), "pid file OK"
+   assert ! File.exist?("#{pid_file}.oldbin"), "oldbin pid file"
+   assert_nothing_raised { Process.kill(:USR2, pid) }
+   assert_nothing_raised { retry_hit(["http://#{@addr}:#{@port}/"]) }
+   wait_for_file("#{pid_file}.oldbin")
+   wait_for_file(pid_file)
+
+   old_pid = File.read("#{pid_file}.oldbin").to_i
+   new_pid = File.read(pid_file).to_i
+
+   # kill old master process
+   assert_not_equal pid, new_pid
+   assert_equal pid, old_pid
+   assert_nothing_raised { Process.kill(:QUIT, old_pid) }
+   assert_nothing_raised { retry_hit(["http://#{@addr}:#{@port}/"]) }
+   wait_for_death(old_pid)
+   assert_equal new_pid, File.read(pid_file).to_i
+   assert_nothing_raised { retry_hit(["http://#{@addr}:#{@port}/"]) }
+   assert_nothing_raised { Process.kill(:QUIT, new_pid) }
+ end
+
+ def reexec_basic_test(pid, pid_file)
+   results = retry_hit(["http://#{@addr}:#{@port}/"])
+   assert_equal String, results[0].class
+   assert_nothing_raised { Process.kill(0, pid) }
+   master_log = "#{@tmpdir}/test_stderr.#{pid}.log"
+   wait_master_ready(master_log)
+   File.truncate(master_log, 0)
+   nr = 50
+   kill_point = 2
+   assert_nothing_raised do
+     nr.times do |i|
+       hit(["http://#{@addr}:#{@port}/#{i}"])
+       i == kill_point and Process.kill(:HUP, pid)
+     end
+   end
+   wait_master_ready(master_log)
+   assert File.exist?(pid_file), "pid=#{pid_file} exists"
+   new_pid = File.read(pid_file).to_i
+   assert_not_equal pid, new_pid
+   assert_nothing_raised { Process.kill(0, new_pid) }
+   assert_nothing_raised { Process.kill(:QUIT, new_pid) }
+ end
+
+ def wait_for_file(path)
+   tries = DEFAULT_TRIES
+   while (tries -= 1) > 0 && ! File.exist?(path)
+     sleep DEFAULT_RES
+   end
+   assert File.exist?(path), "path=#{path} exists #{caller.inspect}"
+ end
+
+ def xfork(&block)
+   fork do
+     ObjectSpace.each_object(Tempfile) do |tmp|
+       ObjectSpace.undefine_finalizer(tmp)
+     end
+     yield
+   end
+ end
+
+ # can't waitpid on detached processes
+ def wait_for_death(pid)
+   tries = DEFAULT_TRIES
+   while (tries -= 1) > 0
+     begin
+       Process.kill(0, pid)
+       begin
+         Process.waitpid(pid, Process::WNOHANG)
+       rescue Errno::ECHILD
+       end
+       sleep(DEFAULT_RES)
+     rescue Errno::ESRCH
+       return
+     end
+   end
+   raise "PID:#{pid} never died!"
+ end
+
+ # executes +cmd+ and chunks its STDOUT
+ def chunked_spawn(stdout, *cmd)
+   fork {
+     crd, cwr = IO.pipe
+     crd.binmode
+     cwr.binmode
+     crd.sync = cwr.sync = true
+
+     pid = fork {
+       STDOUT.reopen(cwr)
+       crd.close
+       cwr.close
+       exec(*cmd)
+     }
+     cwr.close
+     begin
+       buf = crd.readpartial(16384)
+       stdout.write("#{'%x' % buf.size}\r\n#{buf}")
+     rescue EOFError
+       stdout.write("0\r\n")
+       pid, status = Process.waitpid(pid)
+       exit status.exitstatus
+     end while true
+   }
+ end
+
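The helpers above are shared test scaffolding, largely carried over from unicorn's own test suite. The sketch below shows one way they might be combined into a test case. It is not part of the package, and both the :commands option and the "host:port" address passed to the hit helper are assumptions, since neither the Configurator nor the Pipemaster::Client implementation appears in this diff.

    # Hypothetical test case combining the helpers above.
    class PipemasterServerTest < Test::Unit::TestCase
      def test_echo_command
        addr = '127.0.0.1'
        port = unused_port(addr)
        pid = xfork do
          redirect_test_io do
            # :commands is an assumed option; the Configurator is not shown in this diff
            Pipemaster.run(:listeners => ["#{addr}:#{port}"],
                           :commands  => { :echo => lambda { |*args| $stdout << args.join(" ") } })
          end
        end
        wait_master_ready("test_stderr.#{pid}.log")
        _status, output = hit("#{addr}:#{port}", :echo, "hello")
        assert_match(/hello/, output)
      ensure
        Process.kill(:QUIT, pid) if pid
        wait_for_death(pid) if pid
      end
    end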