resque-pool-diet 0.3.1

@@ -0,0 +1,76 @@
+ require 'aruba/cucumber'
+ require 'aruba/api'
+ require 'aruba/process'
+
+ module Aruba
+
+   module Api
+
+     # this is a horrible hack, to make sure that it's done what it needs to do
+     # before we do our next step
+     def keep_trying(timeout=10, tries=0)
+       puts "Try: #{tries}" if @announce_env
+       yield
+     rescue RSpec::Expectations::ExpectationNotMetError
+       if tries < timeout
+         sleep 1
+         tries += 1
+         retry
+       else
+         raise
+       end
+     end
+
+     def run_background(cmd)
+       @background = run(cmd)
+     end
+
+     def send_signal(cmd, signal)
+       announce_or_puts "$ kill -#{signal} #{processes[cmd].pid}" if @announce_env
+       processes[cmd].send_signal signal
+     end
+
+     def background_pid
+       @pid_from_pidfile || @background.pid
+     end
+
+     # like all_stdout, but doesn't stop processes first
+     def interactive_stdout
+       only_processes.inject("") { |out, ps| out << ps.stdout(@aruba_keep_ansi) }
+     end
+
+     # like all_stderr, but doesn't stop processes first
+     def interactive_stderr
+       only_processes.inject("") { |out, ps| out << ps.stderr(@aruba_keep_ansi) }
+     end
+
+     # like all_output, but doesn't stop processes first
+     def interactive_output
+       interactive_stdout << interactive_stderr
+     end
+
+     def interpolate_background_pid(string)
+       interpolated = string.gsub('$PID', background_pid.to_s)
+       announce_or_puts interpolated if @announce_env
+       interpolated
+     end
+
+     def kill_all_processes!
+       # stop_processes!
+       #rescue
+       #  processes.each {|cmd,process| send_signal(cmd, 'KILL') }
+       #  raise
+     end
+
+   end
+
+   class Process
+     def pid
+       @process.pid
+     end
+     def send_signal signal
+       @process.send :send_signal, signal
+     end
+   end
+
+ end
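
These helpers extend Aruba so that cucumber steps can poll a still-running background resque-pool process instead of asserting against its output once. A hypothetical step definition built on them might look like the sketch below; the step wording and the RSpec `should` syntax are illustrative assumptions, not code shipped with the gem.

# Hypothetical cucumber step using the helpers above; polls the live process
# output for up to ~10 seconds before giving up and re-raising the failure.
Then /^the pool output should eventually contain "([^"]*)"$/ do |expected|
  keep_trying do
    interactive_output.should include(interpolate_background_pid(expected))
  end
end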
@@ -0,0 +1 @@
+ ENV["RAILS_ENV"] = "test"
@@ -0,0 +1,120 @@
+ require 'trollop'
+ require 'resque/pool'
+ require 'fileutils'
+
+ module Resque
+   class Pool
+     module CLI
+       extend self
+
+       def run
+         opts = parse_options
+         daemonize if opts[:daemon]
+         manage_pidfile opts[:pidfile]
+         redirect opts
+         setup_environment opts
+         start_pool
+       end
+
+       def parse_options
+         opts = Trollop::options do
+           version "resque-pool #{VERSION} (c) nicholas a. evans"
+           banner <<-EOS
+ resque-pool is the best way to manage a group (pool) of resque workers
+
+ When daemonized, stdout and stderr default to resque-pool.stdxxx.log files in
+ the log directory and pidfile defaults to resque-pool.pid in the current dir.
+
+ Usage:
+    resque-pool [options]
+ where [options] are:
+           EOS
+           opt :config, "Alternate path to config file", :type => String, :short => "-c"
+           opt :appname, "Alternate appname", :type => String, :short => "-a"
+           opt :daemon, "Run as a background daemon", :default => false, :short => "-d"
+           opt :stdout, "Redirect stdout to logfile", :type => String, :short => '-o'
+           opt :stderr, "Redirect stderr to logfile", :type => String, :short => '-e'
+           opt :nosync, "Don't sync logfiles on every write"
+           opt :pidfile, "PID file location", :type => String, :short => "-p"
+           opt :environment, "Set RAILS_ENV/RACK_ENV/RESQUE_ENV", :type => String, :short => "-E"
+         end
+         if opts[:daemon]
+           opts[:stdout]  ||= "log/resque-pool.stdout.log"
+           opts[:stderr]  ||= "log/resque-pool.stderr.log"
+           opts[:pidfile] ||= "tmp/pids/resque-pool.pid"
+         end
+         opts
+       end
+
+       def daemonize
+         raise 'First fork failed' if (pid = fork) == -1
+         exit unless pid.nil?
+         Process.setsid
+         raise 'Second fork failed' if (pid = fork) == -1
+         exit unless pid.nil?
+       end
+
+       def manage_pidfile(pidfile)
+         return unless pidfile
+         pid = Process.pid
+         if File.exist? pidfile
+           if process_still_running? pidfile
+             raise "Pidfile already exists at #{pidfile} and process is still running."
+           else
+             File.delete pidfile
+           end
+         else
+           FileUtils.mkdir_p File.dirname(pidfile)
+         end
+         File.open pidfile, "w" do |f|
+           f.write pid
+         end
+         at_exit do
+           if Process.pid == pid
+             File.delete pidfile
+           end
+         end
+       end
+
+       def process_still_running?(pidfile)
+         old_pid = open(pidfile).read.strip.to_i
+         Process.kill 0, old_pid
+         true
+       rescue Errno::ESRCH
+         false
+       rescue Errno::EPERM
+         true
+       rescue ::Exception => e
+         $stderr.puts "While checking if PID #{old_pid} is running, unexpected #{e.class}: #{e}"
+         true
+       end
+
+       def redirect(opts)
+         $stdin.reopen '/dev/null' if opts[:daemon]
+         # need to reopen as File, or else Resque::Pool::Logging.reopen_logs! won't work
+         out = File.new(opts[:stdout], "a") if opts[:stdout] && !opts[:stdout].empty?
+         err = File.new(opts[:stderr], "a") if opts[:stderr] && !opts[:stderr].empty?
+         $stdout.reopen out if out
+         $stderr.reopen err if err
+         $stdout.sync = $stderr.sync = true unless opts[:nosync]
+       end
+
+       def setup_environment(opts)
+         Resque::Pool.app_name = opts[:appname] if opts[:appname]
+         ENV["RACK_ENV"] = ENV["RAILS_ENV"] = ENV["RESQUE_ENV"] = opts[:environment] if opts[:environment]
+         Resque::Pool.log "Resque Pool running in #{ENV["RAILS_ENV"] || "development"} environment"
+         ENV["RESQUE_POOL_CONFIG"] = opts[:config] if opts[:config]
+       end
+
+       def start_pool
+         require 'rake'
+         require 'resque/pool/tasks'
+         Rake.application.init
+         Rake.application.load_rakefile
+         Rake.application["resque:pool"].invoke
+       end
+
+     end
+   end
+ end
+
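
The CLI module is meant to be driven from an executable: the gem ships its own `resque-pool` binary, but a minimal wrapper would only need to require this file (presumably loadable as `resque/pool/cli`) and call `run`. A sketch, not the gem's actual bin script:

#!/usr/bin/env ruby
# Hypothetical executable wrapper around the CLI module shown above.
require 'resque/pool/cli'

Resque::Pool::CLI.run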
@@ -0,0 +1,65 @@
+ module Resque
+   class Pool
+     module Logging
+       extend self
+
+       # more than a little bit complicated...
+       # copied this from Unicorn.
+       def self.reopen_logs!
+         log "Flushing logs"
+         [$stdout, $stderr].each do |fd|
+           if fd.instance_of? File
+             # skip if the file is the exact same inode and device
+             orig_st = fd.stat
+             begin
+               cur_st = File.stat(fd.path)
+               next if orig_st.ino == cur_st.ino && orig_st.dev == cur_st.dev
+             rescue Errno::ENOENT
+             end
+             # match up the encoding
+             open_arg = 'a'
+             if fd.respond_to?(:external_encoding) && enc = fd.external_encoding
+               open_arg << ":#{enc.to_s}"
+               enc = fd.internal_encoding and open_arg << ":#{enc.to_s}"
+             end
+             # match up buffering (does reopen reset this?)
+             sync = fd.sync
+             # sync to disk
+             fd.fsync
+             # reopen, and set ruby buffering appropriately
+             fd.reopen fd.path, open_arg
+             fd.sync = sync
+             log "Reopened logfile: #{fd.path}"
+           end
+         end
+       end
+
+       # Given a string, sets the procline ($0)
+       # Procline is always in the format of:
+       #   resque-pool-master: STRING
+       def procline(string)
+         $0 = "resque-pool-master#{app}: #{string}"
+       end
+
+       # TODO: make this use an actual logger
+       def log(message)
+         puts "resque-pool-manager#{app}[#{Process.pid}]: #{message}"
+         #$stdout.fsync
+       end
+
+       # TODO: make this use an actual logger
+       def log_worker(message)
+         puts "resque-pool-worker#{app}[#{Process.pid}]: #{message}"
+         #$stdout.fsync
+       end
+
+       # Include optional app name in procline
+       def app
+         app_name   = self.respond_to?(:app_name) && self.app_name
+         app_name ||= self.class.respond_to?(:app_name) && self.class.app_name
+         app_name ? "[#{app_name}]" : ""
+       end
+
+     end
+   end
+ end
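
`reopen_logs!` is what makes external log rotation possible: the pool master traps HUP (see the pool class further down) and reopens `$stdout`/`$stderr` against their original paths. A rotation script could rename the logfiles and then signal the daemonized master, roughly as below; the pidfile path assumes the CLI defaults shown earlier.

# Hypothetical rotation hook: after moving the old logfiles aside, tell the
# daemonized pool master to reload config and reopen its logs at the same paths.
master_pid = File.read("tmp/pids/resque-pool.pid").to_i
Process.kill(:HUP, master_pid)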
@@ -0,0 +1,21 @@
+ require 'resque/worker'
+
+ class Resque::Pool
+   module PooledWorker
+     def shutdown_with_pool
+       shutdown_without_pool || Process.ppid == 1
+     end
+
+     def self.included(base)
+       base.instance_eval do
+         alias_method :shutdown_without_pool, :shutdown?
+         alias_method :shutdown?, :shutdown_with_pool
+       end
+     end
+
+   end
+ end
+
+ Resque::Worker.class_eval do
+   include Resque::Pool::PooledWorker
+ end
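
The alias chain rewires `Resque::Worker#shutdown?` so that a pooled worker also shuts down gracefully once its pool master dies and the worker is re-parented to init. The net effect is roughly the following restatement (illustrative only, not additional gem code):

# Rough equivalent of what the alias chain above produces:
class Resque::Worker
  def shutdown?
    shutdown_without_pool || Process.ppid == 1  # re-parented to init => master died
  end
end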
@@ -0,0 +1,20 @@
+ # -*- encoding: utf-8 -*-
+ require 'resque/tasks'
+
+ namespace :resque do
+
+   # resque worker config (not pool related). e.g. hoptoad, rails environment
+   task :setup
+
+   namespace :pool do
+     # resque pool config. e.g. after_prefork connection handling
+     task :setup
+   end
+
+   desc "Launch a pool of resque workers"
+   task :pool => %w[resque:setup resque:pool:setup] do
+     require 'resque/pool'
+     Resque::Pool.run
+   end
+
+ end
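
Applications hook into the pool by adding bodies to the two `setup` tasks in their own Rakefile. A hypothetical Rails-style example is sketched below; the `:environment` task and the ActiveRecord reconnect are app-specific assumptions, while `Resque::Pool.after_prefork` is the hook defined in the pool class itself.

# Hypothetical application Rakefile snippet.
require 'resque/pool/tasks'

# load the app before workers boot (assumes a Rails-style :environment task)
task "resque:setup" => :environment

# re-establish per-process connections after each worker is forked
task "resque:pool:setup" do
  Resque::Pool.after_prefork do
    ActiveRecord::Base.establish_connection
  end
end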
@@ -0,0 +1,5 @@
+ module Resque
+   class Pool
+     VERSION = "0.3.1"
+   end
+ end
@@ -0,0 +1,342 @@
+ # -*- encoding: utf-8 -*-
+ require 'resque'
+ require 'resque/worker'
+ require 'resque/pool/version'
+ require 'resque/pool/logging'
+ require 'resque/pool/pooled_worker'
+ require 'fcntl'
+ require 'yaml'
+
+ module Resque
+   class Pool
+     SIG_QUEUE_MAX_SIZE = 5
+     DEFAULT_WORKER_INTERVAL = 5
+     QUEUE_SIGS = [ :QUIT, :INT, :TERM, :USR1, :USR2, :CONT, :HUP, :WINCH, ]
+     CHUNK_SIZE = (16 * 1024)
+
+     include Logging
+     extend Logging
+     attr_reader :config
+     attr_reader :workers
+
+     def initialize(config)
+       init_config(config)
+       @workers = Hash.new { |workers, queues| workers[queues] = {} }
+       procline "(initialized)"
+     end
+
+     # Config: after_prefork {{{
+
+     # The `after_prefork` hook will be run in workers if you are using the
+     # preforking master worker to save memory. Use this hook to reload
+     # database connections and so forth to ensure that they're not shared
+     # among workers.
+     #
+     # Call with a block to set the hook.
+     # Call with no arguments to return the hook.
+     def self.after_prefork(&block)
+       block ? (@after_prefork = block) : @after_prefork
+     end
+
+     # Set the after_prefork proc.
+     def self.after_prefork=(after_prefork)
+       @after_prefork = after_prefork
+     end
+
+     def call_after_prefork!
+       self.class.after_prefork && self.class.after_prefork.call
+     end
+
+     # }}}
+     # Config: class methods to start up the pool using the default config {{{
+
+     @config_files = ["resque-pool.yml", "config/resque-pool.yml"]
+     class << self; attr_accessor :config_files, :app_name; end
+
+     def self.app_name
+       @app_name ||= File.basename(Dir.pwd)
+     end
+
+     def self.choose_config_file
+       if ENV["RESQUE_POOL_CONFIG"]
+         ENV["RESQUE_POOL_CONFIG"]
+       else
+         @config_files.detect { |f| File.exist?(f) }
+       end
+     end
+
+     def self.run
+       if GC.respond_to?(:copy_on_write_friendly=)
+         GC.copy_on_write_friendly = true
+       end
+       Resque::Pool.new(choose_config_file).start.join
+     end
+
+     # }}}
+     # Config: load config and config file {{{
+
+     def config_file
+       @config_file || (!@config && ::Resque::Pool.choose_config_file)
+     end
+
+     def init_config(config)
+       case config
+       when String, nil
+         @config_file = config
+       else
+         @config = config.dup
+       end
+       load_config
+     end
+
+     def load_config
+       if config_file
+         @config = YAML.load_file(config_file)
+       else
+         @config ||= {}
+       end
+       environment and @config[environment] and config.merge!(@config[environment])
+       config.delete_if {|key, value| value.is_a? Hash }
+     end
+
+     def environment
+       if defined? RAILS_ENV
+         RAILS_ENV
+       else
+         ENV['RACK_ENV'] || ENV['RAILS_ENV'] || ENV['RESQUE_ENV']
+       end
+     end
+
+     # }}}
+
+     # Sig handlers and self pipe management {{{
+
+     def self_pipe; @self_pipe ||= [] end
+     def sig_queue; @sig_queue ||= [] end
+
+     def init_self_pipe!
+       self_pipe.each { |io| io.close rescue nil }
+       self_pipe.replace(IO.pipe)
+       self_pipe.each { |io| io.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC) }
+     end
+
+     def init_sig_handlers!
+       QUEUE_SIGS.each { |sig| trap_deferred(sig) }
+       trap(:CHLD) { |_| awaken_master }
+     end
+
+     def awaken_master
+       begin
+         self_pipe.last.write_nonblock('.') # wake up master process from select
+       rescue Errno::EAGAIN, Errno::EINTR
+         # pipe is full, master should wake up anyway
+         retry
+       end
+     end
+
+     class QuitNowException < Exception; end
+     # defer a signal for later processing in #join (master process)
+     def trap_deferred(signal)
+       trap(signal) do |sig_nr|
+         if @waiting_for_reaper && [:INT, :TERM].include?(signal)
+           log "Received #{signal}: short circuiting QUIT waitpid"
+           raise QuitNowException
+         end
+         if sig_queue.size < SIG_QUEUE_MAX_SIZE
+           sig_queue << signal
+           awaken_master
+         else
+           log "ignoring SIG#{signal}, queue=#{sig_queue.inspect}"
+         end
+       end
+     end
+
+     def reset_sig_handlers!
+       QUEUE_SIGS.each {|sig| trap(sig, "DEFAULT") }
+     end
+
+     def handle_sig_queue!
+       case signal = sig_queue.shift
+       when :USR1, :USR2, :CONT
+         log "#{signal}: sending to all workers"
+         signal_all_workers(signal)
+       when :HUP
+         log "HUP: reload config file and reload logfiles"
+         load_config
+         Logging.reopen_logs!
+         log "HUP: gracefully shutdown old children (which have old logfiles open)"
+         signal_all_workers(:QUIT)
+         log "HUP: new children will inherit new logfiles"
+         maintain_worker_count
+       when :WINCH
+         log "WINCH: gracefully stopping all workers"
+         @config = {}
+         maintain_worker_count
+       when :QUIT
+         log "QUIT: graceful shutdown, waiting for children"
+         signal_all_workers(:QUIT)
+         reap_all_workers(0) # will hang until all workers are shutdown
+         :break
+       when :INT
+         log "INT: immediate shutdown (graceful worker shutdown)"
+         signal_all_workers(:QUIT)
+         :break
+       when :TERM
+         log "TERM: immediate shutdown (and immediate worker shutdown)"
+         signal_all_workers(:TERM)
+         :break
+       end
+     end
+
+     # }}}
+     # start, join, and master sleep {{{
+
+     def start
+       procline("(starting)")
+       init_self_pipe!
+       init_sig_handlers!
+       maintain_worker_count
+       procline("(started)")
+       log "started manager"
+       report_worker_pool_pids
+       self
+     end
+
+     def report_worker_pool_pids
+       if workers.empty?
+         log "Pool is empty"
+       else
+         log "Pool contains worker PIDs: #{all_pids.inspect}"
+       end
+     end
+
+     def join
+       loop do
+         reap_all_workers
+         break if handle_sig_queue! == :break
+         if sig_queue.empty?
+           master_sleep
+           maintain_worker_count
+         end
+         procline("managing #{all_pids.inspect}")
+       end
+       procline("(shutting down)")
+       #stop # gracefully shutdown all workers on our way out
+       log "manager finished"
+       #unlink_pid_safe(pid) if pid
+     end
+
+     def master_sleep
+       begin
+         ready = IO.select([self_pipe.first], nil, nil, 1) or return
+         ready.first && ready.first.first or return
+         loop { self_pipe.first.read_nonblock(CHUNK_SIZE) }
+       rescue Errno::EAGAIN, Errno::EINTR
+       end
+     end
+
+     # }}}
+     # worker process management {{{
+
+     def reap_all_workers(waitpid_flags=Process::WNOHANG)
+       @waiting_for_reaper = waitpid_flags == 0
+       begin
+         loop do
+           # -1, wait for any child process
+           wpid, status = Process.waitpid2(-1, waitpid_flags)
+           break unless wpid
+
+           if worker = delete_worker(wpid)
+             log "Reaped resque worker[#{status.pid}] (status: #{status.exitstatus}) queues: #{worker.queues.join(",")}"
+           else
+             # this died before it could be killed, so it's not going to have any extra info
+             log "Tried to reap worker [#{status.pid}], but it had already died. (status: #{status.exitstatus})"
+           end
+         end
+       rescue Errno::ECHILD, QuitNowException
+       end
+     end
+
+     # TODO: close any file descriptors connected to worker, if any
+     def delete_worker(pid)
+       worker = nil
+       workers.detect do |queues, pid_to_worker|
+         worker = pid_to_worker.delete(pid)
+       end
+       worker
+     end
+
+     def all_pids
+       workers.map {|q,workers| workers.keys }.flatten
+     end
+
+     def signal_all_workers(signal)
+       all_pids.each do |pid|
+         Process.kill signal, pid
+       end
+     end
+
+     # }}}
+     # ???: maintain_worker_count, all_known_queues {{{
+
+     def maintain_worker_count
+       all_known_queues.each do |queues|
+         delta = worker_delta_for(queues)
+         spawn_missing_workers_for(queues) if delta > 0
+         quit_excess_workers_for(queues) if delta < 0
+       end
+     end
+
+     def all_known_queues
+       config.keys | workers.keys
+     end
+
+     # }}}
+     # methods that operate on a single grouping of queues {{{
+     # perhaps this means a class is waiting to be extracted
+
+     def spawn_missing_workers_for(queues)
+       worker_delta_for(queues).times do |nr|
+         spawn_worker!(queues)
+       end
+     end
+
+     def quit_excess_workers_for(queues)
+       delta = -worker_delta_for(queues)
+       pids_for(queues)[0...delta].each do |pid|
+         Process.kill("QUIT", pid)
+       end
+     end
+
+     def worker_delta_for(queues)
+       config.fetch(queues, 0) - workers.fetch(queues, []).size
+     end
+
+     def pids_for(queues)
+       workers[queues].keys
+     end
+
+     def spawn_worker!(queues)
+       worker = create_worker(queues)
+       pid = fork do
+         log_worker "Starting worker #{worker}"
+         call_after_prefork!
+         reset_sig_handlers!
+         #self_pipe.each {|io| io.close }
+         worker.work(ENV['INTERVAL'] || DEFAULT_WORKER_INTERVAL) # interval, will block
+       end
+       workers[queues][pid] = worker
+     end
+
+     def create_worker(queues)
+       queues = queues.to_s.split(',')
+       worker = ::Resque::Worker.new(*queues)
+       worker.verbose = ENV['LOGGING'] || ENV['VERBOSE']
+       worker.very_verbose = ENV['VVERBOSE']
+       worker
+     end
+
+     # }}}
+
+   end
+ end
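
Finally, nothing requires a YAML file: `init_config` also accepts a hash whose keys are comma-separated queue lists and whose values are worker counts, which is exactly what `load_config` produces from `resque-pool.yml`. A minimal programmatic start under that assumption (the queue names are made up):

# Sketch of starting a pool directly from Ruby, bypassing resque-pool.yml;
# mirrors what Resque::Pool.run does after choosing a config file.
require 'resque/pool'

pool = Resque::Pool.new("high" => 2, "high,low" => 1)  # queue lists => worker counts
pool.start.join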