nfo-resque-mongo 1.15.0
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/HISTORY.md +259 -0
- data/LICENSE +20 -0
- data/README.markdown +828 -0
- data/Rakefile +73 -0
- data/bin/resque +75 -0
- data/bin/resque-web +23 -0
- data/lib/resque/errors.rb +10 -0
- data/lib/resque/failure/base.rb +74 -0
- data/lib/resque/failure/hoptoad.rb +139 -0
- data/lib/resque/failure/mongo.rb +92 -0
- data/lib/resque/failure/multiple.rb +60 -0
- data/lib/resque/failure.rb +82 -0
- data/lib/resque/helpers.rb +79 -0
- data/lib/resque/job.rb +228 -0
- data/lib/resque/plugin.rb +51 -0
- data/lib/resque/queue_stats.rb +58 -0
- data/lib/resque/server/public/idle.png +0 -0
- data/lib/resque/server/public/jquery-1.3.2.min.js +19 -0
- data/lib/resque/server/public/jquery.relatize_date.js +95 -0
- data/lib/resque/server/public/poll.png +0 -0
- data/lib/resque/server/public/ranger.js +73 -0
- data/lib/resque/server/public/reset.css +48 -0
- data/lib/resque/server/public/style.css +86 -0
- data/lib/resque/server/public/working.png +0 -0
- data/lib/resque/server/test_helper.rb +19 -0
- data/lib/resque/server/views/error.erb +1 -0
- data/lib/resque/server/views/failed.erb +75 -0
- data/lib/resque/server/views/key_sets.erb +19 -0
- data/lib/resque/server/views/key_string.erb +11 -0
- data/lib/resque/server/views/layout.erb +38 -0
- data/lib/resque/server/views/next_more.erb +19 -0
- data/lib/resque/server/views/overview.erb +4 -0
- data/lib/resque/server/views/queues.erb +49 -0
- data/lib/resque/server/views/stats.erb +62 -0
- data/lib/resque/server/views/workers.erb +109 -0
- data/lib/resque/server/views/working.erb +68 -0
- data/lib/resque/server.rb +222 -0
- data/lib/resque/stat.rb +55 -0
- data/lib/resque/tasks.rb +42 -0
- data/lib/resque/version.rb +3 -0
- data/lib/resque/worker.rb +524 -0
- data/lib/resque.rb +384 -0
- data/tasks/redis.rake +161 -0
- data/tasks/resque.rake +2 -0
- data/test/dump.rdb +0 -0
- data/test/job_hooks_test.rb +323 -0
- data/test/job_plugins_test.rb +230 -0
- data/test/plugin_test.rb +116 -0
- data/test/queue_stats_test.rb +57 -0
- data/test/redis-test.conf +115 -0
- data/test/resque-web_test.rb +48 -0
- data/test/resque_test.rb +256 -0
- data/test/test_helper.rb +151 -0
- data/test/worker_test.rb +356 -0
- metadata +166 -0
data/lib/resque/server.rb
ADDED
@@ -0,0 +1,222 @@
require 'sinatra/base'
require 'erb'
require 'resque'
require 'resque/version'

module Resque
  class Server < Sinatra::Base
    dir = File.dirname(File.expand_path(__FILE__))

    set :views,  "#{dir}/server/views"
    set :public, "#{dir}/server/public"
    set :static, true

    helpers do
      include Rack::Utils
      alias_method :h, :escape_html

      def current_section
        url_path request.path_info.sub('/','').split('/')[0].downcase
      end

      def current_page
        url_path request.path_info.sub('/','')
      end

      def url_path(*path_parts)
        [ path_prefix, path_parts ].join("/").squeeze('/')
      end
      alias_method :u, :url_path

      def path_prefix
        request.env['SCRIPT_NAME']
      end

      def class_if_current(path = '')
        'class="current"' if current_page[0, path.size] == path
      end

      def tab(name)
        dname = name.to_s.downcase
        path = url_path(dname)
        "<li #{class_if_current(path)}><a href='#{path}'>#{name}</a></li>"
      end

      def tabs
        Resque::Server.tabs
      end

      def redis_get_size(key)
        case Resque.redis.type(key)
        when 'none'
          []
        when 'list'
          Resque.redis.llen(key)
        when 'set'
          Resque.redis.scard(key)
        when 'string'
          Resque.redis.get(key).length
        when 'zset'
          Resque.redis.zcard(key)
        end
      end

      def redis_get_value_as_array(key, start=0)
        case Resque.redis.type(key)
        when 'none'
          []
        when 'list'
          Resque.redis.lrange(key, start, start + 20)
        when 'set'
          Resque.redis.smembers(key)[start..(start + 20)]
        when 'string'
          [Resque.redis.get(key)]
        when 'zset'
          Resque.redis.zrange(key, start, start + 20)
        end
      end

      def show_args(args)
        Array(args).map { |a| a.inspect }.join("\n")
      end

      def worker_hosts
        @worker_hosts ||= worker_hosts!
      end

      def worker_hosts!
        hosts = Hash.new { [] }

        Resque.workers.each do |worker|
          host, _ = worker.to_s.split(':')
          hosts[host] += [worker.to_s]
        end

        hosts
      end

      def partial?
        @partial
      end

      def partial(template, local_vars = {})
        @partial = true
        erb(template.to_sym, {:layout => false}, local_vars)
      ensure
        @partial = false
      end

      def poll
        if @polling
          text = "Last Updated: #{Time.now.strftime("%H:%M:%S")}"
        else
          text = "<a href='#{u(request.path_info)}.poll' rel='poll'>Live Poll</a>"
        end
        "<p class='poll'>#{text}</p>"
      end

    end

    def show(page, layout = true)
      begin
        erb page.to_sym, {:layout => layout}, :resque => Resque
      rescue Errno::ECONNREFUSED
        erb :error, {:layout => false}, :error => "Can't connect to Mongo!"
      end
    end

    # to make things easier on ourselves
    get "/?" do
      redirect url_path(:overview)
    end

    %w( overview queues working workers key ).each do |page|
      get "/#{page}" do
        show page
      end

      get "/#{page}/:id" do
        show page
      end
    end

    post "/queues/:id/remove" do
      Resque.remove_queue(params[:id])
      redirect u('queues')
    end

    %w( overview workers ).each do |page|
      get "/#{page}.poll" do
        content_type "text/html"
        @polling = true
        show(page.to_sym, false).gsub(/\s{1,}/, ' ')
      end
    end

    get "/failed" do
      if Resque::Failure.url
        redirect Resque::Failure.url
      else
        show :failed
      end
    end

    post "/failed/clear" do
      Resque::Failure.clear
      redirect u('failed')
    end

    get "/failed/requeue/:index" do
      Resque::Failure.requeue(params[:index])
      if request.xhr?
        return Resque::Failure.all(params[:index])['retried_at']
      else
        redirect u('failed')
      end
    end

    get "/failed/remove/:index" do
      Resque::Failure.remove(params[:index])
      redirect u('failed')
    end

    get "/stats" do
      redirect url_path("/stats/resque")
    end

    get "/stats/:id" do
      show :stats
    end

    get "/stats/keys/:key" do
      show :stats
    end

    get "/stats.txt" do
      info = Resque.info

      stats = []
      stats << "resque.servers=#{info[:servers].inspect}"
      stats << "resque.pending=#{info[:pending]}"
      stats << "resque.processed+=#{info[:processed]}"
      stats << "resque.failed+=#{info[:failed]}"
      stats << "resque.workers=#{info[:workers]}"
      stats << "resque.working=#{info[:working]}"

      Resque.queues.each do |queue|
        stats << "queues.#{queue}=#{Resque.size(queue)}"
      end

      content_type 'text/html'
      stats.join "\n"
    end

    def resque
      Resque
    end

    def self.tabs
      @tabs ||= ["Overview", "Working", "Failed", "Queues", "Workers", "Stats"]
    end
  end
end
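For orientation, the class above is a plain Sinatra::Base application, so besides the bundled resque-web binary it can be mounted under any Rack server. A minimal config.ru along these lines should work (an illustrative sketch, not part of this package; configure the Mongo connection however your application already does before booting the app):

# config.ru (illustrative only -- not part of the package)
require 'resque'
require 'resque/server'

run Resque::Server.new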
data/lib/resque/stat.rb
ADDED
@@ -0,0 +1,55 @@
module Resque
  # The stat subsystem. Used to keep track of integer counts.
  #
  # Get a stat:  Stat[name]
  # Incr a stat: Stat.incr(name)
  # Decr a stat: Stat.decr(name)
  # Kill a stat: Stat.clear(name)
  module Stat
    extend self
    extend Helpers

    # Returns the int value of a stat, given a string stat name.
    def get(stat)
      res = mongo_stats.find_one(:stat => stat)
      return 0 unless res
      res['value'].to_i
    end

    # Alias of `get`
    def [](stat)
      get(stat)
    end

    # For a string stat name, increments the stat by one.
    #
    # Can optionally accept a second int parameter. The stat is then
    # incremented by that amount.
    def incr(stat, by = 1)
      mongo_stats.update({:stat => stat}, {'$inc' => {:value => by}}, :upsert => true)
    end

    # Increments a stat by one.
    def <<(stat)
      incr stat
    end

    # For a string stat name, decrements the stat by one.
    #
    # Can optionally accept a second int parameter. The stat is then
    # decremented by that amount.
    def decr(stat, by = 1)
      mongo_stats.update({:stat => stat}, {'$inc' => {:value => -by}})
    end

    # Decrements a stat by one.
    def >>(stat)
      decr stat
    end

    # Removes a stat from Redis, effectively setting it to 0.
    def clear(stat)
      mongo_stats.remove(:stat => stat)
    end
  end
end
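The comment block at the top of the module already spells out the calling convention; a short illustration of those calls (hypothetical stat name, assuming Resque is already connected to its Mongo database):

# Illustrative only -- not part of the package
Resque::Stat.incr('emails_sent')     # upserts the stat document and $inc's its value by 1
Resque::Stat << 'emails_sent'        # shorthand for incr by 1
Resque::Stat['emails_sent']          # reads the stored value; 0 if the stat is missing
Resque::Stat.decr('emails_sent')     # $inc by -1
Resque::Stat.clear('emails_sent')    # removes the document, effectively resetting it to 0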
data/lib/resque/tasks.rb
ADDED
@@ -0,0 +1,42 @@
# require 'resque/tasks'
# will give you the resque tasks

namespace :resque do
  task :setup

  desc "Start a Resque worker"
  task :work => :setup do
    require 'resque'

    queues = (ENV['QUEUES'] || ENV['QUEUE']).to_s.split(',')

    begin
      worker = Resque::Worker.new(*queues)
      worker.verbose = ENV['LOGGING'] || ENV['VERBOSE']
      worker.very_verbose = ENV['VVERBOSE']
    rescue Resque::NoQueueError
      abort "set QUEUE env var, e.g. $ QUEUE=critical,high rake resque:work"
    end

    if ENV['PIDFILE']
      File.open(ENV['PIDFILE'], 'w') { |f| f << worker.pid }
    end

    worker.log "Starting worker #{worker}"

    worker.work(ENV['INTERVAL'] || 5) # interval, will block
  end

  desc "Start multiple Resque workers. Should only be used in dev mode."
  task :workers do
    threads = []

    ENV['COUNT'].to_i.times do
      threads << Thread.new do
        system "rake resque:work"
      end
    end

    threads.each { |thread| thread.join }
  end
end
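As the header comment notes, an application picks these tasks up by requiring the file from its own Rakefile; queues and verbosity are then driven through environment variables, and the empty :setup task is the usual place to hook application loading. A sketch of a typical setup (the body of resque:setup is hypothetical and depends on your project):

# Rakefile (illustrative only -- not part of the package)
require 'resque/tasks'

task 'resque:setup' do
  # load your application environment here so job classes can be resolved
end

# From a shell:
#   QUEUE=critical,high VERBOSE=1 rake resque:work    # one worker, two queues, basic logging
#   COUNT=3 QUEUE=* rake resque:workers               # three dev-mode workers on all queues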
data/lib/resque/worker.rb
ADDED
@@ -0,0 +1,524 @@
module Resque
  # A Resque Worker processes jobs. On platforms that support fork(2),
  # the worker will fork off a child to process each job. This ensures
  # a clean slate when beginning the next job and cuts down on gradual
  # memory growth as well as low level failures.
  #
  # It also ensures workers are always listening to signals from you,
  # their master, and can react accordingly.
  class Worker
    include Resque::Helpers
    extend Resque::Helpers

    # Whether the worker should log basic info to STDOUT
    attr_accessor :verbose

    # Whether the worker should log lots of info to STDOUT
    attr_accessor :very_verbose

    # Boolean indicating whether this worker can or can not fork.
    # Automatically set if a fork(2) fails.
    attr_accessor :cant_fork

    attr_writer :to_s

    attr_accessor :job

    # Returns an array of all worker objects.
    def self.all
      mongo_workers.distinct(:worker).map { |w| queues = w.split(','); worker = new(*queues); worker.to_s = w; worker }.compact
    end

    # Returns an array of all worker objects currently processing
    # jobs.
    def self.working
      select = {'working_on' => { '$exists' => true }}
      # select['working_on'] = {"$exists" => true}
      working = mongo_workers.find(select).to_a
      # working.map! {|w| w['worker'] }
      working.map do |w|
        queues = w['worker'].split(',')
        worker = new(*queues)
        worker.to_s = w['worker']
        worker.job = w['working_on'] || {}
        worker
      end
    end

    # Returns a single worker object. Accepts a string id.
    def self.find(worker_id)
      w = mongo_workers.find_one(:worker => worker_id)
      return nil unless w
      queues = w['worker'].split(',')
      worker = new(*queues)
      worker.job = w['working_on'] || {} ## avoid a new call to mongo just to retrieve what's this worker is doing
      worker.to_s = worker_id
      worker
    end

    # Alias of `find`
    def self.attach(worker_id)
      find(worker_id)
    end

    # # Given a string worker id, return a boolean indicating whether the
    # # worker exists
    def self.exists?(worker_id)
      not mongo_workers.find_one(:worker => worker_id.to_s).nil?
    end

    # Workers should be initialized with an array of string queue
    # names. The order is important: a Worker will check the first
    # queue given for a job. If none is found, it will check the
    # second queue name given. If a job is found, it will be
    # processed. Upon completion, the Worker will again check the
    # first queue given, and so forth. In this way the queue list
    # passed to a Worker on startup defines the priorities of queues.
    #
    # If passed a single "*", this Worker will operate on all queues
    # in alphabetical order. Queues can be dynamically added or
    # removed without needing to restart workers using this method.
    def initialize(*queues)
      @queues = queues.map { |queue| queue.to_s.strip }
      validate_queues
    end

    # A worker must be given a queue, otherwise it won't know what to
    # do with itself.
    #
    # You probably never need to call this.
    def validate_queues
      if @queues.nil? || @queues.empty?
        raise NoQueueError.new("Please give each worker at least one queue.")
      end
    end

    # This is the main workhorse method. Called on a Worker instance,
    # it begins the worker life cycle.
    #
    # The following events occur during a worker's life cycle:
    #
    # 1. Startup:   Signals are registered, dead workers are pruned,
    #               and this worker is registered.
    # 2. Work loop: Jobs are pulled from a queue and processed.
    # 3. Teardown:  This worker is unregistered.
    #
    # Can be passed a float representing the polling frequency.
    # The default is 5 seconds, but for a semi-active site you may
    # want to use a smaller value.
    #
    # Also accepts a block which will be passed the job as soon as it
    # has completed processing. Useful for testing.
    def work(interval = 5.0, &block)
      interval = Float(interval)
      $0 = "resque: Starting"
      job_count = 0
      startup
      loop do
        break if shutdown?

        if not @paused and job = reserve
          log "got: #{job.inspect}"
          run_hook :before_fork, job
          working_on job

          if @child = fork
            rand # Reseeding
            procline "Forked #{@child} at #{Time.now.to_s}"
            Process.wait
          else
            procline "Processing #{job.queue} since #{Time.now.to_s} (#{job_count} so far)"
            perform(job, &block)
            job_count += 1
            exit! unless @cant_fork
          end

          done_working
          @child = nil
        else
          break if interval.zero?
          log! "Sleeping for #{interval} seconds"
          procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
          sleep interval
        end
      end

    ensure
      unregister_worker
    end

    # DEPRECATED. Processes a single job. If none is given, it will
    # try to produce one. Usually run in the child.
    def process(j = nil, &block)
      return unless j ||= reserve
      working_on j
      perform(j, &block)
    ensure
      done_working
    end

    # Processes a given job in the child.
    def perform(job)
      begin
        run_hook :after_fork, job
        job.perform
      rescue Object => e
        log "#{job.inspect} failed: #{e.inspect}"
        begin
          job.fail(e)
        rescue Object => e
          log "Received exception when reporting failure: #{e.inspect}"
        end
        failed!
      else
        log "done: #{job.inspect}"
      ensure
        yield job if block_given?
      end
    end

    # Attempts to grab a job off one of the provided queues. Returns
    # nil if no job can be found.
    def reserve
      queues.each do |queue|
        log! "Checking #{queue}"
        if j = Resque::Job.reserve(queue)
          log! "Found job on #{queue}"
          return j
        end
      end

      nil
    rescue Exception => e
      log "Error reserving job: #{e.inspect}"
      log e.backtrace.join("\n")
      raise e
    end

    # Returns a list of queues to use when searching for a job.
    # A splat ("*") means you want every queue (in alpha order) - this
    # can be useful for dynamically adding new queues.
    def queues
      @queues[0] == "*" ? Resque.queues.sort : Resque.queues(@queues).sort
    end

    # Not every platform supports fork. Here we do our magic to
    # determine if yours does.
    def fork
      @cant_fork = true if $TESTING

      return if @cant_fork

      begin
        # IronRuby doesn't support `Kernel.fork` yet
        if Kernel.respond_to?(:fork)
          Kernel.fork
        else
          raise NotImplementedError
        end
      rescue NotImplementedError
        @cant_fork = true
        nil
      end
    end

    # Runs all the methods needed when a worker begins its lifecycle.
    def startup
      enable_gc_optimizations
      register_signal_handlers
      prune_dead_workers
      run_hook :before_first_fork
      register_worker

      # Fix buffering so we can `rake resque:work > resque.log` and
      # get output from the child in there.
      $stdout.sync = true
    end

    # Enables GC Optimizations if you're running REE.
    # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
    def enable_gc_optimizations
      if GC.respond_to?(:copy_on_write_friendly=)
        GC.copy_on_write_friendly = true
      end
    end

    # Registers the various signal handlers a worker responds to.
    #
    # TERM: Shutdown immediately, stop processing jobs.
    # INT:  Shutdown immediately, stop processing jobs.
    # QUIT: Shutdown after the current job has finished processing.
    # USR1: Kill the forked child immediately, continue processing jobs.
    # USR2: Don't process any new jobs
    # CONT: Start processing jobs again after a USR2
    def register_signal_handlers
      trap('TERM') { shutdown! }
      trap('INT')  { shutdown! }

      begin
        trap('QUIT') { shutdown }
        trap('USR1') { kill_child }
        trap('USR2') { pause_processing }
        trap('CONT') { unpause_processing }
      rescue ArgumentError
        warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
      end

      log! "Registered signals"
    end

    # Schedule this worker for shutdown. Will finish processing the
    # current job.
    def shutdown
      log 'Exiting...'
      @shutdown = true
    end

    # Kill the child and shutdown immediately.
    def shutdown!
      shutdown
      kill_child
    end

    # Should this worker shutdown as soon as current job is finished?
    def shutdown?
      @shutdown
    end

    # Kills the forked child immediately, without remorse. The job it
    # is processing will not be completed.
    def kill_child
      if @child
        log! "Killing child at #{@child}"
        if system("ps -o pid,state -p #{@child}")
          Process.kill("KILL", @child) rescue nil
        else
          log! "Child #{@child} not found, restarting."
          shutdown
        end
      end
    end

    # Stop processing jobs after the current one has completed (if we're
    # currently running one).
    def pause_processing
      log "USR2 received; pausing job processing"
      @paused = true
    end

    # Start processing jobs again after a pause
    def unpause_processing
      log "CONT received; resuming job processing"
      @paused = false
    end

    # Looks for any workers which should be running on this server
    # and, if they're not, removes them from Redis.
    #
    # This is a form of garbage collection. If a server is killed by a
    # hard shutdown, power failure, or something else beyond our
    # control, the Resque workers will not die gracefully and therefore
    # will leave stale state information in Redis.
    #
    # By checking the current Redis state against the actual
    # environment, we can determine if Redis is old and clean it up a bit.
    def prune_dead_workers
      all_workers = Worker.all
      known_workers = worker_pids unless all_workers.empty?
      all_workers.each do |worker|
        host, pid, queues = worker.to_s.split(':')
        next unless host == hostname
        next if known_workers.include?(pid)
        log! "Pruning dead worker: #{worker}"
        worker.unregister_worker
      end
    end

    # Registers ourself as a worker. Useful when entering the worker
    # lifecycle on startup.
    def register_worker
      mongo_workers.insert(:worker => self.to_s)
      started!
    end

    # Runs a named hook, passing along any arguments.
    def run_hook(name, *args)
      return unless hook = Resque.send(name)
      msg = "Running #{name} hook"
      msg << " with #{args.inspect}" if args.any?
      log msg

      args.any? ? hook.call(*args) : hook.call
    end

    # Unregisters ourself as a worker. Useful when shutting down.
    def unregister_worker
      # If we're still processing a job, make sure it gets logged as a
      # failure.
      if (hash = processing) && !hash.empty?
        j = Job.new(hash[:queue], hash[:payload])
        # Ensure the proper worker is attached to this job, even if
        # it's not the precise instance that died.
        j.worker = self
        j.fail(DirtyExit.new)
      end

      mongo_workers.remove(:worker => self.to_s)

      Stat.clear("processed:#{self}")
      Stat.clear("failed:#{self}")
    end

    def check_payload(payload)
      case payload.class.to_s
      when 'Class'
        payload.to_s
      when 'Array'
        payload.map { |e| check_payload(e) }
      when 'Hash'
        result = {}
        payload.each { |k,v| result[k] = check_payload(v) }
        result
      else
        return payload
      end
    end

    # Given a job, tells Redis we're working on it. Useful for seeing
    # what workers are doing and when.
    def working_on(j)
      j.worker = self
      data = {
        'queue'   => j.queue,
        'run_at'  => Time.now.to_s,
        'payload' => check_payload(j.payload)
      }
      @job = data
      working_on = {'working_on' => data}
      mongo_workers.update({:worker => self.to_s}, {'$set' => working_on}, :upsert => true )
    end

    # Called when we are done working - clears our `working_on` state
    # and tells Redis we processed a job.
    def done_working
      @job = {}
      working_on = {'working_on' => 1}
      mongo_workers.update({:worker => self.to_s}, {'$unset' => working_on})
      processed!
    end

    # How many jobs has this worker processed? Returns an int.
    def processed
      Stat["processed:#{self}"]
    end

    # Tell Redis we've processed a job.
    def processed!
      Stat << "processed"
      Stat << "processed:#{self}"
    end

    # How many failed jobs has this worker seen? Returns an int.
    def failed
      Stat["failed:#{self}"]
    end

    # Tells Redis we've failed a job.
    def failed!
      Stat << "failed"
      Stat << "failed:#{self}"
    end

    # What time did this worker start? Returns an instance of `Time`
    def started
      worker = mongo_workers.find_one(:worker => self.to_s)
      return nil if !worker
      worker['started']
    end

    # Tell Redis we've started
    def started!
      started = {'started' => Time.now }
      mongo_workers.update({:worker => self.to_s}, {'$set' => started})
    end

    # Returns a hash explaining the Job we're currently processing, if any.
    def processing
      job || {}
    end

    # Boolean - true if working, false if not
    def working?
      state == :working
    end

    # Boolean - true if idle, false if not
    def idle?
      state == :idle
    end

    # Returns a symbol representing the current worker state,
    # which can be either :working or :idle
    def state
      worker = mongo_workers.find_one(:worker => self.to_s)
      worker ? :working : :idle
    end

    # Is this worker the same as another worker?
    def ==(other)
      to_s == other.to_s
    end

    def inspect
      "#<Worker #{to_s}>"
    end

    # The string representation is the same as the id for this worker
    # instance. Can be used with `Worker.find`.
    def to_s
      @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
    end
    alias_method :worker_id, :to_s

    # chomp'd hostname of this machine
    def hostname
      @hostname ||= `hostname`.chomp
    end

    # Returns Integer PID of running worker
    def pid
      @pid ||= to_s.split(":")[1].to_i
    end

    # Returns an array of string pids of all the other workers on this
    # machine. Useful when pruning dead workers on startup.
    def worker_pids
      `ps -A -o pid,command | grep [r]esque | grep -v "resque-web"`.split("\n").map do |line|
        line.split(' ')[0]
      end
    end

    # Given a string, sets the procline ($0) and logs.
    # Procline is always in the format of:
    #   resque-VERSION: STRING
    def procline(string)
      $0 = "resque-#{Resque::Version}: #{string}"
      log! $0
    end

    # Log a message to STDOUT if we are verbose or very_verbose.
    def log(message)
      if verbose
        puts "*** #{message}"
      elsif very_verbose
        time = Time.now.strftime('%H:%M:%S %Y-%m-%d')
        puts "** [#{time}] #$$: #{message}"
      end
    end

    # Logs a very verbose message to STDOUT.
    def log!(message)
      log message if very_verbose
    end
  end
end
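The rake task shipped in data/lib/resque/tasks.rb is only a thin wrapper around this class; a worker can also be started programmatically, roughly as follows (an illustrative sketch with made-up queue names, assuming the Mongo connection is already configured):

# Illustrative only -- not part of the package
require 'resque'

worker = Resque::Worker.new(:critical, :high)  # priority order: :critical is always checked first
worker.verbose = true                          # basic logging to STDOUT
worker.work(5)                                 # poll every 5 seconds; blocks until TERM/INT/QUIT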