resque-cedar 1.20.0

Files changed (64)
  1. data/HISTORY.md +354 -0
  2. data/LICENSE +20 -0
  3. data/README.markdown +908 -0
  4. data/Rakefile +70 -0
  5. data/bin/resque +81 -0
  6. data/bin/resque-web +27 -0
  7. data/lib/resque.rb +385 -0
  8. data/lib/resque/coder.rb +27 -0
  9. data/lib/resque/errors.rb +10 -0
  10. data/lib/resque/failure.rb +96 -0
  11. data/lib/resque/failure/airbrake.rb +17 -0
  12. data/lib/resque/failure/base.rb +64 -0
  13. data/lib/resque/failure/hoptoad.rb +33 -0
  14. data/lib/resque/failure/multiple.rb +54 -0
  15. data/lib/resque/failure/redis.rb +51 -0
  16. data/lib/resque/failure/thoughtbot.rb +33 -0
  17. data/lib/resque/helpers.rb +64 -0
  18. data/lib/resque/job.rb +223 -0
  19. data/lib/resque/multi_json_coder.rb +37 -0
  20. data/lib/resque/multi_queue.rb +73 -0
  21. data/lib/resque/plugin.rb +66 -0
  22. data/lib/resque/queue.rb +117 -0
  23. data/lib/resque/server.rb +248 -0
  24. data/lib/resque/server/public/favicon.ico +0 -0
  25. data/lib/resque/server/public/idle.png +0 -0
  26. data/lib/resque/server/public/jquery-1.3.2.min.js +19 -0
  27. data/lib/resque/server/public/jquery.relatize_date.js +95 -0
  28. data/lib/resque/server/public/poll.png +0 -0
  29. data/lib/resque/server/public/ranger.js +73 -0
  30. data/lib/resque/server/public/reset.css +44 -0
  31. data/lib/resque/server/public/style.css +86 -0
  32. data/lib/resque/server/public/working.png +0 -0
  33. data/lib/resque/server/test_helper.rb +19 -0
  34. data/lib/resque/server/views/error.erb +1 -0
  35. data/lib/resque/server/views/failed.erb +67 -0
  36. data/lib/resque/server/views/key_sets.erb +19 -0
  37. data/lib/resque/server/views/key_string.erb +11 -0
  38. data/lib/resque/server/views/layout.erb +44 -0
  39. data/lib/resque/server/views/next_more.erb +10 -0
  40. data/lib/resque/server/views/overview.erb +4 -0
  41. data/lib/resque/server/views/queues.erb +49 -0
  42. data/lib/resque/server/views/stats.erb +62 -0
  43. data/lib/resque/server/views/workers.erb +109 -0
  44. data/lib/resque/server/views/working.erb +72 -0
  45. data/lib/resque/stat.rb +53 -0
  46. data/lib/resque/tasks.rb +61 -0
  47. data/lib/resque/version.rb +3 -0
  48. data/lib/resque/worker.rb +557 -0
  49. data/lib/tasks/redis.rake +161 -0
  50. data/lib/tasks/resque.rake +2 -0
  51. data/test/airbrake_test.rb +26 -0
  52. data/test/hoptoad_test.rb +26 -0
  53. data/test/job_hooks_test.rb +423 -0
  54. data/test/job_plugins_test.rb +230 -0
  55. data/test/multi_queue_test.rb +95 -0
  56. data/test/plugin_test.rb +116 -0
  57. data/test/redis-test-cluster.conf +115 -0
  58. data/test/redis-test.conf +115 -0
  59. data/test/redis_queue_test.rb +133 -0
  60. data/test/resque-web_test.rb +59 -0
  61. data/test/resque_test.rb +284 -0
  62. data/test/test_helper.rb +135 -0
  63. data/test/worker_test.rb +443 -0
  64. metadata +188 -0
data/lib/resque/server/views/working.erb
@@ -0,0 +1,72 @@
+ <% if params[:id] && (worker = Resque::Worker.find(params[:id])) && worker.job %>
+ <h1><%= worker %>'s job</h1>
+
+ <table>
+ <tr>
+ <th>&nbsp;</th>
+ <th>Where</th>
+ <th>Queue</th>
+ <th>Started</th>
+ <th>Class</th>
+ <th>Args</th>
+ </tr>
+ <tr>
+ <td><img src="<%=u 'working.png' %>" alt="working" title="working"></td>
+ <% host, pid, _ = worker.to_s.split(':') %>
+ <td><a href="<%=u "/workers/#{worker}" %>"><%= host %>:<%= pid %></a></td>
+ <% data = worker.job %>
+ <% queue = data['queue'] %>
+ <td><a class="queue" href="<%=u "/queues/#{queue}" %>"><%= queue %></a></td>
+ <td><span class="time"><%= data['run_at'] %></span></td>
+ <td>
+ <code><%= data['payload']['class'] %></code>
+ </td>
+ <td><%=h data['payload']['args'].inspect %></td>
+ </tr>
+ </table>
+
+ <% else %>
+
+ <%
+ workers = resque.working
+ jobs = workers.collect {|w| w.job }
+ worker_jobs = workers.zip(jobs)
+ worker_jobs = worker_jobs.reject { |w, j| w.idle? }
+ %>
+
+ <h1 class='wi'><%= worker_jobs.size %> of <%= resque.workers.size %> Workers Working</h1>
+ <p class='intro'>The list below contains all workers which are currently running a job.</p>
+ <table class='workers'>
+ <tr>
+ <th>&nbsp;</th>
+ <th>Where</th>
+ <th>Queue</th>
+ <th>Processing</th>
+ </tr>
+ <% if worker_jobs.empty? %>
+ <tr>
+ <td colspan="4" class='no-data'>Nothing is happening right now...</td>
+ </tr>
+ <% end %>
+
+ <% worker_jobs.sort_by {|w, j| j['run_at'] ? j['run_at'] : '' }.each do |worker, job| %>
+ <tr>
+ <td class='icon'><img src="<%=u state = worker.state %>.png" alt="<%= state %>" title="<%= state %>"></td>
+ <% host, pid, queues = worker.to_s.split(':') %>
+ <td class='where'><a href="<%=u "/workers/#{worker}" %>"><%= host %>:<%= pid %></a></td>
+ <td class='queues queue'>
+ <a class="queue-tag" href="<%=u "/queues/#{job['queue']}" %>"><%= job['queue'] %></a>
+ </td>
+ <td class='process'>
+ <% if job['queue'] %>
+ <code><%= job['payload']['class'] %></code>
+ <small><a class="queue time" href="<%=u "/working/#{worker}" %>"><%= job['run_at'] %></a></small>
+ <% else %>
+ <span class='waiting'>Waiting for a job...</span>
+ <% end %>
+ </td>
+ </tr>
+ <% end %>
+ </table>
+
+ <% end %>
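The hash this template reads from worker.job is the one Worker#working_on (in data/lib/resque/worker.rb, further down in this changeset) stores in Redis. Its shape, with illustrative values, is roughly:

    # Decoded value of worker.job while a job is running (values are examples)
    {
      'queue'   => 'critical',
      'run_at'  => '2012/01/01 12:00:00 UTC',
      'payload' => { 'class' => 'Archive', 'args' => [35, 'export'] }
    }

When a worker is idle its "worker:<id>" key is absent and worker.job returns an empty hash, which is what the "Waiting for a job..." branch above handles.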
data/lib/resque/stat.rb
@@ -0,0 +1,53 @@
+ module Resque
+ # The stat subsystem. Used to keep track of integer counts.
+ #
+ # Get a stat: Stat[name]
+ # Incr a stat: Stat.incr(name)
+ # Decr a stat: Stat.decr(name)
+ # Kill a stat: Stat.clear(name)
+ module Stat
+ extend self
+ extend Helpers
+
+ # Returns the int value of a stat, given a string stat name.
+ def get(stat)
+ redis.get("stat:#{stat}").to_i
+ end
+
+ # Alias of `get`
+ def [](stat)
+ get(stat)
+ end
+
+ # For a string stat name, increments the stat by one.
+ #
+ # Can optionally accept a second int parameter. The stat is then
+ # incremented by that amount.
+ def incr(stat, by = 1)
+ redis.incrby("stat:#{stat}", by)
+ end
+
+ # Increments a stat by one.
+ def <<(stat)
+ incr stat
+ end
+
+ # For a string stat name, decrements the stat by one.
+ #
+ # Can optionally accept a second int parameter. The stat is then
+ # decremented by that amount.
+ def decr(stat, by = 1)
+ redis.decrby("stat:#{stat}", by)
+ end
+
+ # Decrements a stat by one.
+ def >>(stat)
+ decr stat
+ end
+
+ # Removes a stat from Redis, effectively setting it to 0.
+ def clear(stat)
+ redis.del("stat:#{stat}")
+ end
+ end
+ end
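The module comment above documents the whole counter API; a minimal usage sketch, assuming Resque.redis already points at a live connection:

    require 'resque'

    # Counters are stored in Redis under "stat:<name>" keys.
    Resque::Stat.incr('processed')      # add 1
    Resque::Stat.incr('processed', 5)   # add 5
    Resque::Stat << 'processed'         # shorthand for incr by 1

    Resque::Stat['processed']           # => current value as an Integer
    Resque::Stat.decr('processed')      # subtract 1
    Resque::Stat.clear('processed')     # delete the key, i.e. reset to 0

Worker#processed! and Worker#failed! in data/lib/resque/worker.rb below use the shift-operator shorthand to bump the global and per-worker counters.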
data/lib/resque/tasks.rb
@@ -0,0 +1,61 @@
+ # require 'resque/tasks'
+ # will give you the resque tasks
+
+ namespace :resque do
+ task :setup
+
+ desc "Start a Resque worker"
+ task :work => [ :preload, :setup ] do
+ require 'resque'
+
+ queues = (ENV['QUEUES'] || ENV['QUEUE']).to_s.split(',')
+
+ begin
+ worker = Resque::Worker.new(*queues)
+ worker.verbose = ENV['LOGGING'] || ENV['VERBOSE']
+ worker.very_verbose = ENV['VVERBOSE']
+ rescue Resque::NoQueueError
+ abort "set QUEUE env var, e.g. $ QUEUE=critical,high rake resque:work"
+ end
+
+ if ENV['BACKGROUND']
+ unless Process.respond_to?('daemon')
+ abort "env var BACKGROUND is set, which requires ruby >= 1.9"
+ end
+ Process.daemon(true)
+ end
+
+ if ENV['PIDFILE']
+ File.open(ENV['PIDFILE'], 'w') { |f| f << worker.pid }
+ end
+
+ worker.log "Starting worker #{worker}"
+
+ worker.work(ENV['INTERVAL'] || 5) # interval, will block
+ end
+
+ desc "Start multiple Resque workers. Should only be used in dev mode."
+ task :workers do
+ threads = []
+
+ ENV['COUNT'].to_i.times do
+ threads << Thread.new do
+ system "rake resque:work"
+ end
+ end
+
+ threads.each { |thread| thread.join }
+ end
+
+ # Preload app files if this is Rails
+ task :preload => :setup do
+ if defined?(Rails) && Rails.respond_to?(:application)
+ # Rails 3
+ Rails.application.eager_load!
+ elsif defined?(Rails::Initializer)
+ # Rails 2.3
+ $rails_rake_task = false
+ Rails::Initializer.run :load_application_classes
+ end
+ end
+ end
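Because resque:work depends on the empty :setup task defined at the top of the namespace, a host application can hook its own boot code in by re-opening that task (Rake appends the new action to the existing task). A sketch of such a Rakefile, where the required path is a hypothetical placeholder:

    # Rakefile in the host application (illustrative)
    require 'resque/tasks'

    namespace :resque do
      task :setup do
        # Hypothetical boot file: load job classes and configure Resque.redis here.
        require './environment'
      end
    end

A worker is then started with, for example, QUEUE=critical,high rake resque:work (the task aborts with that exact hint when no queue is given); VERBOSE/VVERBOSE, BACKGROUND, PIDFILE, and INTERVAL tune logging, daemonization, the pid file, and the polling interval as read above.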
data/lib/resque/version.rb
@@ -0,0 +1,3 @@
+ module Resque
+ Version = VERSION = '1.20.0'
+ end
data/lib/resque/worker.rb
@@ -0,0 +1,557 @@
+ module Resque
+ # A Resque Worker processes jobs. On platforms that support fork(2),
+ # the worker will fork off a child to process each job. This ensures
+ # a clean slate when beginning the next job and cuts down on gradual
+ # memory growth as well as low level failures.
+ #
+ # It also ensures workers are always listening to signals from you,
+ # their master, and can react accordingly.
+ class Worker
+ include Resque::Helpers
+ extend Resque::Helpers
+
+ # Whether the worker should log basic info to STDOUT
+ attr_accessor :verbose
+
+ # Whether the worker should log lots of info to STDOUT
+ attr_accessor :very_verbose
+
+ # Boolean indicating whether this worker can or can not fork.
+ # Automatically set if a fork(2) fails.
+ attr_accessor :cant_fork
+
+ attr_writer :to_s
+
+ # Returns an array of all worker objects.
+ def self.all
+ Array(redis.smembers(:workers)).map { |id| find(id) }.compact
+ end
+
+ # Returns an array of all worker objects currently processing
+ # jobs.
+ def self.working
+ names = all
+ return [] unless names.any?
+
+ names.map! { |name| "worker:#{name}" }
+
+ reportedly_working = {}
+
+ begin
+ reportedly_working = redis.mapped_mget(*names).reject do |key, value|
+ value.nil? || value.empty?
+ end
+ rescue Redis::Distributed::CannotDistribute
+ names.each do |name|
+ value = redis.get name
+ reportedly_working[name] = value unless value.nil? || value.empty?
+ end
+ end
+
+ reportedly_working.keys.map do |key|
+ find key.sub("worker:", '')
+ end.compact
+ end
+
+ # Returns a single worker object. Accepts a string id.
+ def self.find(worker_id)
+ if exists? worker_id
+ queues = worker_id.split(':')[-1].split(',')
+ worker = new(*queues)
+ worker.to_s = worker_id
+ worker
+ else
+ nil
+ end
+ end
+
+ # Alias of `find`
+ def self.attach(worker_id)
+ find(worker_id)
+ end
+
+ # Given a string worker id, return a boolean indicating whether the
+ # worker exists
+ def self.exists?(worker_id)
+ redis.sismember(:workers, worker_id)
+ end
+
+ # Workers should be initialized with an array of string queue
+ # names. The order is important: a Worker will check the first
+ # queue given for a job. If none is found, it will check the
+ # second queue name given. If a job is found, it will be
+ # processed. Upon completion, the Worker will again check the
+ # first queue given, and so forth. In this way the queue list
+ # passed to a Worker on startup defines the priorities of queues.
+ #
+ # If passed a single "*", this Worker will operate on all queues
+ # in alphabetical order. Queues can be dynamically added or
+ # removed without needing to restart workers using this method.
+ def initialize(*queues)
+ @queues = queues.map { |queue| queue.to_s.strip }
+ @shutdown = nil
+ @paused = nil
+ validate_queues
+ end
+
+ # A worker must be given a queue, otherwise it won't know what to
+ # do with itself.
+ #
+ # You probably never need to call this.
+ def validate_queues
+ if @queues.nil? || @queues.empty?
+ raise NoQueueError.new("Please give each worker at least one queue.")
+ end
+ end
+
+ # This is the main workhorse method. Called on a Worker instance,
+ # it begins the worker life cycle.
+ #
+ # The following events occur during a worker's life cycle:
+ #
+ # 1. Startup: Signals are registered, dead workers are pruned,
+ # and this worker is registered.
+ # 2. Work loop: Jobs are pulled from a queue and processed.
+ # 3. Teardown: This worker is unregistered.
+ #
+ # Can be passed a float representing the polling frequency.
+ # The default is 5 seconds, but for a semi-active site you may
+ # want to use a smaller value.
+ #
+ # Also accepts a block which will be passed the job as soon as it
+ # has completed processing. Useful for testing.
+ def work(interval = 5.0, &block)
+ interval = Float(interval)
+ $0 = "resque: Starting"
+ startup
+
+ loop do
+ break if shutdown?
+
+ pause if should_pause?
+
+ if job = reserve(interval)
+ log "got: #{job.inspect}"
+ job.worker = self
+ run_hook :before_fork, job
+ working_on job
+
+ if @child = fork
+ srand # Reseeding
+ procline "Forked #{@child} at #{Time.now.to_i}"
+ Process.wait(@child)
+ else
+ procline "Processing #{job.queue} since #{Time.now.to_i}"
+ perform(job, &block)
+ exit! unless @cant_fork
+ end
+
+ done_working
+ @child = nil
+ else
+ break if interval.zero?
+ log! "Timed out after #{interval} seconds"
+ procline paused? ? "Paused" : "Waiting for #{@queues.join(',')}"
+ end
+ end
+
+ ensure
+ unregister_worker
+ end
+
+ # DEPRECATED. Processes a single job. If none is given, it will
+ # try to produce one. Usually run in the child.
+ def process(job = nil, &block)
+ return unless job ||= reserve
+
+ job.worker = self
+ working_on job
+ perform(job, &block)
+ ensure
+ done_working
+ end
+
+ # Processes a given job in the child.
+ def perform(job)
+ begin
+ run_hook :after_fork, job
+ job.perform
+ rescue Object => e
+ log "#{job.inspect} failed: #{e.inspect}"
+ begin
+ job.fail(e)
+ rescue Object => e
+ log "Received exception when reporting failure: #{e.inspect}"
+ end
+ failed!
+ else
+ log "done: #{job.inspect}"
+ ensure
+ yield job if block_given?
+ end
+ end
+
+ # Attempts to grab a job off one of the provided queues. Returns
+ # nil if no job can be found.
+ def reserve(interval = 5.0)
+ interval = interval.to_i
+ multi_queue = MultiQueue.new(
+ queues.map {|queue| Queue.new(queue, Resque.redis, Resque.coder) },
+ Resque.redis)
+
+ if interval < 1
+ begin
+ queue, job = multi_queue.pop(true)
+ rescue ThreadError
+ queue, job = nil
+ end
+ else
+ queue, job = multi_queue.poll(interval.to_i)
+ end
+
+ log! "Found job on #{queue}"
+ Job.new(queue.name, job) if queue && job
+ end
+
+ # Returns a list of queues to use when searching for a job.
+ # A splat ("*") means you want every queue (in alpha order) - this
+ # can be useful for dynamically adding new queues.
+ def queues
+ @queues.map {|queue| queue == "*" ? Resque.queues.sort : queue }.flatten.uniq
+ end
+
+ # Not every platform supports fork. Here we do our magic to
+ # determine if yours does.
+ def fork
+ @cant_fork = true if $TESTING
+
+ return if @cant_fork
+
+ begin
+ # IronRuby doesn't support `Kernel.fork` yet
+ if Kernel.respond_to?(:fork)
+ Kernel.fork
+ else
+ raise NotImplementedError
+ end
+ rescue NotImplementedError
+ @cant_fork = true
+ nil
+ end
+ end
+
+ # Runs all the methods needed when a worker begins its lifecycle.
+ def startup
+ enable_gc_optimizations
+ register_signal_handlers
+ prune_dead_workers
+ run_hook :before_first_fork
+ register_worker
+
+ # Fix buffering so we can `rake resque:work > resque.log` and
+ # get output from the child in there.
+ $stdout.sync = true
+ end
+
+ # Enables GC Optimizations if you're running REE.
+ # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
+ def enable_gc_optimizations
+ if GC.respond_to?(:copy_on_write_friendly=)
+ GC.copy_on_write_friendly = true
+ end
+ end
+
+ # Registers the various signal handlers a worker responds to.
+ #
+ # TERM: Shutdown immediately, stop processing jobs.
+ # INT: Shutdown immediately, stop processing jobs.
+ # QUIT: Shutdown after the current job has finished processing.
+ # USR1: Kill the forked child immediately, continue processing jobs.
+ # USR2: Don't process any new jobs
+ # CONT: Start processing jobs again after a USR2
+ def register_signal_handlers
+ trap('TERM') { shutdown }
+ trap('INT') { shutdown! }
+
+ begin
+ trap('QUIT') { shutdown }
+ trap('USR1') { kill_child }
+ trap('USR2') { pause_processing }
+ rescue ArgumentError
+ warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
+ end
+
+ log! "Registered signals"
+ end
+
+ # Schedule this worker for shutdown. Will finish processing the
+ # current job.
+ def shutdown
+ log 'Exiting...'
+ @shutdown = true
+ end
+
+ # Kill the child and shutdown immediately.
+ def shutdown!
+ shutdown
+ kill_child
+ end
+
+ # Should this worker shutdown as soon as current job is finished?
+ def shutdown?
+ @shutdown
+ end
+
+ # Kills the forked child immediately, without remorse. The job it
+ # is processing will not be completed.
+ def kill_child
+ if @child
+ log! "Killing child at #{@child}"
+ if system("ps -o pid,state -p #{@child}")
+ Process.kill("KILL", @child) rescue nil
+ else
+ log! "Child #{@child} not found, restarting."
+ shutdown
+ end
+ end
+ end
+
+ # are we paused?
+ def should_pause?
+ @paused
+ end
+ alias :paused? :should_pause?
+
+ def pause
+ rd, wr = IO.pipe
+ trap('CONT') {
+ log "CONT received; resuming job processing"
+ @paused = false
+ wr.write 'x'
+ wr.close
+ }
+ rd.read 1
+ rd.close
+ end
+
+ # Stop processing jobs after the current one has completed (if we're
+ # currently running one).
+ def pause_processing
+ log "USR2 received; pausing job processing"
+ @paused = true
+ end
+
+ # Looks for any workers which should be running on this server
+ # and, if they're not, removes them from Redis.
+ #
+ # This is a form of garbage collection. If a server is killed by a
+ # hard shutdown, power failure, or something else beyond our
+ # control, the Resque workers will not die gracefully and therefore
+ # will leave stale state information in Redis.
+ #
+ # By checking the current Redis state against the actual
+ # environment, we can determine if Redis is old and clean it up a bit.
+ def prune_dead_workers
+ all_workers = Worker.all
+ known_workers = worker_pids unless all_workers.empty?
+ all_workers.each do |worker|
+ host, pid, queues = worker.id.split(':')
+ next unless host == hostname
+ next if known_workers.include?(pid)
+ log! "Pruning dead worker: #{worker}"
+ worker.unregister_worker
+ end
+ end
+
+ # Registers ourself as a worker. Useful when entering the worker
+ # lifecycle on startup.
+ def register_worker
+ redis.sadd(:workers, self)
+ started!
+ end
+
+ # Runs a named hook, passing along any arguments.
+ def run_hook(name, *args)
+ return unless hook = Resque.send(name)
+ msg = "Running #{name} hook"
+ msg << " with #{args.inspect}" if args.any?
+ log msg
+
+ args.any? ? hook.call(*args) : hook.call
+ end
+
+ # Unregisters ourself as a worker. Useful when shutting down.
+ def unregister_worker
+ # If we're still processing a job, make sure it gets logged as a
+ # failure.
+ if (hash = processing) && !hash.empty?
+ job = Job.new(hash['queue'], hash['payload'])
+ # Ensure the proper worker is attached to this job, even if
+ # it's not the precise instance that died.
+ job.worker = self
+ job.fail(DirtyExit.new)
+ end
+
+ redis.srem(:workers, self)
+ redis.del("worker:#{self}")
+ redis.del("worker:#{self}:started")
+
+ Stat.clear("processed:#{self}")
+ Stat.clear("failed:#{self}")
+ end
+
+ # Given a job, tells Redis we're working on it. Useful for seeing
+ # what workers are doing and when.
+ def working_on(job)
+ data = encode \
+ :queue => job.queue,
+ :run_at => Time.now.strftime("%Y/%m/%d %H:%M:%S %Z"),
+ :payload => job.payload
+ redis.set("worker:#{self}", data)
+ end
+
+ # Called when we are done working - clears our `working_on` state
+ # and tells Redis we processed a job.
+ def done_working
+ processed!
+ redis.del("worker:#{self}")
+ end
+
+ # How many jobs has this worker processed? Returns an int.
+ def processed
+ Stat["processed:#{self}"]
+ end
+
+ # Tell Redis we've processed a job.
+ def processed!
+ Stat << "processed"
+ Stat << "processed:#{self}"
+ end
+
+ # How many failed jobs has this worker seen? Returns an int.
+ def failed
+ Stat["failed:#{self}"]
+ end
+
+ # Tells Redis we've failed a job.
+ def failed!
+ Stat << "failed"
+ Stat << "failed:#{self}"
+ end
+
+ # What time did this worker start? Returns an instance of `Time`
+ def started
+ redis.get "worker:#{self}:started"
+ end
+
+ # Tell Redis we've started
+ def started!
+ redis.set("worker:#{self}:started", Time.now.to_s)
+ end
+
+ # Returns a hash explaining the Job we're currently processing, if any.
+ def job
+ decode(redis.get("worker:#{self}")) || {}
+ end
+ alias_method :processing, :job
+
+ # Boolean - true if working, false if not
+ def working?
+ state == :working
+ end
+
+ # Boolean - true if idle, false if not
+ def idle?
+ state == :idle
+ end
+
+ # Returns a symbol representing the current worker state,
+ # which can be either :working or :idle
+ def state
+ redis.exists("worker:#{self}") ? :working : :idle
+ end
+
+ # Is this worker the same as another worker?
+ def ==(other)
+ to_s == other.to_s
+ end
+
+ def inspect
+ "#<Worker #{to_s}>"
+ end
+
+ # The string representation is the same as the id for this worker
+ # instance. Can be used with `Worker.find`.
+ def to_s
+ @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
+ end
+ alias_method :id, :to_s
+
+ # chomp'd hostname of this machine
+ def hostname
+ Socket.gethostname
+ end
+
+ # Returns Integer PID of running worker
+ def pid
+ Process.pid
+ end
+
+ # Returns an Array of string pids of all the other workers on this
+ # machine. Useful when pruning dead workers on startup.
+ def worker_pids
+ if RUBY_PLATFORM =~ /solaris/
+ solaris_worker_pids
+ else
+ linux_worker_pids
+ end
+ end
+
+ # Find Resque worker pids on Linux and OS X.
+ #
+ # Returns an Array of string pids of all the other workers on this
+ # machine. Useful when pruning dead workers on startup.
+ def linux_worker_pids
+ `ps -A -o pid,command | grep "[r]esque" | grep -v "resque-web"`.split("\n").map do |line|
+ line.split(' ')[0]
+ end
+ end
+
+ # Find Resque worker pids on Solaris.
+ #
+ # Returns an Array of string pids of all the other workers on this
+ # machine. Useful when pruning dead workers on startup.
+ def solaris_worker_pids
+ `ps -A -o pid,comm | grep "[r]uby" | grep -v "resque-web"`.split("\n").map do |line|
+ real_pid = line.split(' ')[0]
+ pargs_command = `pargs -a #{real_pid} 2>/dev/null | grep [r]esque | grep -v "resque-web"`
+ if pargs_command.split(':')[1] == " resque-#{Resque::Version}"
+ real_pid
+ end
+ end.compact
+ end
+
+ # Given a string, sets the procline ($0) and logs.
+ # Procline is always in the format of:
+ # resque-VERSION: STRING
+ def procline(string)
+ $0 = "resque-#{Resque::Version}: #{string}"
+ log! $0
+ end
+
+ # Log a message to STDOUT if we are verbose or very_verbose.
+ def log(message)
+ if verbose
+ puts "*** #{message}"
+ elsif very_verbose
+ time = Time.now.strftime('%H:%M:%S %Y-%m-%d')
+ puts "** [#{time}] #$$: #{message}"
+ end
+ end
+
+ # Logs a very verbose message to STDOUT.
+ def log!(message)
+ log message if very_verbose
+ end
+ end
+ end
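Tying the class together: the resque:work rake task earlier in this changeset is a thin wrapper around the lifecycle below, which can also be driven directly from Ruby (the queue names are examples):

    require 'resque'

    # Queue order defines priority; a single '*' would mean every queue,
    # checked in alphabetical order.
    worker = Resque::Worker.new('critical', 'high', 'low')
    worker.verbose = true

    # Blocks, polling every 5 seconds. The optional block receives each job
    # after it has been performed, which the class comment notes is useful
    # for testing.
    worker.work(5) do |job|
      puts "done: #{job.inspect}"
    end

Each iteration reserves a job, forks where fork(2) is available, runs the after_fork hook in the child, performs the job, and records the outcome through Resque::Stat, as the methods above lay out.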