steini-resque 1.18.5

Files changed (54)
  1. data/HISTORY.md +322 -0
  2. data/LICENSE +20 -0
  3. data/README.markdown +881 -0
  4. data/Rakefile +78 -0
  5. data/bin/resque +81 -0
  6. data/bin/resque-web +23 -0
  7. data/lib/resque.rb +352 -0
  8. data/lib/resque/errors.rb +10 -0
  9. data/lib/resque/failure.rb +70 -0
  10. data/lib/resque/failure/base.rb +64 -0
  11. data/lib/resque/failure/hoptoad.rb +48 -0
  12. data/lib/resque/failure/multiple.rb +54 -0
  13. data/lib/resque/failure/redis.rb +51 -0
  14. data/lib/resque/helpers.rb +63 -0
  15. data/lib/resque/job.rb +205 -0
  16. data/lib/resque/plugin.rb +56 -0
  17. data/lib/resque/server.rb +231 -0
  18. data/lib/resque/server/public/favicon.ico +0 -0
  19. data/lib/resque/server/public/idle.png +0 -0
  20. data/lib/resque/server/public/jquery-1.3.2.min.js +19 -0
  21. data/lib/resque/server/public/jquery.relatize_date.js +95 -0
  22. data/lib/resque/server/public/poll.png +0 -0
  23. data/lib/resque/server/public/ranger.js +73 -0
  24. data/lib/resque/server/public/reset.css +48 -0
  25. data/lib/resque/server/public/style.css +85 -0
  26. data/lib/resque/server/public/working.png +0 -0
  27. data/lib/resque/server/test_helper.rb +19 -0
  28. data/lib/resque/server/views/error.erb +1 -0
  29. data/lib/resque/server/views/failed.erb +64 -0
  30. data/lib/resque/server/views/key_sets.erb +19 -0
  31. data/lib/resque/server/views/key_string.erb +11 -0
  32. data/lib/resque/server/views/layout.erb +44 -0
  33. data/lib/resque/server/views/next_more.erb +10 -0
  34. data/lib/resque/server/views/overview.erb +4 -0
  35. data/lib/resque/server/views/queues.erb +49 -0
  36. data/lib/resque/server/views/stats.erb +62 -0
  37. data/lib/resque/server/views/workers.erb +109 -0
  38. data/lib/resque/server/views/working.erb +72 -0
  39. data/lib/resque/stat.rb +53 -0
  40. data/lib/resque/tasks.rb +51 -0
  41. data/lib/resque/version.rb +3 -0
  42. data/lib/resque/worker.rb +533 -0
  43. data/lib/tasks/redis.rake +161 -0
  44. data/lib/tasks/resque.rake +2 -0
  45. data/test/hoptoad_test.rb +25 -0
  46. data/test/job_hooks_test.rb +363 -0
  47. data/test/job_plugins_test.rb +230 -0
  48. data/test/plugin_test.rb +116 -0
  49. data/test/redis-test.conf +115 -0
  50. data/test/resque-web_test.rb +53 -0
  51. data/test/resque_test.rb +259 -0
  52. data/test/test_helper.rb +148 -0
  53. data/test/worker_test.rb +332 -0
  54. metadata +183 -0
data/lib/resque/server/views/working.erb
@@ -0,0 +1,72 @@
+ <% if params[:id] && (worker = Resque::Worker.find(params[:id])) && worker.job %>
+   <h1><%= worker %>'s job</h1>
+
+   <table>
+     <tr>
+       <th>&nbsp;</th>
+       <th>Where</th>
+       <th>Queue</th>
+       <th>Started</th>
+       <th>Class</th>
+       <th>Args</th>
+     </tr>
+     <tr>
+       <td><img src="<%=u 'working.png' %>" alt="working" title="working"></td>
+       <% host, pid, _ = worker.to_s.split(':') %>
+       <td><a href="<%=u "/workers/#{worker}" %>"><%= host %>:<%= pid %></a></td>
+       <% data = worker.job %>
+       <% queue = data['queue'] %>
+       <td><a class="queue" href="<%=u "/queues/#{queue}" %>"><%= queue %></a></td>
+       <td><span class="time"><%= data['run_at'] %></span></td>
+       <td>
+         <code><%= data['payload']['class'] %></code>
+       </td>
+       <td><%=h data['payload']['args'].inspect %></td>
+     </tr>
+   </table>
+
+ <% else %>
+
+   <%
+     workers = resque.working
+     jobs = workers.collect {|w| w.job }
+     worker_jobs = workers.zip(jobs)
+     worker_jobs = worker_jobs.reject { |w, j| w.idle? }
+   %>
+
+   <h1 class='wi'><%= worker_jobs.size %> of <%= resque.workers.size %> Workers Working</h1>
+   <p class='intro'>The list below contains all workers which are currently running a job.</p>
+   <table class='workers'>
+     <tr>
+       <th>&nbsp;</th>
+       <th>Where</th>
+       <th>Queue</th>
+       <th>Processing</th>
+     </tr>
+     <% if worker_jobs.empty? %>
+       <tr>
+         <td colspan="4" class='no-data'>Nothing is happening right now...</td>
+       </tr>
+     <% end %>
+
+     <% worker_jobs.sort_by {|w, j| j['run_at'] ? j['run_at'] : '' }.each do |worker, job| %>
+       <tr>
+         <td class='icon'><img src="<%=u state = worker.state %>.png" alt="<%= state %>" title="<%= state %>"></td>
+         <% host, pid, queues = worker.to_s.split(':') %>
+         <td class='where'><a href="<%=u "/workers/#{worker}" %>"><%= host %>:<%= pid %></a></td>
+         <td class='queues queue'>
+           <a class="queue-tag" href="<%=u "/queues/#{job['queue']}" %>"><%= job['queue'] %></a>
+         </td>
+         <td class='process'>
+           <% if job['queue'] %>
+             <code><%= job['payload']['class'] %></code>
+             <small><a class="queue time" href="<%=u "/working/#{worker}" %>"><%= job['run_at'] %></a></small>
+           <% else %>
+             <span class='waiting'>Waiting for a job...</span>
+           <% end %>
+         </td>
+       </tr>
+     <% end %>
+   </table>
+
+ <% end %>
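For reference, `worker.job` (defined in data/lib/resque/worker.rb further down) decodes the JSON blob stored under `worker:<id>` in Redis, which is exactly the hash this view reads. A minimal sketch of its shape with illustrative values, not output from a live worker:

    # Approximate shape of Resque::Worker#job for a busy worker;
    # an idle worker returns {} instead.
    job = {
      'queue'   => 'critical',
      'run_at'  => '2011/10/01 12:34:56 UTC',
      'payload' => { 'class' => 'Archive', 'args' => [35, 'backup'] }
    }

    job['payload']['class']         # rendered in the Class column
    job['payload']['args'].inspect  # rendered in the Args column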
data/lib/resque/stat.rb
@@ -0,0 +1,53 @@
+ module Resque
+   # The stat subsystem. Used to keep track of integer counts.
+   #
+   #   Get a stat:  Stat[name]
+   #   Incr a stat: Stat.incr(name)
+   #   Decr a stat: Stat.decr(name)
+   #   Kill a stat: Stat.clear(name)
+   module Stat
+     extend self
+     extend Helpers
+
+     # Returns the int value of a stat, given a string stat name.
+     def get(stat)
+       redis.get("stat:#{stat}").to_i
+     end
+
+     # Alias of `get`
+     def [](stat)
+       get(stat)
+     end
+
+     # For a string stat name, increments the stat by one.
+     #
+     # Can optionally accept a second int parameter. The stat is then
+     # incremented by that amount.
+     def incr(stat, by = 1)
+       redis.incrby("stat:#{stat}", by)
+     end
+
+     # Increments a stat by one.
+     def <<(stat)
+       incr stat
+     end
+
+     # For a string stat name, decrements the stat by one.
+     #
+     # Can optionally accept a second int parameter. The stat is then
+     # decremented by that amount.
+     def decr(stat, by = 1)
+       redis.decrby("stat:#{stat}", by)
+     end
+
+     # Decrements a stat by one.
+     def >>(stat)
+       decr stat
+     end
+
+     # Removes a stat from Redis, effectively setting it to 0.
+     def clear(stat)
+       redis.del("stat:#{stat}")
+     end
+   end
+ end
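The comment block at the top of the module documents the whole API; a quick usage sketch, assuming the stat starts at zero (stat names are arbitrary strings, and 'processed' happens to be the one Worker#processed! uses):

    require 'resque'

    Resque::Stat.incr('processed')      # bump by 1
    Resque::Stat.incr('processed', 10)  # bump by an arbitrary amount
    Resque::Stat['processed']           # => 11, same as Resque::Stat.get('processed')
    Resque::Stat << 'processed'         # operator alias for incr-by-one
    Resque::Stat.decr('processed')
    Resque::Stat.clear('processed')     # deletes stat:processed from Redis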
data/lib/resque/tasks.rb
@@ -0,0 +1,51 @@
+ # require 'resque/tasks'
+ # will give you the resque tasks
+
+ namespace :resque do
+   task :setup
+
+   desc "Start a Resque worker"
+   task :work => [ :preload, :setup ] do
+     require 'resque'
+
+     queues = (ENV['QUEUES'] || ENV['QUEUE']).to_s.split(',')
+
+     begin
+       worker = Resque::Worker.new(*queues)
+       worker.verbose = ENV['LOGGING'] || ENV['VERBOSE']
+       worker.very_verbose = ENV['VVERBOSE']
+     rescue Resque::NoQueueError
+       abort "set QUEUE env var, e.g. $ QUEUE=critical,high rake resque:work"
+     end
+
+     if ENV['PIDFILE']
+       File.open(ENV['PIDFILE'], 'w') { |f| f << worker.pid }
+     end
+
+     worker.log "Starting worker #{worker}"
+
+     worker.work(ENV['INTERVAL'] || 5) # interval, will block
+   end
+
+   desc "Start multiple Resque workers. Should only be used in dev mode."
+   task :workers do
+     threads = []
+
+     ENV['COUNT'].to_i.times do
+       threads << Thread.new do
+         system "rake resque:work"
+       end
+     end
+
+     threads.each { |thread| thread.join }
+   end
+
+   # Preload app files if this is Rails
+   task :preload => :setup do
+     if defined?(Rails) && Rails.env == 'production'
+       #Dir["#{Rails.root}/app/**/*.rb"].each do |file|
+       #  require file
+       #end
+     end
+   end
+ end
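As the header comment says, an application opts in by requiring this file from its Rakefile. The environment variables shown below are the ones the :work and :workers tasks actually read (QUEUE/QUEUES, VERBOSE/LOGGING, VVERBOSE, PIDFILE, INTERVAL, COUNT); the :environment prerequisite is an assumption that a Rails-style environment task exists. A minimal Rakefile sketch:

    # Rakefile
    require 'resque/tasks'

    # Hook your app's environment into the empty :setup task so jobs can load it
    # (assumes a Rails-style :environment task; substitute your own loader).
    task 'resque:setup' => :environment

    # Then, from a shell, for example:
    #   $ QUEUE=critical,high VERBOSE=1 INTERVAL=5 rake resque:work
    #   $ COUNT=4 QUEUE=* rake resque:workers   # dev only: one thread shelling out per worker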
data/lib/resque/version.rb
@@ -0,0 +1,3 @@
+ module Resque
+   Version = VERSION = '1.18.5'
+ end
data/lib/resque/worker.rb
@@ -0,0 +1,533 @@
+ module Resque
+   # A Resque Worker processes jobs. On platforms that support fork(2),
+   # the worker will fork off a child to process each job. This ensures
+   # a clean slate when beginning the next job and cuts down on gradual
+   # memory growth as well as low level failures.
+   #
+   # It also ensures workers are always listening to signals from you,
+   # their master, and can react accordingly.
+   class Worker
+     include Resque::Helpers
+     extend Resque::Helpers
+
+     # Whether the worker should log basic info to STDOUT
+     attr_accessor :verbose
+
+     # Whether the worker should log lots of info to STDOUT
+     attr_accessor :very_verbose
+
+     # Boolean indicating whether this worker can or can not fork.
+     # Automatically set if a fork(2) fails.
+     attr_accessor :cant_fork
+
+     attr_writer :to_s
+
+     # Returns an array of all worker objects.
+     def self.all
+       Array(redis.smembers(:workers)).map { |id| find(id) }.compact
+     end
+
+     # Returns an array of all worker objects currently processing
+     # jobs.
+     def self.working
+       names = all
+       return [] unless names.any?
+
+       names.map! { |name| "worker:#{name}" }
+
+       reportedly_working = redis.mapped_mget(*names).reject do |key, value|
+         value.nil? || value.empty?
+       end
+       reportedly_working.keys.map do |key|
+         find key.sub("worker:", '')
+       end.compact
+     end
+
+     # Returns a single worker object. Accepts a string id.
+     def self.find(worker_id)
+       if exists? worker_id
+         queues = worker_id.split(':')[-1].split(',')
+         worker = new(*queues)
+         worker.to_s = worker_id
+         worker
+       else
+         nil
+       end
+     end
+
+     # Alias of `find`
+     def self.attach(worker_id)
+       find(worker_id)
+     end
+
+     # Given a string worker id, return a boolean indicating whether the
+     # worker exists
+     def self.exists?(worker_id)
+       redis.sismember(:workers, worker_id)
+     end
+
+     # Workers should be initialized with an array of string queue
+     # names. The order is important: a Worker will check the first
+     # queue given for a job. If none is found, it will check the
+     # second queue name given. If a job is found, it will be
+     # processed. Upon completion, the Worker will again check the
+     # first queue given, and so forth. In this way the queue list
+     # passed to a Worker on startup defines the priorities of queues.
+     #
+     # If passed a single "*", this Worker will operate on all queues
+     # in alphabetical order. Queues can be dynamically added or
+     # removed without needing to restart workers using this method.
+     def initialize(*queues)
+       @queues = queues.map { |queue| queue.to_s.strip }
+       validate_queues
+     end
+
+     # A worker must be given a queue, otherwise it won't know what to
+     # do with itself.
+     #
+     # You probably never need to call this.
+     def validate_queues
+       if @queues.nil? || @queues.empty?
+         raise NoQueueError.new("Please give each worker at least one queue.")
+       end
+     end
+
+     # This is the main workhorse method. Called on a Worker instance,
+     # it begins the worker life cycle.
+     #
+     # The following events occur during a worker's life cycle:
+     #
+     # 1. Startup:   Signals are registered, dead workers are pruned,
+     #    and this worker is registered.
+     # 2. Work loop: Jobs are pulled from a queue and processed.
+     # 3. Teardown:  This worker is unregistered.
+     #
+     # Can be passed a float representing the polling frequency.
+     # The default is 5 seconds, but for a semi-active site you may
+     # want to use a smaller value.
+     #
+     # Also accepts a block which will be passed the job as soon as it
+     # has completed processing. Useful for testing.
+     def work(interval = 5.0, &block)
+       interval = Float(interval)
+       $0 = "resque: Starting"
+       startup
+
+       loop do
+         break if shutdown?
+
+         if not paused? and job = reserve
+           log "got: #{job.inspect}"
+           run_hook :before_fork, job
+           working_on job
+
+           if @child = fork
+             srand # Reseeding
+             procline "Forked #{@child} at #{Time.now.to_i}"
+             Process.wait
+           else
+             procline "Processing #{job.queue} since #{Time.now.to_i}"
+             perform(job, &block)
+             exit! unless @cant_fork
+           end
+
+           done_working
+           @child = nil
+         else
+           break if interval.zero?
+           log! "Sleeping for #{interval} seconds"
+           procline paused? ? "Paused" : "Waiting for #{@queues.join(',')}"
+           sleep interval
+         end
+       end
+
+     ensure
+       unregister_worker
+     end
+
+     # DEPRECATED. Processes a single job. If none is given, it will
+     # try to produce one. Usually run in the child.
+     def process(job = nil, &block)
+       return unless job ||= reserve
+
+       working_on job
+       perform(job, &block)
+     ensure
+       done_working
+     end
+
+     # Processes a given job in the child.
+     def perform(job)
+       begin
+         run_hook :after_fork, job
+         job.perform
+       rescue Object => e
+         log "#{job.inspect} failed: #{e.inspect}"
+         begin
+           job.fail(e)
+         rescue Object => e
+           log "Received exception when reporting failure: #{e.inspect}"
+         end
+         failed!
+       else
+         log "done: #{job.inspect}"
+       ensure
+         yield job if block_given?
+       end
+     end
+
+     # Attempts to grab a job off one of the provided queues. Returns
+     # nil if no job can be found.
+     def reserve
+       queues.each do |queue|
+         log! "Checking #{queue}"
+         if job = Resque::Job.reserve(queue)
+           log! "Found job on #{queue}"
+           return job
+         end
+       end
+
+       nil
+     rescue Exception => e
+       log "Error reserving job: #{e.inspect}"
+       log e.backtrace.join("\n")
+       raise e
+     end
+
+     # Returns a list of queues to use when searching for a job.
+     # A splat ("*") means you want every queue (in alpha order) - this
+     # can be useful for dynamically adding new queues.
+     def queues
+       @queues[0] == "*" ? Resque.queues.sort : @queues
+     end
+
+     # Not every platform supports fork. Here we do our magic to
+     # determine if yours does.
+     def fork
+       @cant_fork = true if $TESTING
+
+       return if @cant_fork
+
+       begin
+         # IronRuby doesn't support `Kernel.fork` yet
+         if Kernel.respond_to?(:fork)
+           Kernel.fork
+         else
+           raise NotImplementedError
+         end
+       rescue NotImplementedError
+         @cant_fork = true
+         nil
+       end
+     end
+
+     # Runs all the methods needed when a worker begins its lifecycle.
+     def startup
+       enable_gc_optimizations
+       register_signal_handlers
+       prune_dead_workers
+       run_hook :before_first_fork
+       register_worker
+
+       # Fix buffering so we can `rake resque:work > resque.log` and
+       # get output from the child in there.
+       $stdout.sync = true
+     end
+
+     # Enables GC Optimizations if you're running REE.
+     # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
+     def enable_gc_optimizations
+       if GC.respond_to?(:copy_on_write_friendly=)
+         GC.copy_on_write_friendly = true
+       end
+     end
+
+     # Registers the various signal handlers a worker responds to.
+     #
+     # TERM: Shutdown immediately, stop processing jobs.
+     # INT:  Shutdown immediately, stop processing jobs.
+     # QUIT: Shutdown after the current job has finished processing.
+     # USR1: Kill the forked child immediately, continue processing jobs.
+     # USR2: Don't process any new jobs
+     # CONT: Start processing jobs again after a USR2
+     def register_signal_handlers
+       trap('TERM') { shutdown! }
+       trap('INT') { shutdown! }
+
+       begin
+         trap('QUIT') { shutdown }
+         trap('USR1') { kill_child }
+         trap('USR2') { pause_processing }
+         trap('CONT') { unpause_processing }
+       rescue ArgumentError
+         warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
+       end
+
+       log! "Registered signals"
+     end
+
+     # Schedule this worker for shutdown. Will finish processing the
+     # current job.
+     def shutdown
+       log 'Exiting...'
+       @shutdown = true
+     end
+
+     # Kill the child and shutdown immediately.
+     def shutdown!
+       shutdown
+       kill_child
+     end
+
+     # Should this worker shutdown as soon as current job is finished?
+     def shutdown?
+       @shutdown
+     end
+
+     # Kills the forked child immediately, without remorse. The job it
+     # is processing will not be completed.
+     def kill_child
+       if @child
+         log! "Killing child at #{@child}"
+         if system("ps -o pid,state -p #{@child}")
+           Process.kill("KILL", @child) rescue nil
+         else
+           log! "Child #{@child} not found, restarting."
+           shutdown
+         end
+       end
+     end
+
+     # are we paused?
+     def paused?
+       @paused
+     end
+
+     # Stop processing jobs after the current one has completed (if we're
+     # currently running one).
+     def pause_processing
+       log "USR2 received; pausing job processing"
+       @paused = true
+     end
+
+     # Start processing jobs again after a pause
+     def unpause_processing
+       log "CONT received; resuming job processing"
+       @paused = false
+     end
+
+     # Looks for any workers which should be running on this server
+     # and, if they're not, removes them from Redis.
+     #
+     # This is a form of garbage collection. If a server is killed by a
+     # hard shutdown, power failure, or something else beyond our
+     # control, the Resque workers will not die gracefully and therefore
+     # will leave stale state information in Redis.
+     #
+     # By checking the current Redis state against the actual
+     # environment, we can determine if Redis is old and clean it up a bit.
+     def prune_dead_workers
+       all_workers = Worker.all
+       known_workers = worker_pids unless all_workers.empty?
+       all_workers.each do |worker|
+         host, pid, queues = worker.id.split(':')
+         next unless host == hostname
+         next if known_workers.include?(pid)
+         log! "Pruning dead worker: #{worker}"
+         worker.unregister_worker
+       end
+     end
+
+     # Registers ourself as a worker. Useful when entering the worker
+     # lifecycle on startup.
+     def register_worker
+       redis.sadd(:workers, self)
+       started!
+     end
+
+     # Runs a named hook, passing along any arguments.
+     def run_hook(name, *args)
+       return unless hook = Resque.send(name)
+       msg = "Running #{name} hook"
+       msg << " with #{args.inspect}" if args.any?
+       log msg
+
+       args.any? ? hook.call(*args) : hook.call
+     end
+
+     # Unregisters ourself as a worker. Useful when shutting down.
+     def unregister_worker
+       # If we're still processing a job, make sure it gets logged as a
+       # failure.
+       if (hash = processing) && !hash.empty?
+         job = Job.new(hash['queue'], hash['payload'])
+         # Ensure the proper worker is attached to this job, even if
+         # it's not the precise instance that died.
+         job.worker = self
+         job.fail(DirtyExit.new)
+       end
+
+       redis.srem(:workers, self)
+       redis.del("worker:#{self}")
+       redis.del("worker:#{self}:started")
+
+       Stat.clear("processed:#{self}")
+       Stat.clear("failed:#{self}")
+     end
+
+     # Given a job, tells Redis we're working on it. Useful for seeing
+     # what workers are doing and when.
+     def working_on(job)
+       job.worker = self
+       data = encode \
+         :queue   => job.queue,
+         :run_at  => Time.now.strftime("%Y/%m/%d %H:%M:%S %Z"),
+         :payload => job.payload
+       redis.set("worker:#{self}", data)
+     end
+
+     # Called when we are done working - clears our `working_on` state
+     # and tells Redis we processed a job.
+     def done_working
+       processed!
+       redis.del("worker:#{self}")
+     end
+
+     # How many jobs has this worker processed? Returns an int.
+     def processed
+       Stat["processed:#{self}"]
+     end
+
+     # Tell Redis we've processed a job.
+     def processed!
+       Stat << "processed"
+       Stat << "processed:#{self}"
+     end
+
+     # How many failed jobs has this worker seen? Returns an int.
+     def failed
+       Stat["failed:#{self}"]
+     end
+
+     # Tells Redis we've failed a job.
+     def failed!
+       Stat << "failed"
+       Stat << "failed:#{self}"
+     end
+
+     # What time did this worker start? Returns an instance of `Time`
+     def started
+       redis.get "worker:#{self}:started"
+     end
+
+     # Tell Redis we've started
+     def started!
+       redis.set("worker:#{self}:started", Time.now.to_s)
+     end
+
+     # Returns a hash explaining the Job we're currently processing, if any.
+     def job
+       decode(redis.get("worker:#{self}")) || {}
+     end
+     alias_method :processing, :job
+
+     # Boolean - true if working, false if not
+     def working?
+       state == :working
+     end
+
+     # Boolean - true if idle, false if not
+     def idle?
+       state == :idle
+     end
+
+     # Returns a symbol representing the current worker state,
+     # which can be either :working or :idle
+     def state
+       redis.exists("worker:#{self}") ? :working : :idle
+     end
+
+     # Is this worker the same as another worker?
+     def ==(other)
+       to_s == other.to_s
+     end
+
+     def inspect
+       "#<Worker #{to_s}>"
+     end
+
+     # The string representation is the same as the id for this worker
+     # instance. Can be used with `Worker.find`.
+     def to_s
+       @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
+     end
+     alias_method :id, :to_s
+
+     # chomp'd hostname of this machine
+     def hostname
+       @hostname ||= `hostname`.chomp
+     end
+
+     # Returns Integer PID of running worker
+     def pid
+       @pid ||= to_s.split(":")[1].to_i
+     end
+
+     # Returns an Array of string pids of all the other workers on this
+     # machine. Useful when pruning dead workers on startup.
+     def worker_pids
+       if RUBY_PLATFORM =~ /solaris/
+         solaris_worker_pids
+       else
+         linux_worker_pids
+       end
+     end
+
+     # Find Resque worker pids on Linux and OS X.
+     #
+     # Returns an Array of string pids of all the other workers on this
+     # machine. Useful when pruning dead workers on startup.
+     def linux_worker_pids
+       `ps -A -o pid,command | grep [r]esque | grep -v "resque-web"`.split("\n").map do |line|
+         line.split(' ')[0]
+       end
+     end
+
+     # Find Resque worker pids on Solaris.
+     #
+     # Returns an Array of string pids of all the other workers on this
+     # machine. Useful when pruning dead workers on startup.
+     def solaris_worker_pids
+       `ps -A -o pid,comm | grep [r]uby | grep -v "resque-web"`.split("\n").map do |line|
+         real_pid = line.split(' ')[0]
+         pargs_command = `pargs -a #{real_pid} 2>/dev/null | grep [r]esque | grep -v "resque-web"`
+         if pargs_command.split(':')[1] == " resque-#{Resque::Version}"
+           real_pid
+         end
+       end.compact
+     end
+
+     # Given a string, sets the procline ($0) and logs.
+     # Procline is always in the format of:
+     #   resque-VERSION: STRING
+     def procline(string)
+       $0 = "resque-#{Resque::Version}: #{string}"
+       log! $0
+     end
+
+     # Log a message to STDOUT if we are verbose or very_verbose.
+     def log(message)
+       if verbose
+         puts "*** #{message}"
+       elsif very_verbose
+         time = Time.now.strftime('%H:%M:%S %Y-%m-%d')
+         puts "** [#{time}] #$$: #{message}"
+       end
+     end
+
+     # Logs a very verbose message to STDOUT.
+     def log!(message)
+       log message if very_verbose
+     end
+   end
+ end
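Putting the pieces together, starting a worker outside of the rake task follows the same steps the :work task performs. A hedged sketch using only the API defined above; the queue names and verbosity flag are illustrative:

    require 'resque'

    worker = Resque::Worker.new('critical', 'high')  # priority order; '*' would poll every queue
    worker.verbose = true

    # Blocks: registers signal handlers, prunes dead workers, registers this
    # worker in Redis, then polls the queues every 5 seconds, forking a child per job.
    worker.work(5)

    # From another shell, per the signal handlers registered above:
    #   kill -QUIT <pid>   # finish the current job, then exit
    #   kill -USR2 <pid>   # pause; kill -CONT <pid> resumes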