tr_resque 1.20.1
Sign up to get free protection for your applications and to get access to all the features.
- data/HISTORY.md +354 -0
- data/LICENSE +20 -0
- data/README.markdown +908 -0
- data/Rakefile +70 -0
- data/bin/resque +81 -0
- data/bin/resque-web +27 -0
- data/lib/resque.rb +369 -0
- data/lib/resque/errors.rb +10 -0
- data/lib/resque/failure.rb +96 -0
- data/lib/resque/failure/airbrake.rb +17 -0
- data/lib/resque/failure/base.rb +64 -0
- data/lib/resque/failure/hoptoad.rb +33 -0
- data/lib/resque/failure/multiple.rb +54 -0
- data/lib/resque/failure/redis.rb +51 -0
- data/lib/resque/failure/thoughtbot.rb +33 -0
- data/lib/resque/helpers.rb +94 -0
- data/lib/resque/job.rb +227 -0
- data/lib/resque/plugin.rb +66 -0
- data/lib/resque/server.rb +248 -0
- data/lib/resque/server/public/favicon.ico +0 -0
- data/lib/resque/server/public/idle.png +0 -0
- data/lib/resque/server/public/jquery-1.3.2.min.js +19 -0
- data/lib/resque/server/public/jquery.relatize_date.js +95 -0
- data/lib/resque/server/public/poll.png +0 -0
- data/lib/resque/server/public/ranger.js +73 -0
- data/lib/resque/server/public/reset.css +44 -0
- data/lib/resque/server/public/style.css +86 -0
- data/lib/resque/server/public/working.png +0 -0
- data/lib/resque/server/test_helper.rb +19 -0
- data/lib/resque/server/views/error.erb +1 -0
- data/lib/resque/server/views/failed.erb +67 -0
- data/lib/resque/server/views/key_sets.erb +19 -0
- data/lib/resque/server/views/key_string.erb +11 -0
- data/lib/resque/server/views/layout.erb +44 -0
- data/lib/resque/server/views/next_more.erb +10 -0
- data/lib/resque/server/views/overview.erb +4 -0
- data/lib/resque/server/views/queues.erb +49 -0
- data/lib/resque/server/views/stats.erb +62 -0
- data/lib/resque/server/views/workers.erb +109 -0
- data/lib/resque/server/views/working.erb +72 -0
- data/lib/resque/stat.rb +53 -0
- data/lib/resque/tasks.rb +61 -0
- data/lib/resque/version.rb +3 -0
- data/lib/resque/worker.rb +546 -0
- data/lib/tasks/redis.rake +161 -0
- data/lib/tasks/resque.rake +2 -0
- data/test/airbrake_test.rb +27 -0
- data/test/hoptoad_test.rb +26 -0
- data/test/job_hooks_test.rb +423 -0
- data/test/job_plugins_test.rb +230 -0
- data/test/plugin_test.rb +116 -0
- data/test/redis-test-cluster.conf +115 -0
- data/test/redis-test.conf +115 -0
- data/test/resque-web_test.rb +59 -0
- data/test/resque_test.rb +278 -0
- data/test/test_helper.rb +160 -0
- data/test/worker_test.rb +434 -0
- metadata +186 -0
module Resque
  # A Resque Worker processes jobs. On platforms that support fork(2),
  # the worker will fork off a child to process each job. This ensures
  # a clean slate when beginning the next job and cuts down on gradual
  # memory growth as well as low level failures.
  #
  # It also ensures workers are always listening to signals from you,
  # their master, and can react accordingly.
  class Worker
    include Resque::Helpers
    extend Resque::Helpers

    # Whether the worker should log basic info to STDOUT
    attr_accessor :verbose

    # Whether the worker should log lots of info to STDOUT
    attr_accessor :very_verbose

    # Boolean indicating whether this worker can or can not fork.
    # Automatically set if a fork(2) fails.
    attr_accessor :cant_fork

    attr_writer :to_s

    # Returns an array of all worker objects.
    def self.all
      Array(redis.smembers(:workers)).map { |id| find(id) }.compact
    end

    # Returns an array of all worker objects currently processing
    # jobs.
    def self.working
      names = all
      return [] unless names.any?

      names.map! { |name| "worker:#{name}" }

      reportedly_working = {}

      begin
        reportedly_working = redis.mapped_mget(*names).reject do |key, value|
          value.nil? || value.empty?
        end
      rescue Redis::Distributed::CannotDistribute
        names.each do |name|
          value = redis.get name
          reportedly_working[name] = value unless value.nil? || value.empty?
        end
      end

      reportedly_working.keys.map do |key|
        find key.sub("worker:", '')
      end.compact
    end

    # Returns a single worker object. Accepts a string id.
    def self.find(worker_id)
      if exists? worker_id
        queues = worker_id.split(':')[-1].split(',')
        worker = new(*queues)
        worker.to_s = worker_id
        worker
      else
        nil
      end
    end

    # Alias of `find`
    def self.attach(worker_id)
      find(worker_id)
    end

    # Given a string worker id, return a boolean indicating whether the
    # worker exists
    def self.exists?(worker_id)
      redis.sismember(:workers, worker_id)
    end

    # Workers should be initialized with an array of string queue
    # names. The order is important: a Worker will check the first
    # queue given for a job. If none is found, it will check the
    # second queue name given. If a job is found, it will be
    # processed. Upon completion, the Worker will again check the
    # first queue given, and so forth. In this way the queue list
    # passed to a Worker on startup defines the priorities of queues.
    #
    # If passed a single "*", this Worker will operate on all queues
    # in alphabetical order. Queues can be dynamically added or
    # removed without needing to restart workers using this method.
    def initialize(*queues)
      @queues = queues.map { |queue| queue.to_s.strip }
      @shutdown = nil
      @paused = nil
      validate_queues
    end

    # A worker must be given a queue, otherwise it won't know what to
    # do with itself.
    #
    # You probably never need to call this.
    def validate_queues
      if @queues.nil? || @queues.empty?
        raise NoQueueError.new("Please give each worker at least one queue.")
      end
    end

    # This is the main workhorse method. Called on a Worker instance,
    # it begins the worker life cycle.
    #
    # The following events occur during a worker's life cycle:
    #
    # 1. Startup:   Signals are registered, dead workers are pruned,
    #               and this worker is registered.
    # 2. Work loop: Jobs are pulled from a queue and processed.
    # 3. Teardown:  This worker is unregistered.
    #
    # Can be passed a float representing the polling frequency.
    # The default is 5 seconds, but for a semi-active site you may
    # want to use a smaller value.
    #
    # Also accepts a block which will be passed the job as soon as it
    # has completed processing. Useful for testing.
    def work(interval = 5.0, &block)
      interval = Float(interval)
      $0 = "resque: Starting"
      startup

      loop do
        break if shutdown?

        if not paused? and job = reserve
          log "got: #{job.inspect}"
          job.worker = self
          run_hook :before_fork, job
          working_on job

          if @child = fork
            srand # Reseeding
            procline "Forked #{@child} at #{Time.now.to_i}"
            Process.wait(@child)
          else
            procline "Processing #{job.queue} since #{Time.now.to_i}"
            perform(job, &block)
            exit! unless @cant_fork
          end

          done_working
          @child = nil
        else
          break if interval.zero?
          log! "Sleeping for #{interval} seconds"
          procline paused? ? "Paused" : "Waiting for #{@queues.join(',')}"
          sleep interval
        end
      end

    ensure
      unregister_worker
    end

    # DEPRECATED. Processes a single job. If none is given, it will
    # try to produce one. Usually run in the child.
    def process(job = nil, &block)
      return unless job ||= reserve

      job.worker = self
      working_on job
      perform(job, &block)
    ensure
      done_working
    end

    # Processes a given job in the child.
    def perform(job)
      begin
        run_hook :after_fork, job
        job.perform
      rescue Object => e
        log "#{job.inspect} failed: #{e.inspect}"
        begin
          job.fail(e)
        rescue Object => e
          log "Received exception when reporting failure: #{e.inspect}"
        end
        failed!
      else
        log "done: #{job.inspect}"
      ensure
        yield job if block_given?
      end
    end

    # Attempts to grab a job off one of the provided queues. Returns
    # nil if no job can be found.
    def reserve
      queues.each do |queue|
        log! "Checking #{queue}"
        if job = Resque.reserve(queue)
          log! "Found job on #{queue}"
          return job
        end
      end

      nil
    rescue Exception => e
      log "Error reserving job: #{e.inspect}"
      log e.backtrace.join("\n")
      raise e
    end

    # Returns a list of queues to use when searching for a job.
    # A splat ("*") means you want every queue (in alpha order) - this
    # can be useful for dynamically adding new queues.
    def queues
      @queues.map {|queue| queue == "*" ? Resque.queues.sort : queue }.flatten.uniq
    end

    # Not every platform supports fork. Here we do our magic to
    # determine if yours does.
    def fork
      @cant_fork = true if $TESTING

      return if @cant_fork

      begin
        # IronRuby doesn't support `Kernel.fork` yet
        if Kernel.respond_to?(:fork)
          Kernel.fork
        else
          raise NotImplementedError
        end
      rescue NotImplementedError
        @cant_fork = true
        nil
      end
    end

    # Runs all the methods needed when a worker begins its lifecycle.
    def startup
      enable_gc_optimizations
      register_signal_handlers
      prune_dead_workers
      run_hook :before_first_fork
      register_worker

      # Fix buffering so we can `rake resque:work > resque.log` and
      # get output from the child in there.
      $stdout.sync = true
    end

    # Enables GC Optimizations if you're running REE.
    # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
    def enable_gc_optimizations
      if GC.respond_to?(:copy_on_write_friendly=)
        GC.copy_on_write_friendly = true
      end
    end

    # Registers the various signal handlers a worker responds to.
    #
    # TERM: Shutdown immediately, stop processing jobs.
    #  INT: Shutdown immediately, stop processing jobs.
    # QUIT: Shutdown after the current job has finished processing.
    # USR1: Kill the forked child immediately, continue processing jobs.
    # USR2: Don't process any new jobs
    # CONT: Start processing jobs again after a USR2
    def register_signal_handlers
      trap('TERM') { shutdown! }
      trap('INT') { shutdown! }

      begin
        trap('QUIT') { shutdown }
        trap('USR1') { kill_child }
        trap('USR2') { pause_processing }
        trap('CONT') { unpause_processing }
      rescue ArgumentError
        warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
      end

      log! "Registered signals"
    end

    # Schedule this worker for shutdown. Will finish processing the
    # current job.
    def shutdown
      log 'Exiting...'
      @shutdown = true
    end

    # Kill the child and shutdown immediately.
    def shutdown!
      shutdown
      kill_child
    end

    # Should this worker shutdown as soon as current job is finished?
    def shutdown?
      @shutdown
    end

    # Kills the forked child immediately, without remorse. The job it
    # is processing will not be completed.
    def kill_child
      if @child
        log! "Killing child at #{@child}"
        if system("ps -o pid,state -p #{@child}")
          Process.kill("KILL", @child) rescue nil
        else
          log! "Child #{@child} not found, restarting."
          shutdown
        end
      end
    end

    # are we paused?
    def paused?
      @paused
    end

    # Stop processing jobs after the current one has completed (if we're
    # currently running one).
    def pause_processing
      log "USR2 received; pausing job processing"
      @paused = true
    end

    # Start processing jobs again after a pause
    def unpause_processing
      log "CONT received; resuming job processing"
      @paused = false
    end

    # Looks for any workers which should be running on this server
    # and, if they're not, removes them from Redis.
    #
    # This is a form of garbage collection. If a server is killed by a
    # hard shutdown, power failure, or something else beyond our
    # control, the Resque workers will not die gracefully and therefore
    # will leave stale state information in Redis.
    #
    # By checking the current Redis state against the actual
    # environment, we can determine if Redis is old and clean it up a bit.
    def prune_dead_workers
      all_workers = Worker.all
      known_workers = worker_pids unless all_workers.empty?
      all_workers.each do |worker|
        host, pid, queues = worker.id.split(':')
        next unless host == hostname
        next if known_workers.include?(pid)
        log! "Pruning dead worker: #{worker}"
        worker.unregister_worker
      end
    end

    # Registers ourself as a worker. Useful when entering the worker
    # lifecycle on startup.
    def register_worker
      redis.sadd(:workers, self)
      started!
    end

    # Runs a named hook, passing along any arguments.
    def run_hook(name, *args)
      return unless hook = Resque.send(name)
      msg = "Running #{name} hook"
      msg << " with #{args.inspect}" if args.any?
      log msg

      args.any? ? hook.call(*args) : hook.call
    end

    # Unregisters ourself as a worker. Useful when shutting down.
    def unregister_worker
      # If we're still processing a job, make sure it gets logged as a
      # failure.
      if (hash = processing) && !hash.empty?
        job = Job.new(hash['queue'], hash['payload'])
        # Ensure the proper worker is attached to this job, even if
        # it's not the precise instance that died.
        job.worker = self
        job.fail(DirtyExit.new)
      end

      redis.srem(:workers, self)
      redis.del("worker:#{self}")
      redis.del("worker:#{self}:started")

      Stat.clear("processed:#{self}")
      Stat.clear("failed:#{self}")
    end

    # Given a job, tells Redis we're working on it. Useful for seeing
    # what workers are doing and when.
    def working_on(job)
      data = encode \
        :queue   => job.queue,
        :run_at  => Time.now.strftime("%Y/%m/%d %H:%M:%S %Z"),
        :payload => job.payload
      redis.set("worker:#{self}", data)
    end

    # Called when we are done working - clears our `working_on` state
    # and tells Redis we processed a job.
    def done_working
      processed!
      redis.del("worker:#{self}")
    end

    # How many jobs has this worker processed? Returns an int.
    def processed
      Stat["processed:#{self}"]
    end

    # Tell Redis we've processed a job.
    def processed!
      Stat << "processed"
      Stat << "processed:#{self}"
    end

    # How many failed jobs has this worker seen? Returns an int.
    def failed
      Stat["failed:#{self}"]
    end

    # Tells Redis we've failed a job.
    def failed!
      Stat << "failed"
      Stat << "failed:#{self}"
    end

    # What time did this worker start? Returns an instance of `Time`
    def started
      redis.get "worker:#{self}:started"
    end

    # Tell Redis we've started
    def started!
      redis.set("worker:#{self}:started", Time.now.to_s)
    end

    # Returns a hash explaining the Job we're currently processing, if any.
    def job
      decode(redis.get("worker:#{self}")) || {}
    end
    alias_method :processing, :job

    # Boolean - true if working, false if not
    def working?
      state == :working
    end

    # Boolean - true if idle, false if not
    def idle?
      state == :idle
    end

    # Returns a symbol representing the current worker state,
    # which can be either :working or :idle
    def state
      redis.exists("worker:#{self}") ? :working : :idle
    end

    # Is this worker the same as another worker?
    def ==(other)
      to_s == other.to_s
    end

    def inspect
      "#<Worker #{to_s}>"
    end

    # The string representation is the same as the id for this worker
    # instance. Can be used with `Worker.find`.
    def to_s
      @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
    end
    alias_method :id, :to_s

    # chomp'd hostname of this machine
    def hostname
      @hostname ||= `hostname`.chomp
    end

    # Returns Integer PID of running worker
    def pid
      Process.pid
    end

    # Returns an Array of string pids of all the other workers on this
    # machine. Useful when pruning dead workers on startup.
    def worker_pids
      if RUBY_PLATFORM =~ /solaris/
        solaris_worker_pids
      else
        linux_worker_pids
      end
    end

    # Find Resque worker pids on Linux and OS X.
    #
    # Returns an Array of string pids of all the other workers on this
    # machine. Useful when pruning dead workers on startup.
    def linux_worker_pids
      `ps -A -o pid,command | grep "[r]esque" | grep -v "resque-web"`.split("\n").map do |line|
        line.split(' ')[0]
      end
    end

    # Find Resque worker pids on Solaris.
    #
    # Returns an Array of string pids of all the other workers on this
    # machine. Useful when pruning dead workers on startup.
    def solaris_worker_pids
      `ps -A -o pid,comm | grep "[r]uby" | grep -v "resque-web"`.split("\n").map do |line|
        real_pid = line.split(' ')[0]
        pargs_command = `pargs -a #{real_pid} 2>/dev/null | grep [r]esque | grep -v "resque-web"`
        if pargs_command.split(':')[1] == " resque-#{Resque::Version}"
          real_pid
        end
      end.compact
    end

    # Given a string, sets the procline ($0) and logs.
    # Procline is always in the format of:
    #   resque-VERSION: STRING
    def procline(string)
      $0 = "resque-#{Resque::Version}: #{string}"
      log! $0
    end

    # Log a message to STDOUT if we are verbose or very_verbose.
    def log(message)
      if verbose
        puts "*** #{message}"
      elsif very_verbose
        time = Time.now.strftime('%H:%M:%S %Y-%m-%d')
        puts "** [#{time}] #$$: #{message}"
      end
    end

    # Logs a very verbose message to STDOUT.
    def log!(message)
      log message if very_verbose
    end
  end
end