scotttam-resque 0.0.1
This diff shows the content of publicly available package versions as released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- data/.gitignore +3 -0
- data/.kick +26 -0
- data/CONTRIBUTORS +23 -0
- data/HISTORY.md +80 -0
- data/LICENSE +20 -0
- data/README.markdown +767 -0
- data/Rakefile +66 -0
- data/bin/resque +57 -0
- data/bin/resque-web +18 -0
- data/config.ru +14 -0
- data/deps.rip +6 -0
- data/examples/async_helper.rb +31 -0
- data/examples/demo/README.markdown +71 -0
- data/examples/demo/Rakefile +3 -0
- data/examples/demo/app.rb +38 -0
- data/examples/demo/config.ru +19 -0
- data/examples/demo/job.rb +22 -0
- data/examples/god/resque.god +53 -0
- data/examples/god/stale.god +26 -0
- data/examples/instance.rb +11 -0
- data/examples/simple.rb +30 -0
- data/init.rb +1 -0
- data/lib/resque/errors.rb +7 -0
- data/lib/resque/failure/base.rb +58 -0
- data/lib/resque/failure/hoptoad.rb +121 -0
- data/lib/resque/failure/multiple.rb +44 -0
- data/lib/resque/failure/redis.rb +33 -0
- data/lib/resque/failure.rb +63 -0
- data/lib/resque/helpers.rb +57 -0
- data/lib/resque/job.rb +146 -0
- data/lib/resque/server/public/idle.png +0 -0
- data/lib/resque/server/public/jquery-1.3.2.min.js +19 -0
- data/lib/resque/server/public/jquery.relatize_date.js +95 -0
- data/lib/resque/server/public/poll.png +0 -0
- data/lib/resque/server/public/ranger.js +24 -0
- data/lib/resque/server/public/reset.css +48 -0
- data/lib/resque/server/public/style.css +76 -0
- data/lib/resque/server/public/working.png +0 -0
- data/lib/resque/server/views/error.erb +1 -0
- data/lib/resque/server/views/failed.erb +35 -0
- data/lib/resque/server/views/key.erb +17 -0
- data/lib/resque/server/views/layout.erb +38 -0
- data/lib/resque/server/views/next_more.erb +10 -0
- data/lib/resque/server/views/overview.erb +4 -0
- data/lib/resque/server/views/queues.erb +46 -0
- data/lib/resque/server/views/stats.erb +62 -0
- data/lib/resque/server/views/workers.erb +78 -0
- data/lib/resque/server/views/working.erb +69 -0
- data/lib/resque/server.rb +187 -0
- data/lib/resque/stat.rb +53 -0
- data/lib/resque/tasks.rb +39 -0
- data/lib/resque/version.rb +3 -0
- data/lib/resque/worker.rb +453 -0
- data/lib/resque.rb +246 -0
- data/tasks/redis.rake +135 -0
- data/tasks/resque.rake +2 -0
- data/test/redis-test.conf +132 -0
- data/test/resque_test.rb +220 -0
- data/test/test_helper.rb +96 -0
- data/test/worker_test.rb +260 -0
- metadata +172 -0
data/lib/resque/worker.rb
ADDED
@@ -0,0 +1,453 @@
+module Resque
+  # A Resque Worker processes jobs. On platforms that support fork(2),
+  # the worker will fork off a child to process each job. This ensures
+  # a clean slate when beginning the next job and cuts down on gradual
+  # memory growth as well as low level failures.
+  #
+  # It also ensures workers are always listening to signals from you,
+  # their master, and can react accordingly.
+  class Worker
+    include Resque::Helpers
+    extend Resque::Helpers
+
+    # Whether the worker should log basic info to STDOUT
+    attr_accessor :verbose
+
+    # Whether the worker should log lots of info to STDOUT
+    attr_accessor :very_verbose
+
+    # Boolean indicating whether this worker can or can not fork.
+    # Automatically set if a fork(2) fails.
+    attr_accessor :cant_fork
+
+    attr_writer :to_s
+
+    # Returns an array of all worker objects.
+    def self.all
+      redis.smembers(:workers).map { |id| find(id) }
+    end
+
+    # Returns an array of all worker objects currently processing
+    # jobs.
+    def self.working
+      names = all
+      return [] unless names.any?
+      names.map! { |name| "worker:#{name}" }
+      redis.mapped_mget(*names).keys.map do |key|
+        find key.sub("worker:", '')
+      end
+    end
+
+    # Returns a single worker object. Accepts a string id.
+    def self.find(worker_id)
+      if exists? worker_id
+        queues = worker_id.split(':')[-1].split(',')
+        worker = new(*queues)
+        worker.to_s = worker_id
+        worker
+      else
+        nil
+      end
+    end
+
+    # Alias of `find`
+    def self.attach(worker_id)
+      find(worker_id)
+    end
+
+    # Given a string worker id, return a boolean indicating whether the
+    # worker exists
+    def self.exists?(worker_id)
+      redis.sismember(:workers, worker_id)
+    end
+
+    #Sets the before_hook proc
+    def self.before_fork=(before_fork)
+      @@before_fork = before_fork
+    end
+
+    # Workers should be initialized with an array of string queue
+    # names. The order is important: a Worker will check the first
+    # queue given for a job. If none is found, it will check the
+    # second queue name given. If a job is found, it will be
+    # processed. Upon completion, the Worker will again check the
+    # first queue given, and so forth. In this way the queue list
+    # passed to a Worker on startup defines the priorities of queues.
+    #
+    # If passed a single "*", this Worker will operate on all queues
+    # in alphabetical order. Queues can be dynamically added or
+    # removed without needing to restart workers using this method.
+    def initialize(*queues)
+      @queues = queues
+      validate_queues
+    end
+
+    # A worker must be given a queue, otherwise it won't know what to
+    # do with itself.
+    #
+    # You probably never need to call this.
+    def validate_queues
+      if @queues.nil? || @queues.empty?
+        raise NoQueueError.new("Please give each worker at least one queue.")
+      end
+    end
+
+    # This is the main workhorse method. Called on a Worker instance,
+    # it begins the worker life cycle.
+    #
+    # The following events occur during a worker's life cycle:
+    #
+    # 1. Startup: Signals are registered, dead workers are pruned,
+    #    and this worker is registered.
+    # 2. Work loop: Jobs are pulled from a queue and processed.
+    # 3. Teardown: This worker is unregistered.
+    #
+    # Can be passed an integer representing the polling frequency.
+    # The default is 5 seconds, but for a semi-active site you may
+    # want to use a smaller value.
+    #
+    # Also accepts a block which will be passed the job as soon as it
+    # has completed processing. Useful for testing.
+    def work(interval = 5, &block)
+      $0 = "resque: Starting"
+      startup
+
+      loop do
+        break if @shutdown
+
+        if not @paused and job = reserve
+          log "got: #{job.inspect}"
+
+          if @child = fork
+            rand # Reseeding
+            procline "Forked #{@child} at #{Time.now.to_i}"
+            Process.wait
+          else
+            procline "Processing #{job.queue} since #{Time.now.to_i}"
+            process(job, &block)
+            exit! unless @cant_fork
+          end
+
+          @child = nil
+        else
+          break if interval.to_i == 0
+          log! "Sleeping for #{interval.to_i}"
+          procline @paused ? "Paused" : "Waiting for #{@queues.join(',')}"
+          sleep interval.to_i
+        end
+      end
+
+    ensure
+      unregister_worker
+    end
+
+    # Processes a single job. If none is given, it will try to produce
+    # one.
+    def process(job = nil)
+      return unless job ||= reserve
+
+      begin
+        working_on job
+        job.perform
+      rescue Object => e
+        log "#{job.inspect} failed: #{e.inspect}"
+        job.fail(e)
+        failed!
+      else
+        log "done: #{job.inspect}"
+      ensure
+        yield job if block_given?
+        done_working
+      end
+    end
+
+    # Attempts to grab a job off one of the provided queues. Returns
+    # nil if no job can be found.
+    def reserve
+      queues.each do |queue|
+        log! "Checking #{queue}"
+        if job = Resque::Job.reserve(queue)
+          log! "Found job on #{queue}"
+          return job
+        end
+      end
+
+      nil
+    end
+
+    # Returns a list of queues to use when searching for a job.
+    # A splat ("*") means you want every queue (in alpha order) - this
+    # can be useful for dynamically adding new queues.
+    def queues
+      @queues[0] == "*" ? Resque.queues.sort : @queues
+    end
+
+    # Not every platform supports fork. Here we do our magic to
+    # determine if yours does.
+    def fork
+      @cant_fork = true if $TESTING
+
+      return if @cant_fork
+
+      begin
+        # IronRuby doesn't support `Kernel.fork` yet
+        if Kernel.respond_to?(:fork)
+          Kernel.fork
+        else
+          raise NotImplementedError
+        end
+      rescue NotImplementedError
+        @cant_fork = true
+        nil
+      end
+    end
+
+    # Runs all the methods needed when a worker begins its lifecycle.
+    def startup
+      enable_gc_optimizations
+      register_signal_handlers
+      prune_dead_workers
+      before_fork
+      register_worker
+    end
+
+    # Enables GC Optimizations if you're running REE.
+    # http://www.rubyenterpriseedition.com/faq.html#adapt_apps_for_cow
+    def enable_gc_optimizations
+      if GC.respond_to?(:copy_on_write_friendly=)
+        GC.copy_on_write_friendly = true
+      end
+    end
+
+    # Registers the various signal handlers a worker responds to.
+    #
+    # TERM: Shutdown immediately, stop processing jobs.
+    # INT: Shutdown immediately, stop processing jobs.
+    # QUIT: Shutdown after the current job has finished processing.
+    # USR1: Kill the forked child immediately, continue processing jobs.
+    # USR2: Don't process any new jobs
+    # CONT: Start processing jobs again after a USR2
+    def register_signal_handlers
+      trap('TERM') { shutdown! }
+      trap('INT') { shutdown! }
+
+      begin
+        trap('QUIT') { shutdown }
+        trap('USR1') { kill_child }
+        trap('USR2') { pause_processing }
+        trap('CONT') { unpause_processing }
+      rescue ArgumentError
+        warn "Signals QUIT, USR1, USR2, and/or CONT not supported."
+      end
+
+      log! "Registered signals"
+    end
+
+    # Schedule this worker for shutdown. Will finish processing the
+    # current job.
+    def shutdown
+      log 'Exiting...'
+      @shutdown = true
+    end
+
+    # Kill the child and shutdown immediately.
+    def shutdown!
+      shutdown
+      kill_child
+    end
+
+    # Kills the forked child immediately, without remorse. The job it
+    # is processing will not be completed.
+    def kill_child
+      if @child
+        log! "Killing child at #{@child}"
+        if system("ps -o pid,state -p #{@child}")
+          Process.kill("KILL", @child) rescue nil
+        else
+          log! "Child #{@child} not found, restarting."
+          shutdown
+        end
+      end
+    end
+
+    # Stop processing jobs after the current one has completed (if we're
+    # currently running one).
+    def pause_processing
+      log "USR2 received; pausing job processing"
+      @paused = true
+    end
+
+    # Start processing jobs again after a pause
+    def unpause_processing
+      log "CONT received; resuming job processing"
+      @paused = false
+    end
+
+    # Looks for any workers which should be running on this server
+    # and, if they're not, removes them from Redis.
+    #
+    # This is a form of garbage collection. If a server is killed by a
+    # hard shutdown, power failure, or something else beyond our
+    # control, the Resque workers will not die gracefully and therefore
+    # will leave stale state information in Redis.
+    #
+    # By checking the current Redis state against the actual
+    # environment, we can determine if Redis is old and clean it up a bit.
+    def prune_dead_workers
+      all_workers = Worker.all
+      known_workers = worker_pids unless all_workers.empty?
+      all_workers.each do |worker|
+        host, pid, queues = worker.id.split(':')
+        next unless host == hostname
+        next if known_workers.include?(pid)
+        log! "Pruning dead worker: #{worker}"
+        worker.unregister_worker
+      end
+    end
+
+    # Registers ourself as a worker. Useful when entering the worker
+    # lifecycle on startup.
+    def register_worker
+      redis.sadd(:workers, self)
+      started!
+    end
+
+    #Call any before_fork procs, if any
+    def before_fork
+      @@before_fork.call if Worker.class_variable_defined?(:@@before_fork)
+    end
+
+    # Unregisters ourself as a worker. Useful when shutting down.
+    def unregister_worker
+      redis.srem(:workers, self)
+      redis.del("worker:#{self}:started")
+
+      Stat.clear("processed:#{self}")
+      Stat.clear("failed:#{self}")
+    end
+
+    # Given a job, tells Redis we're working on it. Useful for seeing
+    # what workers are doing and when.
+    def working_on(job)
+      job.worker = self
+      data = encode \
+        :queue => job.queue,
+        :run_at => Time.now.to_s,
+        :payload => job.payload
+      redis.set("worker:#{self}", data)
+    end
+
+    # Called when we are done working - clears our `working_on` state
+    # and tells Redis we processed a job.
+    def done_working
+      processed!
+      redis.del("worker:#{self}")
+    end
+
+    # How many jobs has this worker processed? Returns an int.
+    def processed
+      Stat["processed:#{self}"]
+    end
+
+    # Tell Redis we've processed a job.
+    def processed!
+      Stat << "processed"
+      Stat << "processed:#{self}"
+    end
+
+    # How many failed jobs has this worker seen? Returns an int.
+    def failed
+      Stat["failed:#{self}"]
+    end
+
+    # Tells Redis we've failed a job.
+    def failed!
+      Stat << "failed"
+      Stat << "failed:#{self}"
+    end
+
+    # What time did this worker start? Returns an instance of `Time`
+    def started
+      redis.get "worker:#{self}:started"
+    end
+
+    # Tell Redis we've started
+    def started!
+      redis.set("worker:#{self}:started", Time.now.to_s)
+    end
+
+    # Returns a hash explaining the Job we're currently processing, if any.
+    def job
+      decode(redis.get("worker:#{self}")) || {}
+    end
+    alias_method :processing, :job
+
+    # Boolean - true if working, false if not
+    def working?
+      state == :working
+    end
+
+    # Boolean - true if idle, false if not
+    def idle?
+      state == :idle
+    end
+
+    # Returns a symbol representing the current worker state,
+    # which can be either :working or :idle
+    def state
+      redis.exists("worker:#{self}") ? :working : :idle
+    end
+
+    # Is this worker the same as another worker?
+    def ==(other)
+      to_s == other.to_s
+    end
+
+    def inspect
+      "#<Worker #{to_s}>"
+    end
+
+    # The string representation is the same as the id for this worker
+    # instance. Can be used with `Worker.find`.
+    def to_s
+      @to_s ||= "#{hostname}:#{Process.pid}:#{@queues.join(',')}"
+    end
+    alias_method :id, :to_s
+
+    # chomp'd hostname of this machine
+    def hostname
+      @hostname ||= `hostname`.chomp
+    end
+
+    # Returns an array of string pids of all the other workers on this
+    # machine. Useful when pruning dead workers on startup.
+    def worker_pids
+      `ps -A -o pid,command | grep [r]esque`.split("\n").map do |line|
+        line.split(' ')[0]
+      end
+    end
+
+    # Given a string, sets the procline ($0) and logs.
+    # Procline is always in the format of:
+    # resque-VERSION: STRING
+    def procline(string)
+      $0 = "resque-#{Resque::Version}: #{string}"
+      log! $0
+    end
+
+    # Log a message to STDOUT if we are verbose or very_verbose.
+    def log(message)
+      if verbose
+        puts "*** #{message}"
+      elsif very_verbose
+        time = Time.now.strftime('%I:%M:%S %Y-%m-%d')
+        puts "** [#{time}] #$$: #{message}"
+      end
+    end
+
+    # Logs a very verbose message to STDOUT.
+    def log!(message)
+      log message if very_verbose
+    end
+  end
+end
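A brief usage sketch may help tie the worker API above together. It is not part of the packaged files: the queue names, the hook body, and the interval are illustrative, and Redis is assumed to be reachable at the default localhost:6379 configured in data/lib/resque.rb below.

    # Illustrative sketch only, not part of the gem. Queue names and the hook
    # body are made up; Redis is assumed to be running on localhost:6379.
    require 'resque'

    # A proc stored via Worker.before_fork= is invoked once from Worker#startup,
    # just before the work loop begins (see before_fork in worker.rb above).
    Resque::Worker.before_fork = lambda { puts "worker booting" }

    worker = Resque::Worker.new('critical', 'low') # 'critical' is always checked before 'low'
    worker.verbose = true                          # log basic info to STDOUT
    worker.work(5)                                 # poll every 5 seconds; QUIT finishes the current job, then exits

In production the same worker would normally be started through the bundled rake task or the resque/resque-web executables listed in the file table.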
data/lib/resque.rb
ADDED
@@ -0,0 +1,246 @@
+require 'redis/namespace'
+
+begin
+  require 'yajl'
+rescue LoadError
+  require 'json'
+end
+
+require 'resque/version'
+
+require 'resque/errors'
+
+require 'resque/failure'
+require 'resque/failure/base'
+
+require 'resque/helpers'
+require 'resque/stat'
+require 'resque/job'
+require 'resque/worker'
+
+module Resque
+  include Helpers
+  extend self
+
+  # Accepts:
+  # 1. A 'hostname:port' string
+  # 2. A 'hostname:port:db' string (to select the Redis db)
+  # 3. An instance of `Redis`
+  def redis=(server)
+    case server
+    when String
+      host, port, db = server.split(':')
+      redis = Redis.new(:host => host, :port => port,
+        :thread_safe => true, :db => db)
+      @redis = Redis::Namespace.new(:resque, :redis => redis)
+    when Redis
+      @redis = Redis::Namespace.new(:resque, :redis => server)
+    else
+      raise "I don't know what to do with #{server.inspect}"
+    end
+  end
+
+  # Returns the current Redis connection. If none has been created, will
+  # create a new one.
+  def redis
+    return @redis if @redis
+    self.redis = 'localhost:6379'
+    self.redis
+  end
+
+  #Set a proc that will be called once before the worker forks
+  def before_fork=(before_fork)
+    @before_fork = before_fork
+  end
+
+  #Returns the before_fork proc
+  def before_fork
+    @before_fork
+  end
+
+  def to_s
+    "Resque Client connected to #{redis.server}"
+  end
+
+
+  #
+  # queue manipulation
+  #
+
+  # Pushes a job onto a queue. Queue name should be a string and the
+  # item should be any JSON-able Ruby object.
+  def push(queue, item)
+    watch_queue(queue)
+    redis.rpush "queue:#{queue}", encode(item)
+  end
+
+  # Pops a job off a queue. Queue name should be a string.
+  #
+  # Returns a Ruby object.
+  def pop(queue)
+    decode redis.lpop("queue:#{queue}")
+  end
+
+  # Returns an int representing the size of a queue.
+  # Queue name should be a string.
+  def size(queue)
+    redis.llen("queue:#{queue}").to_i
+  end
+
+  # Returns an array of items currently queued. Queue name should be
+  # a string.
+  #
+  # start and count should be integer and can be used for pagination.
+  # start is the item to begin, count is how many items to return.
+  #
+  # To get the 3rd page of a 30 item, paginatied list one would use:
+  # Resque.peek('my_list', 59, 30)
+  def peek(queue, start = 0, count = 1)
+    list_range("queue:#{queue}", start, count)
+  end
+
+  # Does the dirty work of fetching a range of items from a Redis list
+  # and converting them into Ruby objects.
+  def list_range(key, start = 0, count = 1)
+    if count == 1
+      decode redis.lindex(key, start)
+    else
+      Array(redis.lrange(key, start, start+count-1)).map do |item|
+        decode item
+      end
+    end
+  end
+
+  # Returns an array of all known Resque queues as strings.
+  def queues
+    redis.smembers(:queues)
+  end
+
+  # Given a queue name, completely deletes the queue.
+  def remove_queue(queue)
+    redis.srem(:queues, queue.to_s)
+    redis.del("queue:#{queue}")
+  end
+
+  # Used internally to keep track of which queues we've created.
+  # Don't call this directly.
+  def watch_queue(queue)
+    redis.sadd(:queues, queue.to_s)
+  end
+
+
+  #
+  # job shortcuts
+  #
+
+  # This method can be used to conveniently add a job to a queue.
+  # It assumes the class you're passing it is a real Ruby class (not
+  # a string or reference) which either:
+  #
+  # a) has a @queue ivar set
+  # b) responds to `queue`
+  #
+  # If either of those conditions are met, it will use the value obtained
+  # from performing one of the above operations to determine the queue.
+  #
+  # If no queue can be inferred this method will raise a `Resque::NoQueueError`
+  #
+  # This method is considered part of the `stable` API.
+  def enqueue(klass, *args)
+    Job.create(queue_from_class(klass), klass, *args)
+  end
+
+  # This method can be used to conveniently remove a job from a queue.
+  # It assumes the class you're passing it is a real Ruby class (not
+  # a string or reference) which either:
+  #
+  # a) has a @queue ivar set
+  # b) responds to `queue`
+  #
+  # If either of those conditions are met, it will use the value obtained
+  # from performing one of the above operations to determine the queue.
+  #
+  # If no queue can be inferred this method will raise a `Resque::NoQueueError`
+  #
+  # If no args are given, this method will dequeue *all* jobs matching
+  # the provided class. See `Resque::Job.destroy` for more
+  # information.
+  #
+  # Returns the number of jobs destroyed.
+  #
+  # Example:
+  #
+  # # Removes all jobs of class `UpdateNetworkGraph`
+  # Resque.dequeue(GitHub::Jobs::UpdateNetworkGraph)
+  #
+  # # Removes all jobs of class `UpdateNetworkGraph` with matching args.
+  # Resque.dequeue(GitHub::Jobs::UpdateNetworkGraph, 'repo:135325')
+  #
+  # This method is considered part of the `stable` API.
+  def dequeue(klass, *args)
+    Job.destroy(queue_from_class(klass), klass, *args)
+  end
+
+  # Given a class, try to extrapolate an appropriate queue based on a
+  # class instance variable or `queue` method.
+  def queue_from_class(klass)
+    klass.instance_variable_get(:@queue) ||
+      (klass.respond_to?(:queue) and klass.queue)
+  end
+
+  # This method will return a `Resque::Job` object or a non-true value
+  # depending on whether a job can be obtained. You should pass it the
+  # precise name of a queue: case matters.
+  #
+  # This method is considered part of the `stable` API.
+  def reserve(queue)
+    Job.reserve(queue)
+  end
+
+
+  #
+  # worker shortcuts
+  #
+
+  # A shortcut to Worker.all
+  def workers
+    Worker.all
+  end
+
+  # A shortcut to Worker.working
+  def working
+    Worker.working
+  end
+
+  # A shortcut to unregister_worker
+  # useful for command line tool
+  def remove_worker(worker_id)
+    worker = Resque::Worker.find(worker_id)
+    worker.unregister_worker
+  end
+
+  #
+  # stats
+  #
+
+  # Returns a hash, similar to redis-rb's #info, of interesting stats.
+  def info
+    return {
+      :pending => queues.inject(0) { |m,k| m + size(k) },
+      :processed => Stat[:processed],
+      :queues => queues.size,
+      :workers => workers.size.to_i,
+      :working => working.size,
+      :failed => Stat[:failed],
+      :servers => [redis.server]
+    }
+  end
+
+  # Returns an array of all known Resque keys in Redis. Redis' KEYS operation
+  # is O(N) for the keyspace, so be careful - this can be slow for big databases.
+  def keys
+    redis.keys("*").map do |key|
+      key.sub('resque:', '')
+    end
+  end
+end
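The comments on enqueue, dequeue, and queue_from_class above spell out the client side of the contract: a job class must either set a @queue instance variable or respond to queue. A minimal sketch follows; the Archive class, its queue name, and the arguments are made up, and the self.perform convention comes from Resque::Job (data/lib/resque/job.rb, listed in the file table but not expanded in this diff).

    # Illustrative sketch only, not part of the gem. Class, queue, and args are made up.
    require 'resque'

    Resque.redis = 'localhost:6379:2'       # the 'hostname:port:db' string form accepted by Resque.redis=

    class Archive
      @queue = :file_archives               # read by Resque.queue_from_class

      def self.perform(repo_id, branch = 'master')
        # do the real work here
      end
    end

    Resque.enqueue(Archive, 42, 'stable')   # Job.create onto the "file_archives" queue
    Resque.size('file_archives')            # => number of pending jobs on that queue
    Resque.peek('file_archives')            # => first queued item, decoded (nil if the queue is empty)

A worker started on 'file_archives', as in the earlier sketch, would then reserve and perform these jobs.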