delayed_job_with_named_queues 2.0.7.1
Sign up to get free protection for your applications and to get access to all the features.
- data/MIT-LICENSE +20 -0
- data/README.textile +248 -0
- data/contrib/delayed_job.monitrc +14 -0
- data/contrib/delayed_job_multiple.monitrc +23 -0
- data/generators/delayed_job/delayed_job_generator.rb +22 -0
- data/generators/delayed_job/templates/migration.rb +22 -0
- data/generators/delayed_job/templates/script +5 -0
- data/lib/delayed/backend/active_record.rb +93 -0
- data/lib/delayed/backend/base.rb +130 -0
- data/lib/delayed/backend/data_mapper.rb +125 -0
- data/lib/delayed/backend/mongo_mapper.rb +110 -0
- data/lib/delayed/command.rb +114 -0
- data/lib/delayed/message_sending.rb +53 -0
- data/lib/delayed/performable_method.rb +62 -0
- data/lib/delayed/railtie.rb +10 -0
- data/lib/delayed/recipes.rb +50 -0
- data/lib/delayed/tasks.rb +15 -0
- data/lib/delayed/worker.rb +178 -0
- data/lib/delayed_job.rb +14 -0
- data/rails/init.rb +5 -0
- data/recipes/delayed_job.rb +1 -0
- data/spec/backend/active_record_job_spec.rb +46 -0
- data/spec/backend/data_mapper_job_spec.rb +16 -0
- data/spec/backend/mongo_mapper_job_spec.rb +94 -0
- data/spec/backend/shared_backend_spec.rb +372 -0
- data/spec/delayed_method_spec.rb +46 -0
- data/spec/message_sending_spec.rb +89 -0
- data/spec/performable_method_spec.rb +53 -0
- data/spec/sample_jobs.rb +26 -0
- data/spec/setup/active_record.rb +34 -0
- data/spec/setup/data_mapper.rb +8 -0
- data/spec/setup/mongo_mapper.rb +17 -0
- data/spec/spec_helper.rb +28 -0
- data/spec/story_spec.rb +17 -0
- data/spec/worker_spec.rb +237 -0
- data/tasks/jobs.rake +1 -0
- metadata +327 -0
@@ -0,0 +1,130 @@
|
|
1
|
+
module Delayed
  module Backend
    # Raised when a job's serialized handler cannot be turned back into
    # an object that responds to +perform+.
    class DeserializationError < StandardError
    end

    # Shared behaviour mixed into every backend's Job class
    # (ActiveRecord, DataMapper, MongoMapper). The including class must
    # provide persistence (+create+, +save+), the +handler+ column via
    # []/[]=, and a class-level +db_time_now+.
    module Base
      def self.included(base)
        base.extend ClassMethods
      end

      module ClassMethods
        # Add a job to the queue.
        #
        # args: object (must respond to :perform), then optional
        # positional priority, run_at and queue.
        def enqueue(*args)
          object = args.shift
          unless object.respond_to?(:perform)
            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
          end

          priority = args.first || Delayed::Worker.default_priority
          run_at = args[1]
          queue = args[2]
          self.create(:payload_object => object, :priority => priority.to_i, :run_at => run_at, :queue => queue)
        end

        # Reserve a single job for +worker+, or nil if none could be locked.
        def reserve(worker, max_run_time = Worker.max_run_time)
          # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
          # this leads to a more even distribution of jobs across the worker processes
          find_available(worker.name, 5, max_run_time).detect do |job|
            job.lock_exclusively!(max_run_time, worker.name)
          end
        end

        # Hook method that is called before a new worker is forked
        def before_fork
        end

        # Hook method that is called after a new worker is forked
        def after_fork
        end

        # Deprecated shim; delegates to Delayed::Worker#work_off.
        def work_off(num = 100)
          warn "[DEPRECATION] `Delayed::Job.work_off` is deprecated. Use `Delayed::Worker.new.work_off` instead."
          Delayed::Worker.new.work_off(num)
        end
      end

      # Extracts the class name from a YAML-serialized object tag,
      # e.g. "!ruby/object:MyJob" => "MyJob".
      ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/

      # Truthy when the job has been marked failed (returns the
      # failed_at timestamp itself, nil otherwise).
      def failed?
        failed_at
      end
      alias_method :failed, :failed?

      # Human-readable job name: the payload's display_name if it
      # defines one, otherwise its class name. Memoized.
      def name
        @name ||= begin
          payload = payload_object
          if payload.respond_to?(:display_name)
            payload.display_name
          else
            payload.class.name
          end
        end
      end

      # Serialize the payload into the handler column as YAML.
      def payload_object=(object)
        self['handler'] = object.to_yaml
      end

      # Deserialize (and memoize) the payload from the handler column.
      def payload_object
        @payload_object ||= deserialize(self['handler'])
      end

      # Moved into its own method so that new_relic can trace it.
      def invoke_job
        payload_object.perform
      end

      # Unlock this job (note: not saved to DB)
      def unlock
        self.locked_at = nil
        self.locked_by = nil
      end

      # Time at which a failed attempt should be retried: the payload
      # may supply its own policy via reschedule_at(now, attempts);
      # otherwise back off polynomially (attempts**4 + 5 seconds).
      def reschedule_at
        payload_object.respond_to?(:reschedule_at) ?
          payload_object.reschedule_at(self.class.db_time_now, attempts) :
          self.class.db_time_now + (attempts ** 4) + 5
      end

      # Per-job attempt cap, if the payload defines one (nil otherwise,
      # letting the worker fall back to its global default).
      def max_attempts
        payload_object.max_attempts if payload_object.respond_to?(:max_attempts)
      end

    private

      # Turn the YAML +source+ back into a performable object.
      # On an undefined-class failure, tries to constantize the class
      # (so Rails autoloading can kick in) and parses again.
      # Raises DeserializationError when no usable handler results.
      def deserialize(source)
        handler = YAML.load(source) rescue nil

        unless handler.respond_to?(:perform)
          if handler.nil? && source =~ ParseObjectFromYaml
            handler_class = $1
          end
          attempt_to_load(handler_class || handler.class)
          handler = YAML.load(source)
        end

        return handler if handler.respond_to?(:perform)

        raise DeserializationError,
          'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
      rescue TypeError, LoadError, NameError => e
        raise DeserializationError,
          "Job failed to load: #{e.message}. Try to manually require the required file."
      end

      # Constantize the object so that ActiveSupport can attempt
      # its auto loading magic. Will raise LoadError if not successful.
      def attempt_to_load(klass)
        klass.constantize
      end

    protected

      # before-save hook used by the backends: default run_at to "now".
      def set_default_run_at
        self.run_at ||= self.class.db_time_now
      end

    end
  end
end
|
@@ -0,0 +1,125 @@
|
|
1
|
+
require 'dm-core'
|
2
|
+
require 'dm-observer'
|
3
|
+
require 'dm-aggregates'
|
4
|
+
|
5
|
+
module DataMapper
  module Resource
    module ClassMethods
      # Re-materialize a resource from the id that was embedded in a
      # delayed job payload. find! raises when the record no longer
      # exists, which the job machinery surfaces as a load failure.
      def load_for_delayed_job(id)
        find!(id)
      end
    end

    module InstanceMethods
      # Serialize this resource as "ClassName;id" for job storage.
      def dump_for_delayed_job
        '%s;%s' % [self.class, id]
      end
    end
  end
end
|
20
|
+
|
21
|
+
module Delayed
  module Backend
    module DataMapper
      # DataMapper-backed job store for delayed_job. Persists jobs in
      # the 'delayed_jobs' table and implements the worker locking
      # protocol (find_available / lock_exclusively! / clear_locks!).
      #
      # NOTE(review): no :queue property is declared even though
      # Delayed::Backend::Base#enqueue passes :queue -- confirm whether
      # named queues are actually supported by this backend.
      class Job
        include ::DataMapper::Resource
        include Delayed::Backend::Base

        storage_names[:default] = 'delayed_jobs'

        property :id, Serial
        property :priority, Integer, :default => 0, :index => :run_at_priority
        property :attempts, Integer, :default => 0
        property :handler, Text, :lazy => false
        property :run_at, Time, :index => :run_at_priority
        property :locked_at, Time, :index => true
        property :locked_by, String
        property :failed_at, Time
        property :last_error, Text

        # Local wall-clock time; used for run_at comparisons and locks.
        def self.db_time_now
          Time.now
        end

        # Build a collection of jobs runnable by +worker_name+:
        # due (run_at <= now), not failed, within the worker's priority
        # window, and lockable (unlocked, stale-locked, or ours).
        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)

          simple_conditions = { :run_at.lte => db_time_now, :limit => limit, :failed_at => nil, :order => [:priority.asc, :run_at.asc] }

          # respect priorities
          simple_conditions[:priority.gte] = Worker.min_priority if Worker.min_priority
          simple_conditions[:priority.lte] = Worker.max_priority if Worker.max_priority

          # lockable
          lockable = (
            # not locked or past the max time
            ( all(:locked_at => nil ) | all(:locked_at.lt => db_time_now - max_run_time)) |

            # OR locked by our worker
            all(:locked_by => worker_name))

          # plus some other boring junk
          (lockable).all( simple_conditions )
        end

        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
          all(:locked_by => worker_name).update(:locked_at => nil, :locked_by => nil)
        end

        # Lock this job for this worker.
        # Returns true if we have the lock, false otherwise.
        #
        # NOTE(review): the `worker = worker_name` default refers to a
        # method not defined on this class; callers appear to always
        # pass the worker name explicitly -- confirm.
        def lock_exclusively!(max_run_time, worker = worker_name)

          now = self.class.db_time_now
          overtime = now - max_run_time

          # FIXME - this is a bit gross
          # DM doesn't give us the number of rows affected by a collection update
          # so we have to circumvent some niceness in DM::Collection here
          collection = locked_by != worker ?
            (self.class.all(:id => id, :run_at.lte => now) & ( self.class.all(:locked_at => nil) | self.class.all(:locked_at.lt => overtime) ) ) :
            self.class.all(:id => id, :locked_by => worker)

          attributes = collection.model.new(:locked_at => now, :locked_by => worker).dirty_attributes
          affected_rows = self.repository.update(attributes, collection)

          # Exactly one row updated means we won the race for the lock;
          # mirror the lock fields on this in-memory instance.
          if affected_rows == 1
            self.locked_at = now
            self.locked_by = worker
            return true
          else
            return false
          end
        end

        # these are common to the other backends, so we provide an implementation
        # NOTE(review): implemented as auto_migrate!, which drops and
        # recreates the table -- test-suite helper semantics, not a
        # row-wise delete.
        def self.delete_all
          Delayed::Job.auto_migrate!
        end

        # ActiveRecord-style find-by-id shim over DataMapper's +get+.
        def self.find id
          get id
        end

        # ActiveRecord-style mass assignment + save.
        def update_attributes(attributes)
          attributes.each do |k,v|
            self[k] = v
          end
          self.save
        end


      end

      # Observer that defaults run_at to "now" just before each save
      # (mirrors the before_save :set_default_run_at hook used by the
      # other backends).
      class JobObserver
        include ::DataMapper::Observer

        observe Job

        before :save do
          self.run_at ||= self.class.db_time_now
        end
      end
    end
  end
end
|
@@ -0,0 +1,110 @@
|
|
1
|
+
require 'mongo_mapper'
|
2
|
+
|
3
|
+
module ::MongoMapper
  module Document
    module ClassMethods
      # Re-materialize a document from the id that was embedded in a
      # delayed job payload. find! raises when the record no longer
      # exists, which the job machinery surfaces as a load failure.
      def load_for_delayed_job(id)
        find!(id)
      end
    end

    module InstanceMethods
      # Serialize this document as "ClassName;id" for job storage.
      def dump_for_delayed_job
        [self.class, id].join(';')
      end
    end
  end
end
|
18
|
+
|
19
|
+
module Delayed
  module Backend
    module MongoMapper
      # MongoDB-backed job store for delayed_job, built on MongoMapper.
      # Stores jobs in the 'delayed_jobs' collection and implements the
      # worker locking protocol with $where JavaScript expressions.
      #
      # NOTE(review): no :queue key is declared even though
      # Delayed::Backend::Base#enqueue passes :queue -- confirm whether
      # named queues are actually supported by this backend.
      class Job
        include ::MongoMapper::Document
        include Delayed::Backend::Base
        set_collection_name 'delayed_jobs'

        key :priority, Integer, :default => 0
        key :attempts, Integer, :default => 0
        key :handler, String
        key :run_at, Time
        key :locked_at, Time
        key :locked_by, String, :index => true
        key :failed_at, Time
        key :last_error, String
        timestamps!

        # Default run_at to db_time_now when not set (from Base).
        before_save :set_default_run_at

        # Compound index matching the find_available sort order.
        ensure_index [[:priority, 1], [:run_at, 1]]

        # Close the shared connection before forking workers so each
        # child re-establishes its own.
        def self.before_fork
          ::MongoMapper.connection.close
        end

        # Reconnect in the forked worker process.
        def self.after_fork
          ::MongoMapper.connect(RAILS_ENV)
        end

        # Jobs are stamped with UTC wall-clock time.
        def self.db_time_now
          Time.now.utc
        end

        # Find up to +limit+ jobs runnable by +worker_name+: due, not
        # failed, within the priority window, and either already locked
        # by this worker or unlocked/stale-locked.
        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
          right_now = db_time_now

          conditions = {
            :run_at => {"$lte" => right_now},
            :limit => -limit, # In mongo, positive limits are 'soft' and negative are 'hard'
            :failed_at => nil,
            :sort => [['priority', 1], ['run_at', 1]]
          }

          # unlocked, or lock older than max_run_time (stale worker)
          where = "this.locked_at == null || this.locked_at < #{make_date(right_now - max_run_time)}"

          (conditions[:priority] ||= {})['$gte'] = Worker.min_priority.to_i if Worker.min_priority
          (conditions[:priority] ||= {})['$lte'] = Worker.max_priority.to_i if Worker.max_priority

          # Jobs we already hold come first; top up with lockable jobs
          # only if we found fewer than requested.
          results = all(conditions.merge(:locked_by => worker_name))
          results += all(conditions.merge('$where' => where)) if results.size < limit
          results
        end

        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
          collection.update({:locked_by => worker_name}, {"$set" => {:locked_at => nil, :locked_by => nil}}, :multi => true)
        end

        # Lock this job for this worker.
        # Returns true if we have the lock, false otherwise.
        #
        # NOTE(review): the `worker = worker_name` default refers to a
        # method not defined on this class; callers appear to always
        # pass the worker name explicitly -- confirm.
        def lock_exclusively!(max_run_time, worker = worker_name)
          right_now = self.class.db_time_now
          overtime = right_now - max_run_time.to_i

          # lockable when unlocked, stale-locked, or already ours
          query = "this.locked_at == null || this.locked_at < #{make_date(overtime)} || this.locked_by == #{worker.to_json}"
          conditions = {:_id => id, :run_at => {"$lte" => right_now}, "$where" => query}

          collection.update(conditions, {"$set" => {:locked_at => right_now, :locked_by => worker}})
          # Re-query to learn whether our conditional update won the
          # lock (the driver call above doesn't report affected rows).
          affected_rows = collection.find({:_id => id, :locked_by => worker}).count
          if affected_rows == 1
            self.locked_at = right_now
            self.locked_by = worker
            return true
          else
            return false
          end
        end

      private

        # Render a Time (or epoch seconds) as a JavaScript Date literal
        # for embedding in the $where expressions above (millisecond
        # precision).
        # NOTE(review): `private` does not apply to singleton methods,
        # so self.make_date is effectively public -- confirm intent.
        def self.make_date(date_or_seconds)
          "new Date(#{date_or_seconds.to_f * 1000})"
        end

        # Instance-side convenience delegating to the class method.
        def make_date(date)
          self.class.make_date(date)
        end
      end
    end
  end
end
|
@@ -0,0 +1,114 @@
|
|
1
|
+
require 'rubygems'
|
2
|
+
require 'daemons'
|
3
|
+
require 'optparse'
|
4
|
+
|
5
|
+
module Delayed
  # Command-line front end for running delayed_job workers as daemons:
  # parses options, then spawns/monitors worker processes via the
  # daemons gem (start|stop|restart|run).
  class Command
    attr_accessor :worker_count

    # args - an ARGV-style array. Recognized options are consumed; the
    # remainder (the daemons command word) is forwarded to Daemons.
    def initialize(args)
      @files_to_reopen = []
      @options = {
        :quiet => true,
        :pid_dir => "#{RAILS_ROOT}/tmp/pids"
      }

      @worker_count = 1
      @monitor = false

      parser = OptionParser.new do |opt|
        opt.banner = "Usage: #{File.basename($0)} [options] start|stop|restart|run"

        opt.on('-h', '--help', 'Show this message') do
          puts opt
          exit 1
        end
        opt.on('-e', '--environment=NAME', 'Specifies the environment to run this delayed jobs under (test/development/production).') do |e|
          STDERR.puts "The -e/--environment option has been deprecated and has no effect. Use RAILS_ENV and see http://github.com/collectiveidea/delayed_job/issues/#issue/7"
        end
        opt.on('--min-priority N', 'Minimum priority of jobs to run.') do |n|
          @options[:min_priority] = n
        end
        opt.on('--max-priority N', 'Maximum priority of jobs to run.') do |n|
          @options[:max_priority] = n
        end
        opt.on('-n', '--number_of_workers=workers', "Number of unique workers to spawn") do |worker_count|
          @worker_count = worker_count.to_i rescue 1
        end
        opt.on('--pid-dir=DIR', 'Specifies an alternate directory in which to store the process ids.') do |dir|
          @options[:pid_dir] = dir
        end
        opt.on('-i', '--identifier=n', 'A numeric identifier for the worker.') do |n|
          @options[:identifier] = n
        end
        opt.on('-m', '--monitor', 'Start monitor process.') do
          @monitor = true
        end
        opt.on('--sleep-delay N', "Amount of time to sleep when no jobs are found") do |n|
          @options[:sleep_delay] = n
        end
        opt.on('--queues=queues', "Specify which queue DJ must look up for jobs") do |queues|
          @options[:queues] = queues.split(',')
        end
        opt.on('--queue=queue', "Specify which queue DJ must look up for jobs") do |queue|
          # FIX: previously read the undefined local `queues`, so using
          # --queue raised NameError at parse time.
          @options[:queues] = queue.split(',')
        end
      end
      @args = parser.parse!(args)
    end

    # Fork the configured number of daemonized worker processes.
    # Raises ArgumentError when both a worker count > 1 and an
    # --identifier are given (the identifier names a single worker).
    def daemonize
      Delayed::Worker.backend.before_fork

      # Remember every open file so the children can reopen them after
      # daemons detaches from the controlling terminal.
      ObjectSpace.each_object(File) do |file|
        @files_to_reopen << file unless file.closed?
      end

      dir = @options[:pid_dir]
      # File.exist? -- File.exists? was removed in Ruby 3.2
      Dir.mkdir(dir) unless File.exist?(dir)

      if @worker_count > 1 && @options[:identifier]
        raise ArgumentError, 'Cannot specify both --number_of_workers and --identifier'
      elsif @worker_count == 1 && @options[:identifier]
        process_name = "delayed_job.#{@options[:identifier]}"
        run_process(process_name, dir)
      else
        worker_count.times do |worker_index|
          process_name = worker_count == 1 ? "delayed_job" : "delayed_job.#{worker_index}"
          run_process(process_name, dir)
        end
      end
    end

    # Hand one named process over to the daemons gem; the block body
    # runs inside the daemonized child.
    def run_process(process_name, dir)
      Daemons.run_proc(process_name, :dir => dir, :dir_mode => :normal, :monitor => @monitor, :ARGV => @args) do |*args|
        run process_name
      end
    end

    # Body of a single daemonized worker: restore file handles, set up
    # logging, then loop in Delayed::Worker#start until stopped.
    # Any error is logged and exits the process with status 1.
    def run(worker_name = nil)
      Dir.chdir(RAILS_ROOT)

      # Re-open file handles
      @files_to_reopen.each do |file|
        begin
          file.reopen file.path, "a+"
          file.sync = true
        rescue ::Exception
          # best-effort: some handles (sockets, pipes) cannot be reopened
        end
      end

      Delayed::Worker.logger = Logger.new(File.join(RAILS_ROOT, 'log', 'delayed_job.log'))
      Delayed::Worker.backend.after_fork

      worker = Delayed::Worker.new(@options)
      worker.name_prefix = "#{worker_name} "
      worker.start
    rescue => e
      Rails.logger.fatal e
      STDERR.puts e.message
      exit 1
    end

  end
end
|