opsb-delayed_job 2.0.3
This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
- data/.gitignore +2 -0
- data/MIT-LICENSE +20 -0
- data/README.textile +213 -0
- data/Rakefile +46 -0
- data/VERSION +1 -0
- data/benchmarks.rb +33 -0
- data/contrib/delayed_job.monitrc +14 -0
- data/contrib/delayed_job_multiple.monitrc +23 -0
- data/delayed_job.gemspec +115 -0
- data/generators/delayed_job/delayed_job_generator.rb +22 -0
- data/generators/delayed_job/templates/migration.rb +21 -0
- data/generators/delayed_job/templates/script +5 -0
- data/init.rb +1 -0
- data/lib/delayed/backend/active_record.rb +90 -0
- data/lib/delayed/backend/base.rb +111 -0
- data/lib/delayed/backend/data_mapper.rb +125 -0
- data/lib/delayed/backend/mongo_mapper.rb +110 -0
- data/lib/delayed/command.rb +101 -0
- data/lib/delayed/message_sending.rb +22 -0
- data/lib/delayed/performable_method.rb +62 -0
- data/lib/delayed/railtie.rb +10 -0
- data/lib/delayed/recipes.rb +31 -0
- data/lib/delayed/tasks.rb +15 -0
- data/lib/delayed/worker.rb +183 -0
- data/lib/delayed_job.rb +14 -0
- data/rails/init.rb +5 -0
- data/recipes/delayed_job.rb +1 -0
- data/spec/backend/active_record_job_spec.rb +46 -0
- data/spec/backend/data_mapper_job_spec.rb +16 -0
- data/spec/backend/mongo_mapper_job_spec.rb +94 -0
- data/spec/backend/shared_backend_spec.rb +265 -0
- data/spec/delayed_method_spec.rb +59 -0
- data/spec/performable_method_spec.rb +42 -0
- data/spec/sample_jobs.rb +25 -0
- data/spec/setup/active_record.rb +33 -0
- data/spec/setup/data_mapper.rb +8 -0
- data/spec/setup/mongo_mapper.rb +17 -0
- data/spec/spec_helper.rb +26 -0
- data/spec/story_spec.rb +17 -0
- data/spec/worker_spec.rb +216 -0
- data/tasks/jobs.rake +1 -0
- metadata +256 -0
data/lib/delayed/command.rb
ADDED
@@ -0,0 +1,101 @@
+require 'rubygems'
+require 'daemons'
+require 'optparse'
+
+module Delayed
+  class Command
+    attr_accessor :worker_count
+
+    def initialize(args)
+      @files_to_reopen = []
+      @options = {
+        :quiet => true,
+        :pid_dir => "#{RAILS_ROOT}/tmp/pids"
+      }
+
+      @worker_count = 1
+
+      opts = OptionParser.new do |opts|
+        opts.banner = "Usage: #{File.basename($0)} [options] start|stop|restart|run"
+
+        opts.on('-h', '--help', 'Show this message') do
+          puts opts
+          exit 1
+        end
+        opts.on('-e', '--environment=NAME', 'Specifies the environment to run this delayed jobs under (test/development/production).') do |e|
+          STDERR.puts "The -e/--environment option has been deprecated and has no effect. Use RAILS_ENV and see http://github.com/collectiveidea/delayed_job/issues/#issue/7"
+        end
+        opts.on('--min-priority N', 'Minimum priority of jobs to run.') do |n|
+          @options[:min_priority] = n
+        end
+        opts.on('--max-priority N', 'Maximum priority of jobs to run.') do |n|
+          @options[:max_priority] = n
+        end
+        opts.on('-n', '--number_of_workers=workers', "Number of unique workers to spawn") do |worker_count|
+          @worker_count = worker_count.to_i rescue 1
+        end
+        opts.on('--pid-dir=DIR', 'Specifies an alternate directory in which to store the process ids.') do |dir|
+          @options[:pid_dir] = dir
+        end
+        opts.on('-i', '--identifier=n', 'A numeric identifier for the worker.') do |n|
+          @options[:identifier] = n
+        end
+      end
+      @args = opts.parse!(args)
+    end
+
+    def daemonize
+      Delayed::Worker.backend.before_fork
+
+      ObjectSpace.each_object(File) do |file|
+        @files_to_reopen << file unless file.closed?
+      end
+
+      dir = @options[:pid_dir]
+      Dir.mkdir(dir) unless File.exists?(dir)
+
+      if @worker_count > 1 && @options[:identifier]
+        raise ArgumentError, 'Cannot specify both --number-of-workers and --identifier'
+      elsif @worker_count == 1 && @options[:identifier]
+        process_name = "delayed_job.#{@options[:identifier]}"
+        run_process(process_name, dir)
+      else
+        worker_count.times do |worker_index|
+          process_name = worker_count == 1 ? "delayed_job" : "delayed_job.#{worker_index}"
+          run_process(process_name, dir)
+        end
+      end
+    end
+
+    def run_process(process_name, dir)
+      Daemons.run_proc(process_name, :dir => dir, :dir_mode => :normal, :ARGV => @args) do |*args|
+        run process_name
+      end
+    end
+
+    def run(worker_name = nil)
+      Dir.chdir(RAILS_ROOT)
+
+      # Re-open file handles
+      @files_to_reopen.each do |file|
+        begin
+          file.reopen file.path
+          file.sync = true
+        rescue ::Exception
+        end
+      end
+
+      Delayed::Worker.logger = Logger.new(File.join(RAILS_ROOT, 'log', 'delayed_job.log'))
+      Delayed::Worker.backend.after_fork
+
+      worker = Delayed::Worker.new(@options)
+      worker.name_prefix = "#{worker_name} "
+      worker.start
+    rescue => e
+      Rails.logger.fatal e
+      STDERR.puts e.message
+      exit 1
+    end
+
+  end
+end
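The class above backs the generated script/delayed_job daemon wrapper (see data/generators/delayed_job/templates/script in the file list). A minimal sketch of how such a wrapper would drive it, assuming a Rails 2.x config/environment.rb layout; the flags shown are the ones parsed above:

    #!/usr/bin/env ruby
    # Hypothetical wrapper script: boot the app, then hand ARGV to Delayed::Command.
    require File.expand_path(File.join(File.dirname(__FILE__), '..', 'config', 'environment'))
    require 'delayed/command'

    # e.g. `RAILS_ENV=production script/delayed_job -n 2 --pid-dir=/var/run start`
    Delayed::Command.new(ARGV).daemonize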
data/lib/delayed/message_sending.rb
ADDED
@@ -0,0 +1,22 @@
+module Delayed
+  module MessageSending
+    def send_later(method, *args)
+      Delayed::Job.enqueue Delayed::PerformableMethod.new(self, method.to_sym, args)
+    end
+
+    def send_at(time, method, *args)
+      Delayed::Job.enqueue(Delayed::PerformableMethod.new(self, method.to_sym, args), 0, time)
+    end
+
+    module ClassMethods
+      def handle_asynchronously(method)
+        aliased_method, punctuation = method.to_s.sub(/([?!=])$/, ''), $1
+        with_method, without_method = "#{aliased_method}_with_send_later#{punctuation}", "#{aliased_method}_without_send_later#{punctuation}"
+        define_method(with_method) do |*args|
+          send_later(without_method, *args)
+        end
+        alias_method_chain method, :send_later
+      end
+    end
+  end
+end
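A usage sketch for the methods defined above; Object and Module pick them up in data/lib/delayed_job.rb, and the user/Newsletter names here are hypothetical:

    # Enqueue a single method call for background execution.
    user.send_later(:activate!)

    # Enqueue it with priority 0 to run at a given time.
    user.send_at(5.minutes.from_now, :activate!)

    # Make every call to a method go through the queue.
    class Newsletter
      def deliver
        # slow work...
      end
      handle_asynchronously :deliver  # callers now enqueue deliver_without_send_later
    end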
data/lib/delayed/performable_method.rb
ADDED
@@ -0,0 +1,62 @@
+class Class
+  def load_for_delayed_job(arg)
+    self
+  end
+
+  def dump_for_delayed_job
+    name
+  end
+end
+
+module Delayed
+  class PerformableMethod < Struct.new(:object, :method, :args)
+    STRING_FORMAT = /^LOAD\;([A-Z][\w\:]+)(?:\;(\w+))?$/
+
+    class LoadError < StandardError
+    end
+
+    def initialize(object, method, args)
+      raise NoMethodError, "undefined method `#{method}' for #{object.inspect}" unless object.respond_to?(method)
+
+      self.object = dump(object)
+      self.args = args.map { |a| dump(a) }
+      self.method = method.to_sym
+    end
+
+    def display_name
+      if STRING_FORMAT === object
+        "#{$1}#{$2 ? '#' : '.'}#{method}"
+      else
+        "#{object.class}##{method}"
+      end
+    end
+
+    def perform
+      load(object).send(method, *args.map{|a| load(a)})
+    rescue PerformableMethod::LoadError
+      # We cannot do anything about objects that can't be loaded
+      true
+    end
+
+    private
+
+    def load(obj)
+      if STRING_FORMAT === obj
+        $1.constantize.load_for_delayed_job($2)
+      else
+        obj
+      end
+    rescue => e
+      Delayed::Worker.logger.warn "Could not load object for job: #{e.message}"
+      raise PerformableMethod::LoadError
+    end
+
+    def dump(obj)
+      if obj.respond_to?(:dump_for_delayed_job)
+        "LOAD;#{obj.dump_for_delayed_job}"
+      else
+        obj
+      end
+    end
+  end
+end
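A sketch of the LOAD; round trip implemented above, using a hypothetical Report class that opts into custom serialization:

    class Report
      def self.load_for_delayed_job(id)
        new(id)
      end

      def initialize(id)
        @id = id
      end

      def dump_for_delayed_job
        "Report;#{@id}"
      end

      def generate!
        # slow work...
      end
    end

    pm = Delayed::PerformableMethod.new(Report.new(7), :generate!, [])
    pm.object        # => "LOAD;Report;7" (dump wraps the dump_for_delayed_job output)
    pm.display_name  # => "Report#generate!" via STRING_FORMAT
    pm.perform       # reloads with Report.load_for_delayed_job("7"), then calls generate!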
data/lib/delayed/recipes.rb
ADDED
@@ -0,0 +1,31 @@
+# Capistrano Recipes for managing delayed_job
+#
+# Add these callbacks to have the delayed_job process restart when the server
+# is restarted:
+#
+# after "deploy:stop", "delayed_job:stop"
+# after "deploy:start", "delayed_job:start"
+# after "deploy:restart", "delayed_job:restart"
+
+Capistrano::Configuration.instance.load do
+  namespace :delayed_job do
+    def rails_env
+      fetch(:rails_env, false) ? "RAILS_ENV=#{fetch(:rails_env)}" : ''
+    end
+
+    desc "Stop the delayed_job process"
+    task :stop, :roles => :app do
+      run "cd #{current_path};#{rails_env} script/delayed_job stop"
+    end
+
+    desc "Start the delayed_job process"
+    task :start, :roles => :app do
+      run "cd #{current_path};#{rails_env} script/delayed_job start"
+    end
+
+    desc "Restart the delayed_job process"
+    task :restart, :roles => :app do
+      run "cd #{current_path};#{rails_env} script/delayed_job restart"
+    end
+  end
+end
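A deploy.rb sketch wiring these tasks into a Capistrano 2 deploy, following the callbacks suggested in the comment header above (the require path assumes the gem's lib directory is on the load path):

    # config/deploy.rb (illustrative)
    require 'delayed/recipes'

    after "deploy:stop",    "delayed_job:stop"
    after "deploy:start",   "delayed_job:start"
    after "deploy:restart", "delayed_job:restart"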
data/lib/delayed/tasks.rb
ADDED
@@ -0,0 +1,15 @@
+# Re-definitions are appended to existing tasks
+task :environment
+task :merb_env
+
+namespace :jobs do
+  desc "Clear the delayed_job queue."
+  task :clear => [:merb_env, :environment] do
+    Delayed::Job.delete_all
+  end
+
+  desc "Start a delayed_job worker."
+  task :work => [:merb_env, :environment] do
+    Delayed::Worker.new(:min_priority => ENV['MIN_PRIORITY'], :max_priority => ENV['MAX_PRIORITY']).start
+  end
+end
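These tasks are exposed to the application through data/tasks/jobs.rake; a common (illustrative) way to load them from an app's Rakefile so that rake jobs:work and rake jobs:clear become available, with MIN_PRIORITY/MAX_PRIORITY read from ENV by jobs:work:

    # Rakefile (illustrative)
    begin
      require 'delayed/tasks'
    rescue LoadError
      STDERR.puts "delayed_job tasks not available: #{$!.message}"
    end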
data/lib/delayed/worker.rb
ADDED
@@ -0,0 +1,183 @@
+require 'timeout'
+require 'active_support/core_ext/numeric/time'
+
+module Delayed
+  class Worker
+    cattr_accessor :min_priority, :max_priority, :max_attempts, :max_run_time, :sleep_delay, :logger
+    self.sleep_delay = 5
+    self.max_attempts = 25
+    self.max_run_time = 4.hours
+
+    # By default failed jobs are destroyed after too many attempts. If you want to keep them around
+    # (perhaps to inspect the reason for the failure), set this to false.
+    cattr_accessor :destroy_failed_jobs
+    self.destroy_failed_jobs = true
+
+    self.logger = if defined?(Merb::Logger)
+      Merb.logger
+    elsif defined?(RAILS_DEFAULT_LOGGER)
+      RAILS_DEFAULT_LOGGER
+    end
+
+    # name_prefix is ignored if name is set directly
+    attr_accessor :name_prefix
+
+    cattr_reader :backend
+
+    def self.backend=(backend)
+      if backend.is_a? Symbol
+        require "delayed/backend/#{backend}"
+        backend = "Delayed::Backend::#{backend.to_s.classify}::Job".constantize
+      end
+      @@backend = backend
+      silence_warnings { ::Delayed.const_set(:Job, backend) }
+    end
+
+    def self.guess_backend
+      self.backend ||= if defined?(ActiveRecord)
+        :active_record
+      elsif defined?(MongoMapper)
+        :mongo_mapper
+      else
+        logger.warn "Could not decide on a backend, defaulting to active_record"
+        :active_record
+      end
+    end
+
+    def initialize(options={})
+      @quiet = options[:quiet]
+      self.class.min_priority = options[:min_priority] if options.has_key?(:min_priority)
+      self.class.max_priority = options[:max_priority] if options.has_key?(:max_priority)
+    end
+
+    # Every worker has a unique name which by default is the pid of the process. There are some
+    # advantages to overriding this with something which survives worker restarts: Workers can
+    # safely resume working on tasks which are locked by themselves. The worker will assume that
+    # it crashed before.
+    def name
+      return @name unless @name.nil?
+      "#{@name_prefix}host:#{Socket.gethostname} pid:#{Process.pid}" rescue "#{@name_prefix}pid:#{Process.pid}"
+    end
+
+    # Sets the name of the worker.
+    # Setting the name to nil will reset the default worker name
+    def name=(val)
+      @name = val
+    end
+
+    def start
+      say "*** Starting job worker #{name}"
+
+      trap('TERM') { say 'Exiting...'; $exit = true }
+      trap('INT')  { say 'Exiting...'; $exit = true }
+
+      loop do
+        result = nil
+
+        realtime = Benchmark.realtime do
+          result = work_off
+        end
+
+        count = result.sum
+
+        break if $exit
+
+        if count.zero?
+          sleep(@@sleep_delay)
+        else
+          say "#{count} jobs processed at %.4f j/s, %d failed ..." % [count / realtime, result.last]
+        end

+        break if $exit
+      end
+
+    ensure
+      Delayed::Job.clear_locks!(name)
+    end
+
+    # Do num jobs and return stats on success/failure.
+    # Exit early if interrupted.
+    def work_off(num = 100)
+      success, failure = 0, 0
+
+      num.times do
+        case reserve_and_run_one_job
+        when true
+          success += 1
+        when false
+          failure += 1
+        else
+          break # leave if no work could be done
+        end
+        break if $exit # leave if we're exiting
+      end
+
+      return [success, failure]
+    end
+
+    def run(job)
+      runtime = Benchmark.realtime do
+        Timeout.timeout(self.class.max_run_time.to_i) { job.invoke_job }
+        job.destroy
+      end
+      # TODO: warn if runtime > max_run_time ?
+      say "* [JOB] #{name} completed after %.4f" % runtime
+      return true # did work
+    rescue Exception => e
+      handle_failed_job(job, e)
+      return false # work failed
+    end
+
+    # Reschedule the job in the future (when a job fails).
+    # Uses an exponential scale depending on the number of failed attempts.
+    def reschedule(job, time = nil)
+      if (job.attempts += 1) < self.class.max_attempts
+        time ||= Job.db_time_now + (job.attempts ** 4) + 5
+        job.run_at = time
+        job.unlock
+        job.save!
+      else
+        say "* [JOB] PERMANENTLY removing #{job.name} because of #{job.attempts} consecutive failures.", Logger::INFO
+
+        if job.payload_object.respond_to? :on_permanent_failure
+          say "* [JOB] Running on_permanent_failure hook"
+          job.payload_object.on_permanent_failure
+        end
+
+        self.class.destroy_failed_jobs ? job.destroy : job.update_attributes(:failed_at => Delayed::Job.db_time_now)
+      end
+    end
+
+    def say(text, level = Logger::INFO)
+      puts text unless @quiet
+      logger.add level, "#{Time.now.strftime('%FT%T%z')}: #{text}" if logger
+    end
+
+  protected
+
+    def handle_failed_job(job, error)
+      job.last_error = error.message + "\n" + error.backtrace.join("\n")
+      say "* [JOB] #{name} failed with #{error.class.name}: #{error.message} - #{job.attempts} failed attempts", Logger::ERROR
+      reschedule(job)
+    end
+
+    # Run the next job we can get an exclusive lock on.
+    # If no jobs are left we return nil
+    def reserve_and_run_one_job
+
+      # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
+      # this leads to a more even distribution of jobs across the worker processes
+      job = Delayed::Job.find_available(name, 5, self.class.max_run_time).detect do |job|
+        if job.lock_exclusively!(self.class.max_run_time, name)
+          say "* [Worker(#{name})] acquired lock on #{job.name}"
+          true
+        else
+          say "* [Worker(#{name})] failed to acquire exclusive lock for #{job.name}", Logger::WARN
+          false
+        end
+      end
+
+      run(job) if job
+    end
+  end
+end
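The cattr_accessor settings above are usually tuned once at boot, for example from a Rails initializer; the values below are illustrative, not defaults:

    # config/initializers/delayed_job_config.rb (illustrative values)
    Delayed::Worker.destroy_failed_jobs = false  # keep failed jobs for inspection
    Delayed::Worker.sleep_delay = 60             # seconds to sleep when the queue is empty
    Delayed::Worker.max_attempts = 3             # reschedule up to 3 times, then fail permanently
    Delayed::Worker.max_run_time = 5.minutes     # Timeout wrapped around invoke_job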
data/lib/delayed_job.rb
ADDED
@@ -0,0 +1,14 @@
+require 'active_support'
+
+require File.dirname(__FILE__) + '/delayed/message_sending'
+require File.dirname(__FILE__) + '/delayed/performable_method'
+require File.dirname(__FILE__) + '/delayed/backend/base'
+require File.dirname(__FILE__) + '/delayed/worker'
+require File.dirname(__FILE__) + '/delayed/railtie' if defined?(::Rails::Railtie)
+
+Object.send(:include, Delayed::MessageSending)
+Module.send(:include, Delayed::MessageSending::ClassMethods)
+
+if defined?(Merb::Plugins)
+  Merb::Plugins.add_rakefiles File.dirname(__FILE__) / 'delayed' / 'tasks'
+end
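Since this entry point only mixes in MessageSending and registers the Railtie, a backend still has to be selected (or guessed) before jobs can be enqueued; a sketch:

    # Explicit selection: the symbol is classify'd into
    # Delayed::Backend::ActiveRecord::Job and aliased to Delayed::Job.
    Delayed::Worker.backend = :active_record

    # Or let the worker pick based on which ORM is loaded.
    Delayed::Worker.guess_backend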