opsb-delayed_job 2.0.3
This diff shows the content of publicly available package versions that have been released to one of the supported registries. It is provided for informational purposes only and reflects the changes between package versions as they appear in their respective public registries.
- data/.gitignore +2 -0
- data/MIT-LICENSE +20 -0
- data/README.textile +213 -0
- data/Rakefile +46 -0
- data/VERSION +1 -0
- data/benchmarks.rb +33 -0
- data/contrib/delayed_job.monitrc +14 -0
- data/contrib/delayed_job_multiple.monitrc +23 -0
- data/delayed_job.gemspec +115 -0
- data/generators/delayed_job/delayed_job_generator.rb +22 -0
- data/generators/delayed_job/templates/migration.rb +21 -0
- data/generators/delayed_job/templates/script +5 -0
- data/init.rb +1 -0
- data/lib/delayed/backend/active_record.rb +90 -0
- data/lib/delayed/backend/base.rb +111 -0
- data/lib/delayed/backend/data_mapper.rb +125 -0
- data/lib/delayed/backend/mongo_mapper.rb +110 -0
- data/lib/delayed/command.rb +101 -0
- data/lib/delayed/message_sending.rb +22 -0
- data/lib/delayed/performable_method.rb +62 -0
- data/lib/delayed/railtie.rb +10 -0
- data/lib/delayed/recipes.rb +31 -0
- data/lib/delayed/tasks.rb +15 -0
- data/lib/delayed/worker.rb +183 -0
- data/lib/delayed_job.rb +14 -0
- data/rails/init.rb +5 -0
- data/recipes/delayed_job.rb +1 -0
- data/spec/backend/active_record_job_spec.rb +46 -0
- data/spec/backend/data_mapper_job_spec.rb +16 -0
- data/spec/backend/mongo_mapper_job_spec.rb +94 -0
- data/spec/backend/shared_backend_spec.rb +265 -0
- data/spec/delayed_method_spec.rb +59 -0
- data/spec/performable_method_spec.rb +42 -0
- data/spec/sample_jobs.rb +25 -0
- data/spec/setup/active_record.rb +33 -0
- data/spec/setup/data_mapper.rb +8 -0
- data/spec/setup/mongo_mapper.rb +17 -0
- data/spec/spec_helper.rb +26 -0
- data/spec/story_spec.rb +17 -0
- data/spec/worker_spec.rb +216 -0
- data/tasks/jobs.rake +1 -0
- metadata +256 -0
data/generators/delayed_job/delayed_job_generator.rb
ADDED
@@ -0,0 +1,22 @@
class DelayedJobGenerator < Rails::Generator::Base
  default_options :skip_migration => false

  def manifest
    record do |m|
      m.template 'script', 'script/delayed_job', :chmod => 0755
      if !options[:skip_migration] && defined?(ActiveRecord)
        m.migration_template "migration.rb", 'db/migrate',
                             :migration_file_name => "create_delayed_jobs"
      end
    end
  end

  protected

  def add_options!(opt)
    opt.separator ''
    opt.separator 'Options:'
    opt.on("--skip-migration", "Don't generate a migration") { |v| options[:skip_migration] = v }
  end

end
data/generators/delayed_job/templates/migration.rb
ADDED
@@ -0,0 +1,21 @@
class CreateDelayedJobs < ActiveRecord::Migration
  def self.up
    create_table :delayed_jobs, :force => true do |table|
      table.integer  :priority, :default => 0  # Allows some jobs to jump to the front of the queue
      table.integer  :attempts, :default => 0  # Provides for retries, but still fail eventually.
      table.text     :handler                  # YAML-encoded string of the object that will do work
      table.text     :last_error               # reason for last failure (See Note below)
      table.datetime :run_at                   # When to run. Could be Time.zone.now for immediately, or sometime in the future.
      table.datetime :locked_at                # Set when a client is working on this object
      table.datetime :failed_at                # Set when all retries have failed (actually, by default, the record is deleted instead)
      table.string   :locked_by                # Who is working on this object (if locked)
      table.timestamps
    end

    add_index :delayed_jobs, [:priority, :run_at], :name => 'delayed_jobs_priority'
  end

  def self.down
    drop_table :delayed_jobs
  end
end
data/init.rb
ADDED
@@ -0,0 +1 @@
require File.join(File.dirname(__FILE__), 'rails', 'init')
data/lib/delayed/backend/active_record.rb
ADDED
@@ -0,0 +1,90 @@
require 'active_record'

class ActiveRecord::Base
  def self.load_for_delayed_job(id)
    if id
      find(id)
    else
      super
    end
  end

  def dump_for_delayed_job
    "#{self.class};#{id}"
  end
end

module Delayed
  module Backend
    module ActiveRecord
      # A job object that is persisted to the database.
      # Contains the work object as a YAML field.
      class Job < ::ActiveRecord::Base
        include Delayed::Backend::Base
        set_table_name :delayed_jobs

        before_save :set_default_run_at

        named_scope :ready_to_run, lambda {|worker_name, max_run_time|
          {:conditions => ['(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR locked_by = ?) AND failed_at IS NULL', db_time_now, db_time_now - max_run_time, worker_name]}
        }
        named_scope :by_priority, :order => 'priority ASC, run_at ASC'

        def self.after_fork
          ::ActiveRecord::Base.connection.reconnect!
        end

        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
          update_all("locked_by = null, locked_at = null", ["locked_by = ?", worker_name])
        end

        # Find a few candidate jobs to run (in case some immediately get locked by others).
        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
          scope = self.ready_to_run(worker_name, max_run_time)
          scope = scope.scoped(:conditions => ['priority >= ?', Worker.min_priority]) if Worker.min_priority
          scope = scope.scoped(:conditions => ['priority <= ?', Worker.max_priority]) if Worker.max_priority

          ::ActiveRecord::Base.silence do
            scope.by_priority.all(:limit => limit)
          end
        end

        # Lock this job for this worker.
        # Returns true if we have the lock, false otherwise.
        def lock_exclusively!(max_run_time, worker)
          now = self.class.db_time_now
          affected_rows = if locked_by != worker
            # We don't own this job so we will update the locked_by name and the locked_at
            self.class.update_all(["locked_at = ?, locked_by = ?", now, worker], ["id = ? and (locked_at is null or locked_at < ?) and (run_at <= ?)", id, (now - max_run_time.to_i), now])
          else
            # We already own this job, this may happen if the job queue crashes.
            # Simply resume and update the locked_at
            self.class.update_all(["locked_at = ?", now], ["id = ? and locked_by = ?", id, worker])
          end
          if affected_rows == 1
            self.locked_at = now
            self.locked_by = worker
            return true
          else
            return false
          end
        end

        # Get the current time (GMT or local depending on DB)
        # Note: This does not ping the DB to get the time, so all your clients
        # must have synchronized clocks.
        def self.db_time_now
          if Time.zone
            Time.zone.now
          elsif ::ActiveRecord::Base.default_timezone == :utc
            Time.now.utc
          else
            Time.now
          end
        end

      end
    end
  end
end
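A minimal sketch of how these class methods fit together when claiming work, not the gem's actual worker loop (that lives in data/lib/delayed/worker.rb, listed above but not shown here). The worker name format and the four-hour max run time are assumptions for illustration; invoke_job comes from the shared Delayed::Backend::Base module shown below.

require 'socket'

worker_name  = "#{Socket.gethostname}:#{Process.pid}"  # hypothetical worker identifier
max_run_time = 4 * 60 * 60                             # assumed lock timeout, in seconds

# Fetch a handful of candidates, then try to take an exclusive lock on one of them.
candidates = Delayed::Backend::ActiveRecord::Job.find_available(worker_name, 5, max_run_time)
job = candidates.detect { |candidate| candidate.lock_exclusively!(max_run_time, worker_name) }

if job
  job.invoke_job   # runs payload_object.perform (see Delayed::Backend::Base below)
  job.destroy      # a job that finished cleanly is removed
end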
data/lib/delayed/backend/base.rb
ADDED
@@ -0,0 +1,111 @@
module Delayed
  module Backend
    class DeserializationError < StandardError
    end

    module Base
      def self.included(base)
        base.extend ClassMethods
      end

      module ClassMethods
        # Add a job to the queue
        def enqueue(*args)
          object = args.shift
          unless object.respond_to?(:perform)
            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
          end

          priority = args.first || 0
          run_at   = args[1]
          self.create(:payload_object => object, :priority => priority.to_i, :run_at => run_at)
        end

        # Hook method that is called before a new worker is forked
        def before_fork
        end

        # Hook method that is called after a new worker is forked
        def after_fork
        end

        def work_off(num = 100)
          warn "[DEPRECATION] `Delayed::Job.work_off` is deprecated. Use `Delayed::Worker.new.work_off` instead."
          Delayed::Worker.new.work_off(num)
        end
      end

      ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/

      def failed?
        failed_at
      end
      alias_method :failed, :failed?

      def payload_object
        @payload_object ||= deserialize(self['handler'])
      end

      def name
        @name ||= begin
          payload = payload_object
          if payload.respond_to?(:display_name)
            payload.display_name
          else
            payload.class.name
          end
        end
      end

      def payload_object=(object)
        self['handler'] = object.to_yaml
      end

      # Moved into its own method so that new_relic can trace it.
      def invoke_job
        payload_object.perform
      end

      # Unlock this job (note: not saved to DB)
      def unlock
        self.locked_at = nil
        self.locked_by = nil
      end

      private

      def deserialize(source)
        handler = YAML.load(source) rescue nil

        unless handler.respond_to?(:perform)
          if handler.nil? && source =~ ParseObjectFromYaml
            handler_class = $1
          end
          attempt_to_load(handler_class || handler.class)
          handler = YAML.load(source)
        end

        return handler if handler.respond_to?(:perform)

        raise DeserializationError,
          'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
      rescue TypeError, LoadError, NameError => e
        raise DeserializationError,
          "Job failed to load: #{e.message}. Try to manually require the required file."
      end

      # Constantize the object so that ActiveSupport can attempt
      # its auto loading magic. Will raise LoadError if not successful.
      def attempt_to_load(klass)
        klass.constantize
      end

      protected

      def set_default_run_at
        self.run_at ||= self.class.db_time_now
      end

    end
  end
end
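The enqueue class method above takes the payload object first, then an optional priority and run_at. A minimal usage sketch, assuming Delayed::Job has been mapped to one of the backend Job classes in this gem (the worker/railtie wiring that does this is listed above but not shown here); EmailJob is a hypothetical class:

# Anything that responds to #perform can be enqueued.
class EmailJob
  def initialize(user_id)
    @user_id = user_id
  end

  def perform
    # deliver the email for @user_id here
  end
end

Delayed::Job.enqueue(EmailJob.new(42))                      # priority 0, run as soon as possible
Delayed::Job.enqueue(EmailJob.new(42), -1)                  # lower numbers sort first (priority ASC)
Delayed::Job.enqueue(EmailJob.new(42), 0, Time.now + 3600)  # run roughly an hour from now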
data/lib/delayed/backend/data_mapper.rb
ADDED
@@ -0,0 +1,125 @@
require 'dm-core'
require 'dm-observer'
require 'dm-aggregates'

module DataMapper
  module Resource
    module ClassMethods
      def load_for_delayed_job(id)
        find!(id)
      end
    end

    module InstanceMethods
      def dump_for_delayed_job
        "#{self.class};#{id}"
      end
    end
  end
end

module Delayed
  module Backend
    module DataMapper
      class Job
        include ::DataMapper::Resource
        include Delayed::Backend::Base

        storage_names[:default] = 'delayed_jobs'

        property :id,         Serial
        property :priority,   Integer, :default => 0, :index => :run_at_priority
        property :attempts,   Integer, :default => 0
        property :handler,    Text,    :lazy => false
        property :run_at,     Time,    :index => :run_at_priority
        property :locked_at,  Time,    :index => true
        property :locked_by,  String
        property :failed_at,  Time
        property :last_error, Text

        def self.db_time_now
          Time.now
        end

        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
          simple_conditions = { :run_at.lte => db_time_now, :limit => limit, :failed_at => nil, :order => [:priority.asc, :run_at.asc] }

          # respect priorities
          simple_conditions[:priority.gte] = Worker.min_priority if Worker.min_priority
          simple_conditions[:priority.lte] = Worker.max_priority if Worker.max_priority

          # lockable
          lockable = (
            # not locked or past the max time
            (all(:locked_at => nil) | all(:locked_at.lt => db_time_now - max_run_time)) |

            # OR locked by our worker
            all(:locked_by => worker_name))

          # plus some other boring junk
          (lockable).all(simple_conditions)
        end

        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
          all(:locked_by => worker_name).update(:locked_at => nil, :locked_by => nil)
        end

        # Lock this job for this worker.
        # Returns true if we have the lock, false otherwise.
        def lock_exclusively!(max_run_time, worker = worker_name)
          now      = self.class.db_time_now
          overtime = now - max_run_time

          # FIXME - this is a bit gross
          # DM doesn't give us the number of rows affected by a collection update
          # so we have to circumvent some niceness in DM::Collection here
          collection = locked_by != worker ?
            (self.class.all(:id => id, :run_at.lte => now) & (self.class.all(:locked_at => nil) | self.class.all(:locked_at.lt => overtime))) :
            self.class.all(:id => id, :locked_by => worker)

          attributes = collection.model.new(:locked_at => now, :locked_by => worker).dirty_attributes
          affected_rows = self.repository.update(attributes, collection)

          if affected_rows == 1
            self.locked_at = now
            self.locked_by = worker
            return true
          else
            return false
          end
        end

        # these are common to the other backends, so we provide an implementation
        def self.delete_all
          Delayed::Job.auto_migrate!
        end

        def self.find id
          get id
        end

        def update_attributes(attributes)
          attributes.each do |k, v|
            self[k] = v
          end
          self.save
        end

      end

      class JobObserver
        include ::DataMapper::Observer

        observe Job

        before :save do
          self.run_at ||= self.class.db_time_now
        end
      end
    end
  end
end
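All three backends patch their ORM with the same serialization pair, dump_for_delayed_job and load_for_delayed_job, so a persisted record can be referenced as "ClassName;id" rather than YAML-dumped wholesale. A small round-trip sketch, assuming a hypothetical DataMapper model Newsletter; the split/constantize glue below is only for illustration (the gem's own version lives in data/lib/delayed/performable_method.rb, listed above but not shown), and constantize assumes ActiveSupport is loaded, as attempt_to_load in the Base module already does:

newsletter = Newsletter.get(7)
token = newsletter.dump_for_delayed_job                  # => "Newsletter;7"

klass_name, record_id = token.split(';')                 # record_id arrives as a string here
klass_name.constantize.load_for_delayed_job(record_id)   # => the Newsletter record, via find!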
data/lib/delayed/backend/mongo_mapper.rb
ADDED
@@ -0,0 +1,110 @@
require 'mongo_mapper'

module ::MongoMapper
  module Document
    module ClassMethods
      def load_for_delayed_job(id)
        find!(id)
      end
    end

    module InstanceMethods
      def dump_for_delayed_job
        "#{self.class};#{id}"
      end
    end
  end
end

module Delayed
  module Backend
    module MongoMapper
      class Job
        include ::MongoMapper::Document
        include Delayed::Backend::Base
        set_collection_name 'delayed_jobs'

        key :priority,   Integer, :default => 0
        key :attempts,   Integer, :default => 0
        key :handler,    String
        key :run_at,     Time
        key :locked_at,  Time
        key :locked_by,  String, :index => true
        key :failed_at,  Time
        key :last_error, String
        timestamps!

        before_save :set_default_run_at

        ensure_index [[:priority, 1], [:run_at, 1]]

        def self.before_fork
          ::MongoMapper.connection.close
        end

        def self.after_fork
          ::MongoMapper.connect(RAILS_ENV)
        end

        def self.db_time_now
          Time.now.utc
        end

        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
          right_now = db_time_now

          conditions = {
            :run_at => {"$lte" => right_now},
            :limit => -limit, # In mongo, positive limits are 'soft' and negative are 'hard'
            :failed_at => nil,
            :sort => [['priority', 1], ['run_at', 1]]
          }

          where = "this.locked_at == null || this.locked_at < #{make_date(right_now - max_run_time)}"

          (conditions[:priority] ||= {})['$gte'] = Worker.min_priority.to_i if Worker.min_priority
          (conditions[:priority] ||= {})['$lte'] = Worker.max_priority.to_i if Worker.max_priority

          results = all(conditions.merge(:locked_by => worker_name))
          results += all(conditions.merge('$where' => where)) if results.size < limit
          results
        end

        # When a worker is exiting, make sure we don't have any locked jobs.
        def self.clear_locks!(worker_name)
          collection.update({:locked_by => worker_name}, {"$set" => {:locked_at => nil, :locked_by => nil}}, :multi => true)
        end

        # Lock this job for this worker.
        # Returns true if we have the lock, false otherwise.
        def lock_exclusively!(max_run_time, worker = worker_name)
          right_now = self.class.db_time_now
          overtime  = right_now - max_run_time.to_i

          query = "this.locked_at == null || this.locked_at < #{make_date(overtime)} || this.locked_by == #{worker.to_json}"
          conditions = {:_id => id, :run_at => {"$lte" => right_now}, "$where" => query}

          collection.update(conditions, {"$set" => {:locked_at => right_now, :locked_by => worker}})
          affected_rows = collection.find({:_id => id, :locked_by => worker}).count
          if affected_rows == 1
            self.locked_at = right_now
            self.locked_by = worker
            return true
          else
            return false
          end
        end

        private

        def self.make_date(date_or_seconds)
          "new Date(#{date_or_seconds.to_f * 1000})"
        end

        def make_date(date)
          self.class.make_date(date)
        end
      end
    end
  end
end
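With three interchangeable backends in the gem, an application has to pick one at boot. The delayed_job 2.0 convention is a Delayed::Worker.backend setter; that class is part of this diff (data/lib/delayed/worker.rb) but not shown here, so treat the exact call and the initializer path as assumptions:

# config/initializers/delayed_job_config.rb (hypothetical path)
Delayed::Worker.backend = :mongo_mapper   # or :active_record / :data_mapper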