delayed_job 2.1.0.pre → 2.1.0.pre2
This diff shows the changes between publicly released versions of this package as they appear in its public registry. It is provided for informational purposes only.
- data/README.textile +1 -16
- data/lib/delayed/backend/active_record.rb +12 -5
- data/lib/delayed/backend/base.rb +12 -3
- data/lib/delayed/backend/shared_spec.rb +472 -0
- data/lib/delayed/command.rb +4 -4
- data/lib/delayed/message_sending.rb +8 -6
- data/lib/delayed/performable_method.rb +9 -1
- data/lib/delayed/tasks.rb +2 -6
- data/lib/delayed/worker.rb +12 -12
- data/lib/delayed_job.rb +0 -4
- data/spec/{backend/active_record_job_spec.rb → active_record_job_spec.rb} +1 -11
- data/spec/message_sending_spec.rb +8 -1
- data/spec/performable_method_spec.rb +19 -1
- data/spec/sample_jobs.rb +36 -0
- data/spec/spec_helper.rb +35 -13
- data/spec/worker_spec.rb +14 -191
- data/spec/yaml_ext_spec.rb +16 -0
- metadata +24 -103
- data/.gitignore +0 -2
- data/Rakefile +0 -53
- data/VERSION +0 -1
- data/benchmarks.rb +0 -33
- data/delayed_job.gemspec +0 -125
- data/lib/delayed/backend/couch_rest.rb +0 -109
- data/lib/delayed/backend/data_mapper.rb +0 -121
- data/lib/delayed/backend/mongo_mapper.rb +0 -106
- data/spec/backend/couch_rest_job_spec.rb +0 -15
- data/spec/backend/data_mapper_job_spec.rb +0 -16
- data/spec/backend/mongo_mapper_job_spec.rb +0 -94
- data/spec/backend/shared_backend_spec.rb +0 -273
- data/spec/setup/active_record.rb +0 -33
- data/spec/setup/couch_rest.rb +0 -7
- data/spec/setup/data_mapper.rb +0 -8
- data/spec/setup/mongo_mapper.rb +0 -17
data/lib/delayed/backend/couch_rest.rb
@@ -1,109 +0,0 @@
-require 'couchrest'
-
-#extent couchrest to handle delayed_job serialization.
-class CouchRest::ExtendedDocument
-  yaml_as "tag:ruby.yaml.org,2002:CouchRest"
-
-  def reload
-    job = self.class.get self['_id']
-    job.each {|k,v| self[k] = v}
-  end
-  def self.find(id)
-    get id
-  end
-  def self.yaml_new(klass, tag, val)
-    klass.get(val['_id'])
-  end
-  def ==(other)
-    if other.is_a? ::CouchRest::ExtendedDocument
-      self['_id'] == other['_id']
-    else
-      super
-    end
-  end
-end
-
-#couchrest adapter
-module Delayed
-  module Backend
-    module CouchRest
-      class Job < ::CouchRest::ExtendedDocument
-        include Delayed::Backend::Base
-        use_database ::CouchRest::Server.new.database('delayed_job')
-
-        property :handler
-        property :last_error
-        property :locked_by
-        property :priority, :default => 0
-        property :attempts, :default => 0
-        property :run_at, :cast_as => 'Time'
-        property :locked_at, :cast_as => 'Time'
-        property :failed_at, :cast_as => 'Time'
-        timestamps!
-
-        set_callback :save, :before, :set_default_run_at
-
-        view_by(:failed_at, :locked_by, :run_at,
-                :map => "function(doc){" +
-                        " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" +
-                        " emit([doc.failed_at || null, doc.locked_by || null, doc.run_at || null], null);}" +
-                        " }")
-        view_by(:failed_at, :locked_at, :run_at,
-                :map => "function(doc){" +
-                        " if(doc['couchrest-type'] == 'Delayed::Backend::CouchRest::Job') {" +
-                        " emit([doc.failed_at || null, doc.locked_at || null, doc.run_at || null], null);}" +
-                        " }")
-
-        def self.db_time_now; Time.now; end
-        def self.find_available(worker_name, limit = 5, max_run_time = ::Delayed::Worker.max_run_time)
-          ready = ready_jobs
-          mine = my_jobs worker_name
-          expire = expired_jobs max_run_time
-          jobs = (ready + mine + expire)[0..limit-1].sort_by { |j| j.priority }
-          jobs = jobs.find_all { |j| j.priority >= Worker.min_priority } if Worker.min_priority
-          jobs = jobs.find_all { |j| j.priority <= Worker.max_priority } if Worker.max_priority
-          jobs
-        end
-        def self.clear_locks!(worker_name)
-          jobs = my_jobs worker_name
-          jobs.each { |j| j.locked_by, j.locked_at = nil, nil; }
-          database.bulk_save jobs
-        end
-        def self.delete_all
-          database.bulk_save all.each { |doc| doc['_deleted'] = true }
-        end
-
-        def lock_exclusively!(max_run_time, worker = worker_name)
-          return false if locked_by_other?(worker) and not expired?(max_run_time)
-          case
-          when locked_by_me?(worker)
-            self.locked_at = self.class.db_time_now
-          when (unlocked? or (locked_by_other?(worker) and expired?(max_run_time)))
-            self.locked_at, self.locked_by = self.class.db_time_now, worker
-          end
-          save
-        rescue RestClient::Conflict
-          false
-        end
-
-        private
-        def self.ready_jobs
-          options = {:startkey => [nil, nil], :endkey => [nil, nil, db_time_now]}
-          by_failed_at_and_locked_by_and_run_at options
-        end
-        def self.my_jobs(worker_name)
-          options = {:startkey => [nil, worker_name], :endkey => [nil, worker_name, {}]}
-          by_failed_at_and_locked_by_and_run_at options
-        end
-        def self.expired_jobs(max_run_time)
-          options = {:startkey => [nil,'0'], :endkey => [nil, db_time_now - max_run_time, db_time_now]}
-          by_failed_at_and_locked_at_and_run_at options
-        end
-        def unlocked?; locked_by.nil?; end
-        def expired?(time); locked_at < self.class.db_time_now - time; end
-        def locked_by_me?(worker); not locked_by.nil? and locked_by == worker; end
-        def locked_by_other?(worker); not locked_by.nil? and locked_by != worker; end
-      end
-    end
-  end
-end
data/lib/delayed/backend/data_mapper.rb
@@ -1,121 +0,0 @@
-require 'dm-core'
-require 'dm-observer'
-require 'dm-aggregates'
-
-DataMapper::Resource.class_eval do
-  yaml_as "tag:ruby.yaml.org,2002:DataMapper"
-
-  def self.yaml_new(klass, tag, val)
-    klass.find(val['id'])
-  end
-
-  def to_yaml_properties
-    ['@id']
-  end
-end
-
-module Delayed
-  module Backend
-    module DataMapper
-      class Job
-        include ::DataMapper::Resource
-        include Delayed::Backend::Base
-
-        storage_names[:default] = 'delayed_jobs'
-
-        property :id, Serial
-        property :priority, Integer, :default => 0, :index => :run_at_priority
-        property :attempts, Integer, :default => 0
-        property :handler, Text, :lazy => false
-        property :run_at, Time, :index => :run_at_priority
-        property :locked_at, Time, :index => true
-        property :locked_by, String
-        property :failed_at, Time
-        property :last_error, Text
-
-        def self.db_time_now
-          Time.now
-        end
-
-        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
-
-          simple_conditions = { :run_at.lte => db_time_now, :limit => limit, :failed_at => nil, :order => [:priority.asc, :run_at.asc] }
-
-          # respect priorities
-          simple_conditions[:priority.gte] = Worker.min_priority if Worker.min_priority
-          simple_conditions[:priority.lte] = Worker.max_priority if Worker.max_priority
-
-          # lockable
-          lockable = (
-            # not locked or past the max time
-            ( all(:locked_at => nil ) | all(:locked_at.lt => db_time_now - max_run_time)) |
-
-            # OR locked by our worker
-            all(:locked_by => worker_name))
-
-          # plus some other boring junk
-          (lockable).all( simple_conditions )
-        end
-
-        # When a worker is exiting, make sure we don't have any locked jobs.
-        def self.clear_locks!(worker_name)
-          all(:locked_by => worker_name).update(:locked_at => nil, :locked_by => nil)
-        end
-
-        # Lock this job for this worker.
-        # Returns true if we have the lock, false otherwise.
-        def lock_exclusively!(max_run_time, worker = worker_name)
-
-          now = self.class.db_time_now
-          overtime = now - max_run_time
-
-          # FIXME - this is a bit gross
-          # DM doesn't give us the number of rows affected by a collection update
-          # so we have to circumvent some niceness in DM::Collection here
-          collection = locked_by != worker ?
-            (self.class.all(:id => id, :run_at.lte => now) & ( self.class.all(:locked_at => nil) | self.class.all(:locked_at.lt => overtime) ) ) :
-            self.class.all(:id => id, :locked_by => worker)
-
-          attributes = collection.model.new(:locked_at => now, :locked_by => worker).dirty_attributes
-          affected_rows = self.repository.update(attributes, collection)
-
-          if affected_rows == 1
-            self.locked_at = now
-            self.locked_by = worker
-            return true
-          else
-            return false
-          end
-        end
-
-        # these are common to the other backends, so we provide an implementation
-        def self.delete_all
-          Delayed::Job.auto_migrate!
-        end
-
-        def self.find id
-          get id
-        end
-
-        def update_attributes(attributes)
-          attributes.each do |k,v|
-            self[k] = v
-          end
-          self.save
-        end
-
-
-      end
-
-      class JobObserver
-        include ::DataMapper::Observer
-
-        observe Job
-
-        before :save do
-          self.run_at ||= self.class.db_time_now
-        end
-      end
-    end
-  end
-end
data/lib/delayed/backend/mongo_mapper.rb
@@ -1,106 +0,0 @@
-require 'mongo_mapper'
-
-MongoMapper::Document.class_eval do
-  yaml_as "tag:ruby.yaml.org,2002:MongoMapper"
-
-  def self.yaml_new(klass, tag, val)
-    klass.find(val['_id'])
-  end
-
-  def to_yaml_properties
-    ['@_id']
-  end
-end
-
-module Delayed
-  module Backend
-    module MongoMapper
-      class Job
-        include ::MongoMapper::Document
-        include Delayed::Backend::Base
-        set_collection_name 'delayed_jobs'
-
-        key :priority, Integer, :default => 0
-        key :attempts, Integer, :default => 0
-        key :handler, String
-        key :run_at, Time
-        key :locked_at, Time
-        key :locked_by, String, :index => true
-        key :failed_at, Time
-        key :last_error, String
-        timestamps!
-
-        before_save :set_default_run_at
-
-        ensure_index [[:priority, 1], [:run_at, 1]]
-
-        def self.before_fork
-          ::MongoMapper.connection.close
-        end
-
-        def self.after_fork
-          ::MongoMapper.connect(RAILS_ENV)
-        end
-
-        def self.db_time_now
-          Time.now.utc
-        end
-
-        def self.find_available(worker_name, limit = 5, max_run_time = Worker.max_run_time)
-          right_now = db_time_now
-
-          conditions = {
-            :run_at => {"$lte" => right_now},
-            :limit => -limit, # In mongo, positive limits are 'soft' and negative are 'hard'
-            :failed_at => nil,
-            :sort => [['priority', 1], ['run_at', 1]]
-          }
-
-          where = "this.locked_at == null || this.locked_at < #{make_date(right_now - max_run_time)}"
-
-          (conditions[:priority] ||= {})['$gte'] = Worker.min_priority.to_i if Worker.min_priority
-          (conditions[:priority] ||= {})['$lte'] = Worker.max_priority.to_i if Worker.max_priority
-
-          results = all(conditions.merge(:locked_by => worker_name))
-          results += all(conditions.merge('$where' => where)) if results.size < limit
-          results
-        end
-
-        # When a worker is exiting, make sure we don't have any locked jobs.
-        def self.clear_locks!(worker_name)
-          collection.update({:locked_by => worker_name}, {"$set" => {:locked_at => nil, :locked_by => nil}}, :multi => true)
-        end
-
-        # Lock this job for this worker.
-        # Returns true if we have the lock, false otherwise.
-        def lock_exclusively!(max_run_time, worker = worker_name)
-          right_now = self.class.db_time_now
-          overtime = right_now - max_run_time.to_i
-
-          query = "this.locked_at == null || this.locked_at < #{make_date(overtime)} || this.locked_by == #{worker.to_json}"
-          conditions = {:_id => id, :run_at => {"$lte" => right_now}, "$where" => query}
-
-          collection.update(conditions, {"$set" => {:locked_at => right_now, :locked_by => worker}})
-          affected_rows = collection.find({:_id => id, :locked_by => worker}).count
-          if affected_rows == 1
-            self.locked_at = right_now
-            self.locked_by = worker
-            return true
-          else
-            return false
-          end
-        end
-
-      private
-
-        def self.make_date(date_or_seconds)
-          "new Date(#{date_or_seconds.to_f * 1000})"
-        end
-
-        def make_date(date)
-          self.class.make_date(date)
-        end
-      end
-    end
-  end
-end
data/spec/backend/couch_rest_job_spec.rb
@@ -1,15 +0,0 @@
-require 'spec_helper'
-require 'backend/shared_backend_spec'
-require 'delayed/backend/couch_rest'
-
-describe Delayed::Backend::CouchRest::Job do
-  before(:all) do
-    @backend = Delayed::Backend::CouchRest::Job
-  end
-
-  before(:each) do
-    @backend.delete_all
-  end
-
-  it_should_behave_like 'a backend'
-end
data/spec/backend/data_mapper_job_spec.rb
@@ -1,16 +0,0 @@
-require 'spec_helper'
-require 'backend/shared_backend_spec'
-require 'delayed/backend/data_mapper'
-
-describe Delayed::Backend::DataMapper::Job do
-  before(:all) do
-    @backend = Delayed::Backend::DataMapper::Job
-  end
-
-  before(:each) do
-    # reset database before each example is run
-    DataMapper.auto_migrate!
-  end
-
-  it_should_behave_like 'a backend'
-end
data/spec/backend/mongo_mapper_job_spec.rb
@@ -1,94 +0,0 @@
-require 'spec_helper'
-require 'backend/shared_backend_spec'
-require 'delayed/backend/mongo_mapper'
-
-describe Delayed::Backend::MongoMapper::Job do
-  before(:all) do
-    @backend = Delayed::Backend::MongoMapper::Job
-  end
-
-  before(:each) do
-    MongoMapper.database.collections.each(&:remove)
-  end
-
-  it_should_behave_like 'a backend'
-
-  describe "indexes" do
-    it "should have combo index on priority and run_at" do
-      @backend.collection.index_information.detect { |index| index[0] == 'priority_1_run_at_1' }.should_not be_nil
-    end
-
-    it "should have index on locked_by" do
-      @backend.collection.index_information.detect { |index| index[0] == 'locked_by_1' }.should_not be_nil
-    end
-  end
-
-  describe "delayed method" do
-    class MongoStoryReader
-      def read(story)
-        "Epilog: #{story.tell}"
-      end
-    end
-
-    class MongoStory
-      include ::MongoMapper::Document
-      key :text, String
-
-      def tell
-        text
-      end
-    end
-
-    it "should ignore not found errors because they are permanent" do
-      story = MongoStory.create :text => 'Once upon a time...'
-      job = story.delay.tell
-      story.destroy
-      lambda { job.invoke_job }.should_not raise_error
-    end
-
-    it "should store the object as string" do
-      story = MongoStory.create :text => 'Once upon a time...'
-      job = story.delay.tell
-
-      job.payload_object.class.should == Delayed::PerformableMethod
-      job.payload_object.object.should == story
-      job.payload_object.method.should == :tell
-      job.payload_object.args.should == []
-      job.payload_object.perform.should == 'Once upon a time...'
-    end
-
-    it "should store arguments as string" do
-      story = MongoStory.create :text => 'Once upon a time...'
-      job = MongoStoryReader.new.delay.read(story)
-      job.payload_object.class.should == Delayed::PerformableMethod
-      job.payload_object.method.should == :read
-      job.payload_object.args.should == [story]
-      job.payload_object.perform.should == 'Epilog: Once upon a time...'
-    end
-  end
-
-  describe "before_fork" do
-    after do
-      MongoMapper.connection.connect_to_master
-    end
-
-    it "should disconnect" do
-      lambda do
-        Delayed::Backend::MongoMapper::Job.before_fork
-      end.should change { !!MongoMapper.connection.connected? }.from(true).to(false)
-    end
-  end
-
-  describe "after_fork" do
-    before do
-      MongoMapper.connection.close
-    end
-
-    it "should call reconnect" do
-      lambda do
-        Delayed::Backend::MongoMapper::Job.after_fork
-      end.should change { !!MongoMapper.connection.connected? }.from(false).to(true)
    end
-  end
-
-end