delayed_job 1.8.5 → 1.9.0pre
- data/README.textile +34 -21
- data/Rakefile +7 -2
- data/VERSION +1 -1
- data/delayed_job.gemspec +39 -10
- data/generators/delayed_job/delayed_job_generator.rb +1 -1
- data/generators/delayed_job/templates/migration.rb +2 -1
- data/lib/delayed/backend/active_record.rb +90 -0
- data/lib/delayed/backend/base.rb +106 -0
- data/lib/delayed/backend/mongo_mapper.rb +110 -0
- data/lib/delayed/command.rb +6 -4
- data/lib/delayed/performable_method.rb +38 -31
- data/lib/delayed/worker.rb +121 -11
- data/lib/delayed_job.rb +2 -2
- data/rails/init.rb +12 -0
- data/spec/backend/active_record_job_spec.rb +41 -0
- data/spec/backend/mongo_mapper_job_spec.rb +69 -0
- data/spec/backend/shared_backend_spec.rb +244 -0
- data/spec/delayed_method_spec.rb +8 -99
- data/spec/performable_method_spec.rb +42 -0
- data/spec/sample_jobs.rb +21 -0
- data/spec/{database.rb → setup/active_record.rb} +2 -12
- data/spec/setup/mongo_mapper.rb +13 -0
- data/spec/spec_helper.rb +35 -0
- data/spec/story_spec.rb +1 -1
- data/spec/worker_spec.rb +178 -0
- metadata +59 -11
- data/init.rb +0 -1
- data/lib/delayed/job.rb +0 -290
- data/spec/job_spec.rb +0 -419
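The headline change in this release: the single ActiveRecord-bound Delayed::Job class (lib/delayed/job.rb, deleted below) is split into pluggable backends, with shared behaviour in lib/delayed/backend/base.rb and storage-specific code in lib/delayed/backend/active_record.rb and lib/delayed/backend/mongo_mapper.rb, wired up by the new rails/init.rb. A minimal usage sketch under the new layout follows; the Delayed::Worker.backend= setter is an assumption about what the (not shown) rails/init.rb configures, and NewsletterJob is the illustrative job from the project README.

# Sketch only -- Delayed::Worker.backend= is assumed from the new
# lib/delayed/backend/* files and rails/init.rb added in this release.
require 'delayed_job'

Delayed::Worker.backend = :active_record   # or :mongo_mapper (assumed setter)

# Any object responding to #perform can still be enqueued, as in 1.8.x.
class NewsletterJob < Struct.new(:text, :emails)
  def perform
    emails.each { |e| NewsletterMailer.deliver_text_to_email(text, e) }
  end
end

Delayed::Job.enqueue NewsletterJob.new('lorem ipsum...', Customer.find(:all).collect(&:email))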
metadata CHANGED
@@ -1,7 +1,7 @@
 --- !ruby/object:Gem::Specification
 name: delayed_job
 version: !ruby/object:Gem::Version
-  version: 1.8.5
+  version: 1.9.0pre
 platform: ruby
 authors:
 - Brandon Keepers
@@ -10,11 +10,43 @@ autorequire:
 bindir: bin
 cert_chain: []

-date: 2010-03-
+date: 2010-03-26 00:00:00 -04:00
 default_executable:
-dependencies:
-
-
+dependencies:
+- !ruby/object:Gem::Dependency
+  name: daemons
+  type: :runtime
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: rspec
+  type: :development
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+- !ruby/object:Gem::Dependency
+  name: sqlite3-ruby
+  type: :development
+  version_requirement:
+  version_requirements: !ruby/object:Gem::Requirement
+    requirements:
+    - - ">="
+      - !ruby/object:Gem::Version
+        version: "0"
+    version:
+description: |-
+  Delayed_job (or DJ) encapsulates the common pattern of asynchronously executing longer tasks in the background. It is a direct extraction from Shopify where the job table is responsible for a multitude of core tasks.
+
+  This gem is collectiveidea's fork (http://github.com/collectiveidea/delayed_job).
 email: tobi@leetsoft.com
 executables: []

@@ -33,20 +65,29 @@ files:
 - generators/delayed_job/delayed_job_generator.rb
 - generators/delayed_job/templates/migration.rb
 - generators/delayed_job/templates/script
--
+- lib/delayed/backend/active_record.rb
+- lib/delayed/backend/base.rb
+- lib/delayed/backend/mongo_mapper.rb
 - lib/delayed/command.rb
-- lib/delayed/job.rb
 - lib/delayed/message_sending.rb
 - lib/delayed/performable_method.rb
 - lib/delayed/recipes.rb
 - lib/delayed/tasks.rb
 - lib/delayed/worker.rb
 - lib/delayed_job.rb
+- rails/init.rb
 - recipes/delayed_job.rb
-- spec/
+- spec/backend/active_record_job_spec.rb
+- spec/backend/mongo_mapper_job_spec.rb
+- spec/backend/shared_backend_spec.rb
 - spec/delayed_method_spec.rb
-- spec/
+- spec/performable_method_spec.rb
+- spec/sample_jobs.rb
+- spec/setup/active_record.rb
+- spec/setup/mongo_mapper.rb
+- spec/spec_helper.rb
 - spec/story_spec.rb
+- spec/worker_spec.rb
 - tasks/jobs.rake
 has_rdoc: true
 homepage: http://github.com/collectiveidea/delayed_job
@@ -80,7 +121,14 @@ signing_key:
 specification_version: 3
 summary: Database-backed asynchronous priority queue system -- Extracted from Shopify
 test_files:
-- spec/
+- spec/backend/active_record_job_spec.rb
+- spec/backend/mongo_mapper_job_spec.rb
+- spec/backend/shared_backend_spec.rb
 - spec/delayed_method_spec.rb
-- spec/
+- spec/performable_method_spec.rb
+- spec/sample_jobs.rb
+- spec/setup/active_record.rb
+- spec/setup/mongo_mapper.rb
+- spec/spec_helper.rb
 - spec/story_spec.rb
+- spec/worker_spec.rb
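The gemspec above is generated, but for reference the new dependency set (daemons at runtime, rspec and sqlite3-ruby for development, all unversioned) corresponds to declarations like these in a hand-written spec; an illustration of the same dependency block only, not the gem's actual build configuration:

# Equivalent hand-written declarations for the dependency section above (illustrative).
Gem::Specification.new do |s|
  s.name    = 'delayed_job'
  s.version = '1.9.0pre'

  s.add_runtime_dependency     'daemons'       # used by the delayed_job daemon command
  s.add_development_dependency 'rspec'
  s.add_development_dependency 'sqlite3-ruby'
end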
data/init.rb DELETED
@@ -1 +0,0 @@
-require File.dirname(__FILE__) + '/lib/delayed_job'
data/lib/delayed/job.rb DELETED
@@ -1,290 +0,0 @@
-require 'timeout'
-
-module Delayed
-
-  class DeserializationError < StandardError
-  end
-
-  # A job object that is persisted to the database.
-  # Contains the work object as a YAML field.
-  class Job < ActiveRecord::Base
-    @@max_attempts = 25
-    @@max_run_time = 4.hours
-
-    cattr_accessor :max_attempts, :max_run_time
-
-    set_table_name :delayed_jobs
-
-    # By default failed jobs are destroyed after too many attempts.
-    # If you want to keep them around (perhaps to inspect the reason
-    # for the failure), set this to false.
-    cattr_accessor :destroy_failed_jobs
-    self.destroy_failed_jobs = true
-
-    # Every worker has a unique name which by default is the pid of the process.
-    # There are some advantages to overriding this with something which survives worker retarts:
-    # Workers can safely resume working on tasks which are locked by themselves. The worker will assume that it crashed before.
-    @@worker_name = nil
-
-    def self.worker_name
-      return @@worker_name unless @@worker_name.nil?
-      "host:#{Socket.gethostname} pid:#{Process.pid}" rescue "pid:#{Process.pid}"
-    end
-
-    def self.worker_name=(val)
-      @@worker_name = val
-    end
-
-    def worker_name
-      self.class.worker_name
-    end
-
-    def worker_name=(val)
-      @@worker_name = val
-    end
-
-    NextTaskSQL = '(run_at <= ? AND (locked_at IS NULL OR locked_at < ?) OR (locked_by = ?)) AND failed_at IS NULL'
-    NextTaskOrder = 'priority DESC, run_at ASC'
-
-    ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
-
-    cattr_accessor :min_priority, :max_priority
-    self.min_priority = nil
-    self.max_priority = nil
-
-    # When a worker is exiting, make sure we don't have any locked jobs.
-    def self.clear_locks!
-      update_all("locked_by = null, locked_at = null", ["locked_by = ?", worker_name])
-    end
-
-    def failed?
-      failed_at
-    end
-    alias_method :failed, :failed?
-
-    def payload_object
-      @payload_object ||= deserialize(self['handler'])
-    end
-
-    def name
-      @name ||= begin
-        payload = payload_object
-        if payload.respond_to?(:display_name)
-          payload.display_name
-        else
-          payload.class.name
-        end
-      end
-    end
-
-    def payload_object=(object)
-      self['handler'] = object.to_yaml
-    end
-
-    # Reschedule the job in the future (when a job fails).
-    # Uses an exponential scale depending on the number of failed attempts.
-    def reschedule(message, backtrace = [], time = nil)
-      self.last_error = message + "\n" + backtrace.join("\n")
-
-      if (self.attempts += 1) < max_attempts
-        time ||= Job.db_time_now + (attempts ** 4) + 5
-
-        self.run_at = time
-        self.unlock
-        save!
-      else
-        logger.info "* [JOB] PERMANENTLY removing #{self.name} because of #{attempts} consecutive failures."
-        destroy_failed_jobs ? destroy : update_attribute(:failed_at, Delayed::Job.db_time_now)
-      end
-    end
-
-
-    # Try to run one job. Returns true/false (work done/work failed) or nil if job can't be locked.
-    def run_with_lock(max_run_time, worker_name)
-      logger.info "* [JOB] acquiring lock on #{name}"
-      unless lock_exclusively!(max_run_time, worker_name)
-        # We did not get the lock, some other worker process must have
-        logger.warn "* [JOB] failed to acquire exclusive lock for #{name}"
-        return nil # no work done
-      end
-
-      begin
-        runtime = Benchmark.realtime do
-          Timeout.timeout(max_run_time.to_i) { invoke_job }
-          destroy
-        end
-        # TODO: warn if runtime > max_run_time ?
-        logger.info "* [JOB] #{name} completed after %.4f" % runtime
-        return true # did work
-      rescue Exception => e
-        reschedule e.message, e.backtrace
-        log_exception(e)
-        return false # work failed
-      end
-    end
-
-    # Add a job to the queue
-    def self.enqueue(*args, &block)
-      object = block_given? ? EvaledJob.new(&block) : args.shift
-
-      unless object.respond_to?(:perform) || block_given?
-        raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
-      end
-
-      priority = args.first || 0
-      run_at = args[1]
-
-      Job.create(:payload_object => object, :priority => priority.to_i, :run_at => run_at)
-    end
-
-    # Find a few candidate jobs to run (in case some immediately get locked by others).
-    def self.find_available(limit = 5, max_run_time = max_run_time)
-
-      time_now = db_time_now
-
-      sql = NextTaskSQL.dup
-
-      conditions = [time_now, time_now - max_run_time, worker_name]
-
-      if self.min_priority
-        sql << ' AND (priority >= ?)'
-        conditions << min_priority
-      end
-
-      if self.max_priority
-        sql << ' AND (priority <= ?)'
-        conditions << max_priority
-      end
-
-      conditions.unshift(sql)
-
-      ActiveRecord::Base.silence do
-        find(:all, :conditions => conditions, :order => NextTaskOrder, :limit => limit)
-      end
-    end
-
-    # Run the next job we can get an exclusive lock on.
-    # If no jobs are left we return nil
-    def self.reserve_and_run_one_job(max_run_time = max_run_time)
-
-      # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
-      # this leads to a more even distribution of jobs across the worker processes
-      find_available(5, max_run_time).each do |job|
-        t = job.run_with_lock(max_run_time, worker_name)
-        return t unless t == nil # return if we did work (good or bad)
-      end
-
-      nil # we didn't do any work, all 5 were not lockable
-    end
-
-    # Lock this job for this worker.
-    # Returns true if we have the lock, false otherwise.
-    def lock_exclusively!(max_run_time, worker = worker_name)
-      now = self.class.db_time_now
-      affected_rows = if locked_by != worker
-        # We don't own this job so we will update the locked_by name and the locked_at
-        self.class.update_all(["locked_at = ?, locked_by = ?", now, worker], ["id = ? and (locked_at is null or locked_at < ?) and (run_at <= ?)", id, (now - max_run_time.to_i), now])
-      else
-        # We already own this job, this may happen if the job queue crashes.
-        # Simply resume and update the locked_at
-        self.class.update_all(["locked_at = ?", now], ["id = ? and locked_by = ?", id, worker])
-      end
-      if affected_rows == 1
-        self.locked_at = now
-        self.locked_by = worker
-        return true
-      else
-        return false
-      end
-    end
-
-    # Unlock this job (note: not saved to DB)
-    def unlock
-      self.locked_at = nil
-      self.locked_by = nil
-    end
-
-    # This is a good hook if you need to report job processing errors in additional or different ways
-    def log_exception(error)
-      logger.error "* [JOB] #{name} failed with #{error.class.name}: #{error.message} - #{attempts} failed attempts"
-      logger.error(error)
-    end
-
-    # Do num jobs and return stats on success/failure.
-    # Exit early if interrupted.
-    def self.work_off(num = 100)
-      success, failure = 0, 0
-
-      num.times do
-        case self.reserve_and_run_one_job
-        when true
-          success += 1
-        when false
-          failure += 1
-        else
-          break # leave if no work could be done
-        end
-        break if $exit # leave if we're exiting
-      end
-
-      return [success, failure]
-    end
-
-    # Moved into its own method so that new_relic can trace it.
-    def invoke_job
-      payload_object.perform
-    end
-
-    private
-
-    def deserialize(source)
-      handler = YAML.load(source) rescue nil
-
-      unless handler.respond_to?(:perform)
-        if handler.nil? && source =~ ParseObjectFromYaml
-          handler_class = $1
-        end
-        attempt_to_load(handler_class || handler.class)
-        handler = YAML.load(source)
-      end
-
-      return handler if handler.respond_to?(:perform)
-
-      raise DeserializationError,
-        'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
-    rescue TypeError, LoadError, NameError => e
-      raise DeserializationError,
-        "Job failed to load: #{e.message}. Try to manually require the required file."
-    end
-
-    # Constantize the object so that ActiveSupport can attempt
-    # its auto loading magic. Will raise LoadError if not successful.
-    def attempt_to_load(klass)
-      klass.constantize
-    end
-
-    # Get the current time (GMT or local depending on DB)
-    # Note: This does not ping the DB to get the time, so all your clients
-    # must have syncronized clocks.
-    def self.db_time_now
-      (ActiveRecord::Base.default_timezone == :utc) ? Time.now.utc : Time.zone.now
-    end
-
-    protected
-
-    def before_save
-      self.run_at ||= self.class.db_time_now
-    end
-
-  end
-
-  class EvaledJob
-    def initialize
-      @job = yield
-    end
-
-    def perform
-      eval(@job)
-    end
-  end
-end
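Two numbers from the class removed above are worth keeping in mind when reading the new lib/delayed/worker.rb: failed jobs are retried up to max_attempts (25) times, and each retry is pushed out by attempts**4 + 5 seconds. The same arithmetic as a standalone sketch:

# Retry schedule implied by the removed Delayed::Job#reschedule:
#   run_at = db_time_now + attempts**4 + 5 (seconds), up to 25 attempts.
def retry_delay(attempts)
  attempts ** 4 + 5
end

(1..5).each { |n| puts "attempt #{n}: retried after #{retry_delay(n)}s" }
# attempt 1: 6s, 2: 21s, 3: 86s, 4: 261s, 5: 630s;
# the 25th and final attempt waits 25**4 + 5 = 390_630s (about 4.5 days).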
data/spec/job_spec.rb DELETED
@@ -1,419 +0,0 @@
-require File.dirname(__FILE__) + '/database'
-
-class SimpleJob
-  cattr_accessor :runs; self.runs = 0
-  def perform; @@runs += 1; end
-end
-
-class ErrorJob
-  cattr_accessor :runs; self.runs = 0
-  def perform; raise 'did not work'; end
-end
-
-class LongRunningJob
-  def perform; sleep 250; end
-end
-
-module M
-  class ModuleJob
-    cattr_accessor :runs; self.runs = 0
-    def perform; @@runs += 1; end
-  end
-
-end
-
-describe Delayed::Job do
-  before do
-    Delayed::Job.max_priority = nil
-    Delayed::Job.min_priority = nil
-
-    Delayed::Job.delete_all
-  end
-
-  before(:each) do
-    SimpleJob.runs = 0
-  end
-
-  it "should set run_at automatically if not set" do
-    Delayed::Job.create(:payload_object => ErrorJob.new ).run_at.should_not == nil
-  end
-
-  it "should not set run_at automatically if already set" do
-    later = 5.minutes.from_now
-    Delayed::Job.create(:payload_object => ErrorJob.new, :run_at => later).run_at.should == later
-  end
-
-  it "should raise ArgumentError when handler doesn't respond_to :perform" do
-    lambda { Delayed::Job.enqueue(Object.new) }.should raise_error(ArgumentError)
-  end
-
-  it "should increase count after enqueuing items" do
-    Delayed::Job.enqueue SimpleJob.new
-    Delayed::Job.count.should == 1
-  end
-
-  it "should be able to set priority when enqueuing items" do
-    Delayed::Job.enqueue SimpleJob.new, 5
-    Delayed::Job.first.priority.should == 5
-  end
-
-  it "should be able to set run_at when enqueuing items" do
-    later = (Delayed::Job.db_time_now+5.minutes)
-    Delayed::Job.enqueue SimpleJob.new, 5, later
-
-    # use be close rather than equal to because millisecond values cn be lost in DB round trip
-    Delayed::Job.first.run_at.should be_close(later, 1)
-  end
-
-  it "should call perform on jobs when running work_off" do
-    SimpleJob.runs.should == 0
-
-    Delayed::Job.enqueue SimpleJob.new
-    Delayed::Job.work_off
-
-    SimpleJob.runs.should == 1
-  end
-
-
-  it "should work with eval jobs" do
-    $eval_job_ran = false
-
-    Delayed::Job.enqueue do <<-JOB
-      $eval_job_ran = true
-    JOB
-    end
-
-    Delayed::Job.work_off
-
-    $eval_job_ran.should == true
-  end
-
-  it "should work with jobs in modules" do
-    M::ModuleJob.runs.should == 0
-
-    Delayed::Job.enqueue M::ModuleJob.new
-    Delayed::Job.work_off
-
-    M::ModuleJob.runs.should == 1
-  end
-
-  it "should re-schedule by about 1 second at first and increment this more and more minutes when it fails to execute properly" do
-    Delayed::Job.enqueue ErrorJob.new
-    Delayed::Job.work_off(1)
-
-    job = Delayed::Job.find(:first)
-
-    job.last_error.should =~ /did not work/
-    job.last_error.should =~ /job_spec.rb:10:in `perform'/
-    job.attempts.should == 1
-
-    job.run_at.should > Delayed::Job.db_time_now - 10.minutes
-    job.run_at.should < Delayed::Job.db_time_now + 10.minutes
-  end
-
-  it "should record last_error when destroy_failed_jobs = false, max_attempts = 1" do
-    Delayed::Job.destroy_failed_jobs = false
-    Delayed::Job::max_attempts = 1
-    job = Delayed::Job.enqueue ErrorJob.new
-    Delayed::Job.work_off
-    job.reload
-    job.last_error.should =~ /did not work/
-    job.last_error.should =~ /job_spec.rb/
-    job.attempts.should == 1
-
-    job.failed_at.should_not == nil
-  end
-
-  it "should raise an DeserializationError when the job class is totally unknown" do
-
-    job = Delayed::Job.new
-    job['handler'] = "--- !ruby/object:JobThatDoesNotExist {}"
-
-    lambda { job.payload_object.perform }.should raise_error(Delayed::DeserializationError)
-  end
-
-  it "should try to load the class when it is unknown at the time of the deserialization" do
-    job = Delayed::Job.new
-    job['handler'] = "--- !ruby/object:JobThatDoesNotExist {}"
-
-    job.should_receive(:attempt_to_load).with('JobThatDoesNotExist').and_return(true)
-
-    lambda { job.payload_object.perform }.should raise_error(Delayed::DeserializationError)
-  end
-
-  it "should try include the namespace when loading unknown objects" do
-    job = Delayed::Job.new
-    job['handler'] = "--- !ruby/object:Delayed::JobThatDoesNotExist {}"
-    job.should_receive(:attempt_to_load).with('Delayed::JobThatDoesNotExist').and_return(true)
-    lambda { job.payload_object.perform }.should raise_error(Delayed::DeserializationError)
-  end
-
-  it "should also try to load structs when they are unknown (raises TypeError)" do
-    job = Delayed::Job.new
-    job['handler'] = "--- !ruby/struct:JobThatDoesNotExist {}"
-
-    job.should_receive(:attempt_to_load).with('JobThatDoesNotExist').and_return(true)
-
-    lambda { job.payload_object.perform }.should raise_error(Delayed::DeserializationError)
-  end
-
-  it "should try include the namespace when loading unknown structs" do
-    job = Delayed::Job.new
-    job['handler'] = "--- !ruby/struct:Delayed::JobThatDoesNotExist {}"
-
-    job.should_receive(:attempt_to_load).with('Delayed::JobThatDoesNotExist').and_return(true)
-    lambda { job.payload_object.perform }.should raise_error(Delayed::DeserializationError)
-  end
-
-  context "reschedule" do
-    before do
-      @job = Delayed::Job.create :payload_object => SimpleJob.new
-    end
-
-    context "and we want to destroy jobs" do
-      before do
-        Delayed::Job.destroy_failed_jobs = true
-      end
-
-      it "should be destroyed if it failed more than Job::max_attempts times" do
-        @job.should_receive(:destroy)
-        Delayed::Job::max_attempts.times { @job.reschedule 'FAIL' }
-      end
-
-      it "should not be destroyed if failed fewer than Job::max_attempts times" do
-        @job.should_not_receive(:destroy)
-        (Delayed::Job::max_attempts - 1).times { @job.reschedule 'FAIL' }
-      end
-    end
-
-    context "and we don't want to destroy jobs" do
-      before do
-        Delayed::Job.destroy_failed_jobs = false
-      end
-
-      it "should be failed if it failed more than Job::max_attempts times" do
-        @job.reload.failed_at.should == nil
-        Delayed::Job::max_attempts.times { @job.reschedule 'FAIL' }
-        @job.reload.failed_at.should_not == nil
-      end
-
-      it "should not be failed if it failed fewer than Job::max_attempts times" do
-        (Delayed::Job::max_attempts - 1).times { @job.reschedule 'FAIL' }
-        @job.reload.failed_at.should == nil
-      end
-
-    end
-  end
-
-  it "should fail after Job::max_run_time" do
-    @job = Delayed::Job.create :payload_object => LongRunningJob.new
-    Delayed::Job.reserve_and_run_one_job(1.second)
-    @job.reload.last_error.should =~ /expired/
-    @job.attempts.should == 1
-  end
-
-  it "should never find failed jobs" do
-    @job = Delayed::Job.create :payload_object => SimpleJob.new, :attempts => 50, :failed_at => Delayed::Job.db_time_now
-    Delayed::Job.find_available(1).length.should == 0
-  end
-
-  context "when another worker is already performing an task, it" do
-
-    before :each do
-      Delayed::Job.worker_name = 'worker1'
-      @job = Delayed::Job.create :payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => Delayed::Job.db_time_now - 5.minutes
-    end
-
-    it "should not allow a second worker to get exclusive access" do
-      @job.lock_exclusively!(4.hours, 'worker2').should == false
-    end
-
-    it "should allow a second worker to get exclusive access if the timeout has passed" do
-      @job.lock_exclusively!(1.minute, 'worker2').should == true
-    end
-
-    it "should be able to get access to the task if it was started more then max_age ago" do
-      @job.locked_at = 5.hours.ago
-      @job.save
-
-      @job.lock_exclusively! 4.hours, 'worker2'
-      @job.reload
-      @job.locked_by.should == 'worker2'
-      @job.locked_at.should > 1.minute.ago
-    end
-
-    it "should not be found by another worker" do
-      Delayed::Job.worker_name = 'worker2'
-
-      Delayed::Job.find_available(1, 6.minutes).length.should == 0
-    end
-
-    it "should be found by another worker if the time has expired" do
-      Delayed::Job.worker_name = 'worker2'
-
-      Delayed::Job.find_available(1, 4.minutes).length.should == 1
-    end
-
-    it "should be able to get exclusive access again when the worker name is the same" do
-      @job.lock_exclusively! 5.minutes, 'worker1'
-      @job.lock_exclusively! 5.minutes, 'worker1'
-      @job.lock_exclusively! 5.minutes, 'worker1'
-    end
-  end
-
-  context "when another worker has worked on a task since the job was found to be available, it" do
-
-    before :each do
-      Delayed::Job.worker_name = 'worker1'
-      @job = Delayed::Job.create :payload_object => SimpleJob.new
-      @job_copy_for_worker_2 = Delayed::Job.find(@job.id)
-    end
-
-    it "should not allow a second worker to get exclusive access if already successfully processed by worker1" do
-      @job.delete
-      @job_copy_for_worker_2.lock_exclusively!(4.hours, 'worker2').should == false
-    end
-
-    it "should not allow a second worker to get exclusive access if failed to be processed by worker1 and run_at time is now in future (due to backing off behaviour)" do
-      @job.update_attributes(:attempts => 1, :run_at => 1.day.from_now)
-      @job_copy_for_worker_2.lock_exclusively!(4.hours, 'worker2').should == false
-    end
-  end
-
-  context "#name" do
-    it "should be the class name of the job that was enqueued" do
-      Delayed::Job.create(:payload_object => ErrorJob.new ).name.should == 'ErrorJob'
-    end
-
-    it "should be the method that will be called if its a performable method object" do
-      Delayed::Job.send_later(:clear_locks!)
-      Delayed::Job.last.name.should == 'Delayed::Job.clear_locks!'
-
-    end
-    it "should be the instance method that will be called if its a performable method object" do
-      story = Story.create :text => "..."
-
-      story.send_later(:save)
-
-      Delayed::Job.last.name.should == 'Story#save'
-    end
-  end
-
-  context "worker prioritization" do
-
-    before(:each) do
-      Delayed::Job.max_priority = nil
-      Delayed::Job.min_priority = nil
-    end
-
-    it "should only work_off jobs that are >= min_priority" do
-      Delayed::Job.min_priority = -5
-      Delayed::Job.max_priority = 5
-      SimpleJob.runs.should == 0
-
-      Delayed::Job.enqueue SimpleJob.new, -10
-      Delayed::Job.enqueue SimpleJob.new, 0
-      Delayed::Job.work_off
-
-      SimpleJob.runs.should == 1
-    end
-
-    it "should only work_off jobs that are <= max_priority" do
-      Delayed::Job.min_priority = -5
-      Delayed::Job.max_priority = 5
-      SimpleJob.runs.should == 0
-
-      Delayed::Job.enqueue SimpleJob.new, 10
-      Delayed::Job.enqueue SimpleJob.new, 0
-
-      Delayed::Job.work_off
-
-      SimpleJob.runs.should == 1
-    end
-
-    it "should fetch jobs ordered by priority" do
-      number_of_jobs = 10
-      number_of_jobs.times { Delayed::Job.enqueue SimpleJob.new, rand(10) }
-      jobs = Delayed::Job.find_available(10)
-      ordered = true
-      jobs[1..-1].each_index{ |i|
-        if (jobs[i].priority < jobs[i+1].priority)
-          ordered = false
-          break
-        end
-      }
-      ordered.should == true
-    end
-
-  end
-
-  context "when pulling jobs off the queue for processing, it" do
-    before(:each) do
-      @job = Delayed::Job.create(
-        :payload_object => SimpleJob.new,
-        :locked_by => 'worker1',
-        :locked_at => Delayed::Job.db_time_now - 5.minutes)
-    end
-
-    it "should leave the queue in a consistent state and not run the job if locking fails" do
-      SimpleJob.runs.should == 0
-      @job.stub!(:lock_exclusively!).with(any_args).once.and_return(false)
-      Delayed::Job.should_receive(:find_available).once.and_return([@job])
-      Delayed::Job.work_off(1)
-      SimpleJob.runs.should == 0
-    end
-
-  end
-
-  context "while running alongside other workers that locked jobs, it" do
-    before(:each) do
-      Delayed::Job.worker_name = 'worker1'
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker2', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
-      Delayed::Job.create(:payload_object => SimpleJob.new)
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
-    end
-
-    it "should ingore locked jobs from other workers" do
-      Delayed::Job.worker_name = 'worker3'
-      SimpleJob.runs.should == 0
-      Delayed::Job.work_off
-      SimpleJob.runs.should == 1 # runs the one open job
-    end
-
-    it "should find our own jobs regardless of locks" do
-      Delayed::Job.worker_name = 'worker1'
-      SimpleJob.runs.should == 0
-      Delayed::Job.work_off
-      SimpleJob.runs.should == 3 # runs open job plus worker1 jobs that were already locked
-    end
-  end
-
-  context "while running with locked and expired jobs, it" do
-    before(:each) do
-      Delayed::Job.worker_name = 'worker1'
-      exp_time = Delayed::Job.db_time_now - (1.minutes + Delayed::Job::max_run_time)
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => exp_time)
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker2', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
-      Delayed::Job.create(:payload_object => SimpleJob.new)
-      Delayed::Job.create(:payload_object => SimpleJob.new, :locked_by => 'worker1', :locked_at => (Delayed::Job.db_time_now - 1.minutes))
-    end
-
-    it "should only find unlocked and expired jobs" do
-      Delayed::Job.worker_name = 'worker3'
-      SimpleJob.runs.should == 0
-      Delayed::Job.work_off
-      SimpleJob.runs.should == 2 # runs the one open job and one expired job
-    end
-
-    it "should ignore locks when finding our own jobs" do
-      Delayed::Job.worker_name = 'worker1'
-      SimpleJob.runs.should == 0
-      Delayed::Job.work_off
-      SimpleJob.runs.should == 3 # runs open job plus worker1 jobs
-      # This is useful in the case of a crash/restart on worker1, but make sure multiple workers on the same host have unique names!
-    end
-
-  end
-
-end
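The deleted job_spec.rb is not lost coverage: per the file list at the top it is redistributed into spec/backend/shared_backend_spec.rb (behaviour common to all backends), per-backend specs, and spec/worker_spec.rb. The usual RSpec idiom for that split looks roughly like this; a sketch of the pattern with assumed class names, not the gem's actual spec code:

# spec/backend/shared_backend_spec.rb -- examples every backend must pass (sketch).
shared_examples_for 'a delayed_job backend' do
  it 'sets run_at automatically if not set' do
    @backend.create(:payload_object => SimpleJob.new).run_at.should_not be_nil
  end

  it 'raises ArgumentError when the handler does not respond to #perform' do
    lambda { @backend.enqueue(Object.new) }.should raise_error(ArgumentError)
  end
end

# spec/backend/active_record_job_spec.rb -- each backend opts in (backend constant assumed).
describe 'the ActiveRecord backend' do
  before { @backend = Delayed::Job }
  it_should_behave_like 'a delayed_job backend'
end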