canvas-jobs 0.9.0

Files changed (61)
  1. checksums.yaml +7 -0
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
  3. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
  4. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
  5. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
  6. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
  7. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
  8. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
  9. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
  10. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
  11. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
  12. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
  13. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
  14. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
  15. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
  16. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
  17. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
  18. data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
  19. data/lib/canvas-jobs.rb +1 -0
  20. data/lib/delayed/backend/active_record.rb +297 -0
  21. data/lib/delayed/backend/base.rb +317 -0
  22. data/lib/delayed/backend/redis/bulk_update.lua +40 -0
  23. data/lib/delayed/backend/redis/destroy_job.lua +2 -0
  24. data/lib/delayed/backend/redis/enqueue.lua +29 -0
  25. data/lib/delayed/backend/redis/fail_job.lua +5 -0
  26. data/lib/delayed/backend/redis/find_available.lua +3 -0
  27. data/lib/delayed/backend/redis/functions.rb +57 -0
  28. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
  29. data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
  30. data/lib/delayed/backend/redis/job.rb +481 -0
  31. data/lib/delayed/backend/redis/set_running.lua +5 -0
  32. data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
  33. data/lib/delayed/batch.rb +56 -0
  34. data/lib/delayed/engine.rb +4 -0
  35. data/lib/delayed/job_tracking.rb +31 -0
  36. data/lib/delayed/lifecycle.rb +83 -0
  37. data/lib/delayed/message_sending.rb +130 -0
  38. data/lib/delayed/performable_method.rb +42 -0
  39. data/lib/delayed/periodic.rb +81 -0
  40. data/lib/delayed/pool.rb +335 -0
  41. data/lib/delayed/settings.rb +32 -0
  42. data/lib/delayed/version.rb +3 -0
  43. data/lib/delayed/worker.rb +213 -0
  44. data/lib/delayed/yaml_extensions.rb +63 -0
  45. data/lib/delayed_job.rb +40 -0
  46. data/spec/active_record_job_spec.rb +61 -0
  47. data/spec/gemfiles/32.gemfile +6 -0
  48. data/spec/gemfiles/40.gemfile +6 -0
  49. data/spec/gemfiles/41.gemfile +6 -0
  50. data/spec/gemfiles/42.gemfile +6 -0
  51. data/spec/migrate/20140924140513_add_story_table.rb +7 -0
  52. data/spec/redis_job_spec.rb +77 -0
  53. data/spec/sample_jobs.rb +26 -0
  54. data/spec/shared/delayed_batch.rb +85 -0
  55. data/spec/shared/delayed_method.rb +419 -0
  56. data/spec/shared/performable_method.rb +52 -0
  57. data/spec/shared/shared_backend.rb +836 -0
  58. data/spec/shared/worker.rb +291 -0
  59. data/spec/shared_jobs_specs.rb +13 -0
  60. data/spec/spec_helper.rb +91 -0
  61. metadata +329 -0
data/db/migrate/20140512213941_add_source_to_jobs.rb
@@ -0,0 +1,15 @@
+ class AddSourceToJobs < ActiveRecord::Migration
+   def self.connection
+     Delayed::Job.connection
+   end
+
+   def self.up
+     add_column :delayed_jobs, :source, :string
+     add_column :failed_jobs, :source, :string
+   end
+
+   def self.down
+     remove_column :delayed_jobs, :source
+     remove_column :failed_jobs, :source
+   end
+ end
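
The new source column is only written at enqueue time, and only when the Marginalia gem is loaded (see the enqueue method in data/lib/delayed/backend/base.rb below). As a hedged illustration of why it is useful, once the column exists you can ask where the jobs currently in the queue were enqueued from:

  # Hypothetical console query -- assumes the ActiveRecord backend and the
  # migration above have been applied; the value format comes from Marginalia.
  Delayed::Job.where("source IS NOT NULL").group(:source).count
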
data/lib/canvas-jobs.rb
@@ -0,0 +1 @@
+ require 'delayed_job'
data/lib/delayed/backend/active_record.rb
@@ -0,0 +1,297 @@
+ class ActiveRecord::Base
+   def self.load_for_delayed_job(id)
+     if id
+       find(id)
+     else
+       super
+     end
+   end
+ end
+
+ module Delayed
+   module Backend
+     module ActiveRecord
+       # A job object that is persisted to the database.
+       # Contains the work object as a YAML field.
+       class Job < ::ActiveRecord::Base
+         include Delayed::Backend::Base
+         self.table_name = :delayed_jobs
+
+         def self.reconnect!
+           clear_all_connections!
+         end
+
+         # be aware that some strand functionality is controlled by triggers on
+         # the database. see
+         # db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb
+         #
+         # next_in_strand defaults to true. if we insert a new job, and it has a
+         # strand, and it's not the next in the strand, we set it to false.
+         #
+         # if we delete a job, and it has a strand, mark the next job in that
+         # strand to be next_in_strand
+         # (this is safe even if we're not deleting the job that was currently
+         # next_in_strand)
+
+         # postgresql needs this lock to be taken before the before_insert
+         # trigger starts, or we risk deadlock inside of the trigger when trying
+         # to raise the lock level
+         before_create :lock_strand_on_create
+         def lock_strand_on_create
+           if strand.present?
+             self.class.connection.execute("SELECT pg_advisory_xact_lock(half_md5_as_bigint(#{self.class.sanitize(strand)}))")
+           end
+         end
+
+         def self.current
+           where("run_at<=?", db_time_now)
+         end
+
+         def self.future
+           where("run_at>?", db_time_now)
+         end
+
+         def self.failed
+           where("failed_at IS NOT NULL")
+         end
+
+         def self.running
+           where("locked_at IS NOT NULL AND locked_by<>'on hold'")
+         end
+
+         # a nice stress test:
+         # 10_000.times { |i| Kernel.send_later_enqueue_args(:system, { :strand => 's1', :run_at => (24.hours.ago + (rand(24.hours.to_i))) }, "echo #{i} >> test1.txt") }
+         # 500.times { |i| "ohai".send_later_enqueue_args(:reverse, { :run_at => (12.hours.ago + (rand(24.hours.to_i))) }) }
+         # then fire up your workers
+         # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
+         def self.ready_to_run
+           where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", db_time_now, true)
+         end
+         def self.by_priority
+           order("priority ASC, run_at ASC")
+         end
+
+         # When a worker is exiting, make sure we don't have any locked jobs.
+         def self.clear_locks!(worker_name)
+           where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+         end
+
+         def self.strand_size(strand)
+           self.where(:strand => strand).count
+         end
+
+         def self.running_jobs()
+           self.running.order(:locked_at)
+         end
+
+         def self.scope_for_flavor(flavor, query)
+           scope = case flavor.to_s
+                   when 'current'
+                     self.current
+                   when 'future'
+                     self.future
+                   when 'failed'
+                     Delayed::Job::Failed
+                   when 'strand'
+                     self.where(:strand => query)
+                   when 'tag'
+                     self.where(:tag => query)
+                   else
+                     raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+                   end
+
+           if %w(current future).include?(flavor.to_s)
+             queue = query.presence || Delayed::Settings.queue
+             scope = scope.where(:queue => queue)
+           end
+
+           scope
+         end
+
+         # get a list of jobs of the given flavor in the given queue
+         # flavor is :current, :future, :failed, :strand or :tag
+         # depending on the flavor, query has a different meaning:
+         # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
+         # for :strand it's the strand name
+         # for :tag it's the tag name
+         # for :failed it's ignored
+         def self.list_jobs(flavor,
+                            limit,
+                            offset = 0,
+                            query = nil)
+           scope = self.scope_for_flavor(flavor, query)
+           order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+           scope.order(order).limit(limit).offset(offset).to_a
+         end
+
+         # get the total job count for the given flavor
+         # see list_jobs for documentation on arguments
+         def self.jobs_count(flavor,
+                             query = nil)
+           scope = self.scope_for_flavor(flavor, query)
+           scope.count
+         end
+
+         # perform a bulk update of a set of jobs
+         # action is :hold, :unhold, or :destroy
+         # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
+         # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
+         def self.bulk_update(action, opts)
+           scope = if opts[:flavor]
+                     raise("Can't bulk update failed jobs") if opts[:flavor].to_s == 'failed'
+                     self.scope_for_flavor(opts[:flavor], opts[:query])
+                   elsif opts[:ids]
+                     self.where(:id => opts[:ids])
+                   end
+
+           return 0 unless scope
+
+           case action.to_s
+           when 'hold'
+             scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
+           when 'unhold'
+             now = db_time_now
+             scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
+           when 'destroy'
+             scope.delete_all
+           end
+         end
+
+         # returns a list of hashes { :tag => tag_name, :count => current_count }
+         # in descending count order
+         # flavor is :current or :all
+         def self.tag_counts(flavor,
+                             limit,
+                             offset = 0)
+           raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+           scope = case flavor.to_s
+                   when 'current'
+                     self.current
+                   when 'all'
+                     self
+                   end
+
+           scope = scope.group(:tag).offset(offset).limit(limit)
+           scope.order("COUNT(tag) DESC").count.map { |t,c| { :tag => t, :count => c } }
+         end
+
+         def self.get_and_lock_next_available(worker_name,
+                                              queue = Delayed::Settings.queue,
+                                              min_priority = nil,
+                                              max_priority = nil)
+
+           check_queue(queue)
+           check_priorities(min_priority, max_priority)
+
+           loop do
+             jobs = find_available(Settings.fetch_batch_size, queue, min_priority, max_priority)
+             return nil if jobs.empty?
+             if Settings.select_random_from_batch
+               jobs = jobs.sort_by { rand }
+             end
+             job = jobs.detect do |job|
+               job.lock_exclusively!(worker_name)
+             end
+             return job if job
+           end
+         end
+
+         def self.find_available(limit,
+                                 queue = Delayed::Settings.queue,
+                                 min_priority = nil,
+                                 max_priority = nil)
+           all_available(queue, min_priority, max_priority).limit(limit).to_a
+         end
+
+         def self.all_available(queue = Delayed::Settings.queue,
+                                min_priority = nil,
+                                max_priority = nil)
+           min_priority ||= Delayed::MIN_PRIORITY
+           max_priority ||= Delayed::MAX_PRIORITY
+
+           check_queue(queue)
+           check_priorities(min_priority, max_priority)
+
+           self.ready_to_run.
+             where(:priority => min_priority..max_priority, :queue => queue).
+             by_priority
+         end
+
+         # used internally by create_singleton to take the appropriate lock
+         # depending on the db driver
+         def self.transaction_for_singleton(strand)
+           self.transaction do
+             connection.execute(sanitize_sql(["SELECT pg_advisory_xact_lock(half_md5_as_bigint(?))", strand]))
+             yield
+           end
+         end
+
+         # Create the job on the specified strand, but only if there aren't any
+         # other non-running jobs on that strand.
+         # (in other words, the job will still be created if there's another job
+         # on the strand but it's already running)
+         def self.create_singleton(options)
+           strand = options[:strand]
+           transaction_for_singleton(strand) do
+             job = self.where(:strand => strand, :locked_at => nil).order(:id).first
+             job || self.create(options)
+           end
+         end
+
+         # Lock this job for this worker.
+         # Returns true if we have the lock, false otherwise.
+         #
+         # It's important to note that for performance reasons, this method does
+         # not re-check the strand constraints -- so you could manually lock a
+         # job using this method that isn't the next to run on its strand.
+         def lock_exclusively!(worker)
+           now = self.class.db_time_now
+           # We don't own this job so we will update the locked_by name and the locked_at
+           affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+           if affected_rows == 1
+             mark_as_locked!(now, worker)
+             return true
+           else
+             return false
+           end
+         end
+
+         def mark_as_locked!(time, worker)
+           self.locked_at = time
+           self.locked_by = worker
+         end
+
+         def create_and_lock!(worker)
+           raise "job already exists" unless new_record?
+           self.locked_at = Delayed::Job.db_time_now
+           self.locked_by = worker
+           save!
+         end
+
+         def fail!
+           attrs = self.attributes
+           attrs['original_job_id'] = attrs.delete('id')
+           attrs['failed_at'] ||= self.class.db_time_now
+           attrs.delete('next_in_strand')
+           self.class.transaction do
+             failed_job = Failed.create(attrs)
+             self.destroy
+             failed_job
+           end
+         rescue
+           # we got an error while failing the job -- we need to at least get
+           # the job out of the queue
+           self.destroy
+           # re-raise so the worker logs the error, at least
+           raise
+         end
+
+         class Failed < Job
+           include Delayed::Backend::Base
+           self.table_name = :failed_jobs
+         end
+       end
+
+     end
+   end
+ end
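
The class methods above double as the gem's job-management API. A usage sketch, assuming the ActiveRecord backend is the one selected (so Delayed::Job resolves to the class above); the queue, tag, strand, and worker names are made up for illustration:

  # Inspect the queue
  Delayed::Job.jobs_count(:current, 'default')          # pending jobs in the 'default' queue
  Delayed::Job.list_jobs(:tag, 50, 0, 'User#perform')   # first 50 jobs with that tag
  Delayed::Job.tag_counts(:current, 10)                 # top 10 tags by pending count
  Delayed::Job.strand_size('user_12345')                # jobs queued on one strand

  # Administer it
  Delayed::Job.bulk_update(:hold, :flavor => :strand, :query => 'user_12345')
  Delayed::Job.bulk_update(:unhold, :ids => [1, 2, 3])

  # What a worker does on each poll cycle
  job = Delayed::Job.get_and_lock_next_available('host-01:4321')
  job.invoke_job if job
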
data/lib/delayed/backend/base.rb
@@ -0,0 +1,317 @@
+ module Delayed
+   module Backend
+     class DeserializationError < StandardError
+     end
+
+     class RecordNotFound < DeserializationError
+     end
+
+     module Base
+       ON_HOLD_LOCKED_BY = 'on hold'
+       ON_HOLD_COUNT = 50
+
+       def self.included(base)
+         base.extend ClassMethods
+         base.default_priority = Delayed::NORMAL_PRIORITY
+         base.before_save :initialize_defaults
+       end
+
+       module ClassMethods
+         attr_accessor :batches
+         attr_accessor :batch_enqueue_args
+         attr_accessor :default_priority
+
+         # Add a job to the queue
+         # The first argument should be an object that respond_to?(:perform)
+         # The rest should be named arguments, these keys are expected:
+         # :priority, :run_at, :queue, :strand, :singleton
+         # Example: Delayed::Job.enqueue(object, :priority => 0, :run_at => time, :queue => queue)
+         def enqueue(*args)
+           object = args.shift
+           unless object.respond_to?(:perform)
+             raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+           end
+
+           options = Settings.default_job_options.merge(args.first || {})
+           options[:priority] ||= self.default_priority
+           options[:payload_object] = object
+           options[:queue] = Delayed::Settings.queue unless options.key?(:queue)
+           options[:max_attempts] ||= Delayed::Settings.max_attempts
+           options[:source] = Marginalia::Comment.construct_comment if defined?(Marginalia) && Marginalia::Comment.components
+
+           # If two parameters are given to n_strand, the first param is used
+           # as the strand name for looking up the Setting, while the second
+           # param is appended to make a unique set of strands.
+           #
+           # For instance, you can pass ["my_job_type", root_account.global_id]
+           # to get a set of n strands per root account, and you can apply the
+           # same default to all.
+           if options[:n_strand]
+             strand_name, ext = options.delete(:n_strand)
+
+             if ext
+               full_strand_name = "#{strand_name}/#{ext}"
+               num_strands = Delayed::Settings.num_strands.call(full_strand_name)
+             else
+               full_strand_name = strand_name
+             end
+
+             num_strands ||= Delayed::Settings.num_strands.call(strand_name)
+             num_strands = num_strands ? num_strands.to_i : 1
+
+             strand_num = num_strands > 1 ? rand(num_strands) + 1 : 1
+             full_strand_name += ":#{strand_num}" if strand_num > 1
+             options[:strand] = full_strand_name
+           end
+
+           if options[:singleton]
+             options[:strand] = options.delete :singleton
+             job = self.create_singleton(options)
+           elsif batches && options.slice(:strand, :run_at).empty?
+             batch_enqueue_args = options.slice(*self.batch_enqueue_args)
+             batches[batch_enqueue_args] << options
+             return true
+           else
+             job = self.create(options)
+           end
+
+           JobTracking.job_created(job)
+
+           job
+         end
+
+         def in_delayed_job?
+           !!Thread.current[:in_delayed_job]
+         end
+
+         def in_delayed_job=(val)
+           Thread.current[:in_delayed_job] = val
+         end
+
+         def check_queue(queue)
+           raise(ArgumentError, "queue name can't be blank") if queue.blank?
+         end
+
+         def check_priorities(min_priority, max_priority)
+           if min_priority && min_priority < Delayed::MIN_PRIORITY
+             raise(ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}")
+           end
+           if max_priority && max_priority > Delayed::MAX_PRIORITY
+             raise(ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}")
+           end
+         end
+
+         # Get the current time (UTC)
+         # Note: This does not ping the DB to get the time, so all your clients
+         # must have synchronized clocks.
+         def db_time_now
+           Time.zone.now
+         end
+
+         def unlock_orphaned_jobs(pid = nil, name = nil)
+           begin
+             name ||= Socket.gethostname
+           rescue
+             return 0
+           end
+           pid_regex = pid || '(\d+)'
+           regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
+           unlocked_jobs = 0
+           running = false if pid
+           self.running_jobs.each do |job|
+             next unless job.locked_by =~ regex
+             unless pid
+               job_pid = $1.to_i
+               running = Process.kill(0, job_pid) rescue false
+             end
+             if !running
+               unlocked_jobs += 1
+               job.reschedule("process died")
+             end
+           end
+           unlocked_jobs
+         end
+       end
+
+       def failed?
+         failed_at
+       end
+       alias_method :failed, :failed?
+
+       # Reschedule the job in the future (when a job fails).
+       # Uses an exponential scale depending on the number of failed attempts.
+       def reschedule(error = nil, time = nil)
+         begin
+           obj = payload_object
+           obj.on_failure(error) if obj && obj.respond_to?(:on_failure)
+         rescue DeserializationError
+           # don't allow a failed deserialization to prevent rescheduling
+         end
+
+         self.attempts += 1
+         if self.attempts >= (self.max_attempts || Delayed::Settings.max_attempts)
+           permanent_failure error || "max attempts reached"
+         else
+           time ||= self.reschedule_at
+           self.run_at = time
+           self.unlock
+           self.save!
+         end
+       end
+
+       def permanent_failure(error)
+         begin
+           # notify the payload_object of a permanent failure
+           obj = payload_object
+           obj.on_permanent_failure(error) if obj && obj.respond_to?(:on_permanent_failure)
+         rescue DeserializationError
+           # don't allow a failed deserialization to prevent destroying the job
+         end
+
+         # optionally destroy the object
+         destroy_self = true
+         if Delayed::Worker.on_max_failures
+           destroy_self = Delayed::Worker.on_max_failures.call(self, error)
+         end
+
+         if destroy_self
+           self.destroy
+         else
+           self.fail!
+         end
+       end
+
+       def payload_object
+         @payload_object ||= deserialize(self['handler'].untaint)
+       end
+
+       def name
+         @name ||= begin
+           payload = payload_object
+           if payload.respond_to?(:display_name)
+             payload.display_name
+           else
+             payload.class.name
+           end
+         end
+       end
+
+       def full_name
+         obj = payload_object rescue nil
+         if obj && obj.respond_to?(:full_name)
+           obj.full_name
+         else
+           name
+         end
+       end
+
+       def payload_object=(object)
+         @payload_object = object
+         self['handler'] = object.to_yaml
+         self['tag'] = if object.respond_to?(:tag)
+                         object.tag
+                       elsif object.is_a?(Module)
+                         "#{object}.perform"
+                       else
+                         "#{object.class}#perform"
+                       end
+       end
+
+       # Moved into its own method so that new_relic can trace it.
+       def invoke_job
+         Delayed::Job.in_delayed_job = true
+         begin
+           payload_object.perform
+         ensure
+           Delayed::Job.in_delayed_job = false
+           ::ActiveRecord::Base.clear_active_connections! unless Rails.env.test?
+         end
+       end
+
+       def batch?
+         payload_object.is_a?(Delayed::Batch::PerformableBatch)
+       end
+
+       # Unlock this job (note: not saved to DB)
+       def unlock
+         self.locked_at = nil
+         self.locked_by = nil
+       end
+
+       def locked?
+         !!(self.locked_at || self.locked_by)
+       end
+
+       def reschedule_at
+         new_time = self.class.db_time_now + (attempts ** 4) + 5
+         begin
+           if payload_object.respond_to?(:reschedule_at)
+             new_time = payload_object.reschedule_at(
+               self.class.db_time_now, attempts)
+           end
+         rescue
+           # TODO: just swallow errors from reschedule_at ?
+         end
+         new_time
+       end
+
+       def hold!
+         self.locked_by = ON_HOLD_LOCKED_BY
+         self.locked_at = self.class.db_time_now
+         self.attempts = ON_HOLD_COUNT
+         self.save!
+       end
+
+       def unhold!
+         self.locked_by = nil
+         self.locked_at = nil
+         self.attempts = 0
+         self.run_at = [self.class.db_time_now, self.run_at].max
+         self.failed_at = nil
+         self.save!
+       end
+
+       def on_hold?
+         self.locked_by == 'on hold' && self.locked_at && self.attempts == ON_HOLD_COUNT
+       end
+
+       private
+
+       ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+
+       def deserialize(source)
+         handler = nil
+         begin
+           handler = _yaml_deserialize(source)
+         rescue TypeError
+           attempt_to_load_from_source(source)
+           handler = _yaml_deserialize(source)
+         end
+
+         return handler if handler.respond_to?(:perform)
+
+         raise DeserializationError,
+           'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+       rescue TypeError, LoadError, NameError => e
+         raise DeserializationError,
+           "Job failed to load: #{e.message}. Try to manually require the required file."
+       end
+
+       def _yaml_deserialize(source)
+         YAML.respond_to?(:unsafe_load) ? YAML.unsafe_load(source) : YAML.load(source)
+       end
+
+       def attempt_to_load_from_source(source)
+         if md = ParseObjectFromYaml.match(source)
+           md[1].constantize
+         end
+       end
+
+       public
+       def initialize_defaults
+         self.queue ||= Delayed::Settings.queue
+         self.run_at ||= self.class.db_time_now
+       end
+     end
+   end
+ end
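
To tie the backend together, here is a minimal sketch of a payload object wired to the hooks that Delayed::Backend::Base looks for above. The class name and hook bodies are hypothetical; only #perform is required, the other methods are optional and are only called when the payload responds to them:

  class ScrubAttachments
    def perform
      # the actual work happens here
    end

    # called by Job#reschedule before the retry is queued
    def on_failure(error); end

    # called by Job#permanent_failure once max_attempts is exhausted
    def on_permanent_failure(error); end

    # lets the payload pick its own backoff instead of now + attempts**4 + 5
    def reschedule_at(now, attempts)
      now + (attempts * 60)
    end

    # used by Job#name in place of the class name
    def display_name
      "ScrubAttachments"
    end
  end

  # Enqueue it, matching the documented call shape in ClassMethods#enqueue:
  Delayed::Job.enqueue(ScrubAttachments.new, :priority => 10, :queue => 'slow_jobs', :max_attempts => 3)
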