inst-jobs 0.11.0

Files changed (86)
  1. checksums.yaml +7 -0
  2. data/bin/inst_job +4 -0
  3. data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
  4. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
  5. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
  6. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
  7. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
  8. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
  9. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
  10. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
  11. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
  12. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
  13. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
  14. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
  15. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
  16. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
  17. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
  18. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
  19. data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
  20. data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +70 -0
  21. data/db/migrate/20151123210429_add_expires_at_to_jobs.rb +15 -0
  22. data/db/migrate/20151210162949_improve_max_concurrent.rb +50 -0
  23. data/lib/delayed/backend/active_record.rb +340 -0
  24. data/lib/delayed/backend/base.rb +335 -0
  25. data/lib/delayed/backend/redis/bulk_update.lua +50 -0
  26. data/lib/delayed/backend/redis/destroy_job.lua +2 -0
  27. data/lib/delayed/backend/redis/enqueue.lua +29 -0
  28. data/lib/delayed/backend/redis/fail_job.lua +5 -0
  29. data/lib/delayed/backend/redis/find_available.lua +3 -0
  30. data/lib/delayed/backend/redis/functions.rb +57 -0
  31. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
  32. data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
  33. data/lib/delayed/backend/redis/job.rb +497 -0
  34. data/lib/delayed/backend/redis/set_running.lua +5 -0
  35. data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
  36. data/lib/delayed/batch.rb +56 -0
  37. data/lib/delayed/cli.rb +101 -0
  38. data/lib/delayed/daemon.rb +103 -0
  39. data/lib/delayed/engine.rb +4 -0
  40. data/lib/delayed/job_tracking.rb +31 -0
  41. data/lib/delayed/lifecycle.rb +90 -0
  42. data/lib/delayed/log_tailer.rb +22 -0
  43. data/lib/delayed/message_sending.rb +134 -0
  44. data/lib/delayed/performable_method.rb +52 -0
  45. data/lib/delayed/periodic.rb +85 -0
  46. data/lib/delayed/plugin.rb +22 -0
  47. data/lib/delayed/pool.rb +161 -0
  48. data/lib/delayed/server/helpers.rb +28 -0
  49. data/lib/delayed/server/public/css/app.css +12 -0
  50. data/lib/delayed/server/public/js/app.js +132 -0
  51. data/lib/delayed/server/views/index.erb +90 -0
  52. data/lib/delayed/server/views/layout.erb +47 -0
  53. data/lib/delayed/server.rb +120 -0
  54. data/lib/delayed/settings.rb +90 -0
  55. data/lib/delayed/testing.rb +32 -0
  56. data/lib/delayed/version.rb +3 -0
  57. data/lib/delayed/work_queue/in_process.rb +13 -0
  58. data/lib/delayed/work_queue/parent_process.rb +180 -0
  59. data/lib/delayed/worker.rb +234 -0
  60. data/lib/delayed/yaml_extensions.rb +109 -0
  61. data/lib/delayed_job.rb +46 -0
  62. data/lib/inst-jobs.rb +1 -0
  63. data/spec/active_record_job_spec.rb +246 -0
  64. data/spec/delayed/cli_spec.rb +23 -0
  65. data/spec/delayed/daemon_spec.rb +35 -0
  66. data/spec/delayed/server_spec.rb +63 -0
  67. data/spec/delayed/settings_spec.rb +32 -0
  68. data/spec/delayed/work_queue/in_process_spec.rb +31 -0
  69. data/spec/delayed/work_queue/parent_process_spec.rb +159 -0
  70. data/spec/delayed/worker_spec.rb +16 -0
  71. data/spec/gemfiles/32.gemfile +6 -0
  72. data/spec/gemfiles/40.gemfile +5 -0
  73. data/spec/gemfiles/41.gemfile +5 -0
  74. data/spec/gemfiles/42.gemfile +5 -0
  75. data/spec/migrate/20140924140513_add_story_table.rb +7 -0
  76. data/spec/redis_job_spec.rb +140 -0
  77. data/spec/sample_jobs.rb +28 -0
  78. data/spec/shared/delayed_batch.rb +85 -0
  79. data/spec/shared/delayed_method.rb +419 -0
  80. data/spec/shared/performable_method.rb +66 -0
  81. data/spec/shared/shared_backend.rb +819 -0
  82. data/spec/shared/testing.rb +48 -0
  83. data/spec/shared/worker.rb +378 -0
  84. data/spec/shared_jobs_specs.rb +15 -0
  85. data/spec/spec_helper.rb +97 -0
  86. metadata +390 -0

data/lib/delayed/backend/active_record.rb
@@ -0,0 +1,340 @@
+ class ActiveRecord::Base
+   def self.load_for_delayed_job(id)
+     if id
+       find(id)
+     else
+       super
+     end
+   end
+ end
+
+ module Delayed
+   module Backend
+     module ActiveRecord
+       # A job object that is persisted to the database.
+       # Contains the work object as a YAML field.
+       class Job < ::ActiveRecord::Base
+         include Delayed::Backend::Base
+         self.table_name = :delayed_jobs
+
+         def self.reconnect!
+           clear_all_connections!
+         end
+
+         # be aware that some strand functionality is controlled by triggers on
+         # the database. see
+         # db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb
+         #
+         # next_in_strand defaults to true. if we insert a new job, and it has a
+         # strand, and it's not the next in the strand, we set it to false.
+         #
+         # if we delete a job, and it has a strand, mark the next job in that
+         # strand to be next_in_strand
+         # (this is safe even if we're not deleting the job that was currently
+         # next_in_strand)
+
+         # postgresql needs this lock to be taken before the before_insert
+         # trigger starts, or we risk deadlock inside of the trigger when trying
+         # to raise the lock level
+         before_create :lock_strand_on_create
+         def lock_strand_on_create
+           if strand.present?
+             self.class.connection.execute("SELECT pg_advisory_xact_lock(#{self.class.connection.quote_table_name('half_md5_as_bigint')}(#{self.class.sanitize(strand)}))")
+           end
+         end
+
+         # This overwrites the previous behavior
+         # so rather than changing the strand and balancing at queue time,
+         # this keeps the strand intact and uses triggers to limit the number running
+         def self.n_strand_options(strand_name, num_strands)
+           {:strand => strand_name, :max_concurrent => num_strands}
+         end
+
+         def self.current
+           where("run_at<=?", db_time_now)
+         end
+
+         def self.future
+           where("run_at>?", db_time_now)
+         end
+
+         def self.failed
+           where("failed_at IS NOT NULL")
+         end
+
+         def self.running
+           where("locked_at IS NOT NULL AND locked_by<>'on hold'")
+         end
+
+         # a nice stress test:
+         # 10_000.times { |i| Kernel.send_later_enqueue_args(:system, { :strand => 's1', :run_at => (24.hours.ago + (rand(24.hours.to_i))) }, "echo #{i} >> test1.txt") }
+         # 500.times { |i| "ohai".send_later_enqueue_args(:reverse, { :run_at => (12.hours.ago + (rand(24.hours.to_i))) }) }
+         # then fire up your workers
+         # you can check out strand correctness: diff test1.txt <(sort -n test1.txt)
+         def self.ready_to_run
+           where("run_at<=? AND locked_at IS NULL AND next_in_strand=?", db_time_now, true)
+         end
+         def self.by_priority
+           order("priority ASC, run_at ASC")
+         end
+
+         # When a worker is exiting, make sure we don't have any locked jobs.
+         def self.clear_locks!(worker_name)
+           where(:locked_by => worker_name).update_all(:locked_by => nil, :locked_at => nil)
+         end
+
+         def self.strand_size(strand)
+           self.where(:strand => strand).count
+         end
+
+         def self.running_jobs()
+           self.running.order(:locked_at)
+         end
+
+         def self.scope_for_flavor(flavor, query)
+           scope = case flavor.to_s
+             when 'current'
+               self.current
+             when 'future'
+               self.future
+             when 'failed'
+               Delayed::Job::Failed
+             when 'strand'
+               self.where(:strand => query)
+             when 'tag'
+               self.where(:tag => query)
+             else
+               raise ArgumentError, "invalid flavor: #{flavor.inspect}"
+           end
+
+           if %w(current future).include?(flavor.to_s)
+             queue = query.presence || Delayed::Settings.queue
+             scope = scope.where(:queue => queue)
+           end
+
+           scope
+         end
+
+         # get a list of jobs of the given flavor in the given queue
+         # flavor is :current, :future, :failed, :strand or :tag
+         # depending on the flavor, query has a different meaning:
+         # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
+         # for :strand it's the strand name
+         # for :tag it's the tag name
+         # for :failed it's ignored
+         def self.list_jobs(flavor,
+                            limit,
+                            offset = 0,
+                            query = nil)
+           scope = self.scope_for_flavor(flavor, query)
+           order = flavor.to_s == 'future' ? 'run_at' : 'id desc'
+           scope.order(order).limit(limit).offset(offset).to_a
+         end
+
+         # get the total job count for the given flavor
+         # see list_jobs for documentation on arguments
+         def self.jobs_count(flavor,
+                             query = nil)
+           scope = self.scope_for_flavor(flavor, query)
+           scope.count
+         end
+
+         # perform a bulk update of a set of jobs
+         # action is :hold, :unhold, or :destroy
+         # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
+         # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
+         def self.bulk_update(action, opts)
+           raise("Can't #{action.to_s} failed jobs") if opts[:flavor].to_s == 'failed' && action.to_s != 'destroy'
+           scope = if opts[:ids]
+             if opts[:flavor] == 'failed'
+               Delayed::Job::Failed.where(:id => opts[:ids])
+             else
+               self.where(:id => opts[:ids])
+             end
+           elsif opts[:flavor]
+
+             self.scope_for_flavor(opts[:flavor], opts[:query])
+           end
+
+           return 0 unless scope
+
+           case action.to_s
+           when 'hold'
+             scope = scope.where(locked_by: nil)
+             scope.update_all(:locked_by => ON_HOLD_LOCKED_BY, :locked_at => db_time_now, :attempts => ON_HOLD_COUNT)
+           when 'unhold'
+             now = db_time_now
+             scope = scope.where(locked_by: ON_HOLD_LOCKED_BY)
+             scope.update_all(["locked_by = NULL, locked_at = NULL, attempts = 0, run_at = (CASE WHEN run_at > ? THEN run_at ELSE ? END), failed_at = NULL", now, now])
+           when 'destroy'
+             scope = scope.where("locked_by IS NULL OR locked_by=?", ON_HOLD_LOCKED_BY) unless opts[:flavor] == 'failed'
+             scope.delete_all
+           end
+         end
+
+         # returns a list of hashes { :tag => tag_name, :count => current_count }
+         # in descending count order
+         # flavor is :current or :all
+         def self.tag_counts(flavor,
+                             limit,
+                             offset = 0)
+           raise(ArgumentError, "invalid flavor: #{flavor}") unless %w(current all).include?(flavor.to_s)
+           scope = case flavor.to_s
+             when 'current'
+               self.current
+             when 'all'
+               self
+           end
+
+           scope = scope.group(:tag).offset(offset).limit(limit)
+           scope.order("COUNT(tag) DESC").count.map { |t,c| { :tag => t, :count => c } }
+         end
+
+         def self.maybe_silence_periodic_log(&block)
+           if Settings.silence_periodic_log
+             ::ActiveRecord::Base.logger.silence(&block)
+           else
+             block.call
+           end
+         end
+
+         def self.get_and_lock_next_available(worker_name,
+                                              queue = Delayed::Settings.queue,
+                                              min_priority = nil,
+                                              max_priority = nil)
+
+           check_queue(queue)
+           check_priorities(min_priority, max_priority)
+
+           loop do
+             jobs = maybe_silence_periodic_log do
+               find_available(Settings.fetch_batch_size, queue, min_priority, max_priority)
+             end
+             return nil if jobs.empty?
+             if Settings.select_random_from_batch
+               jobs = jobs.sort_by { rand }
+             end
+             job = jobs.detect do |job|
+               job.send(:lock_exclusively!, worker_name)
+             end
+             return job if job
+           end
+         end
+
+         def self.find_available(limit,
+                                 queue = Delayed::Settings.queue,
+                                 min_priority = nil,
+                                 max_priority = nil)
+           all_available(queue, min_priority, max_priority).limit(limit).to_a
+         end
+
+         def self.all_available(queue = Delayed::Settings.queue,
+                                min_priority = nil,
+                                max_priority = nil)
+           min_priority ||= Delayed::MIN_PRIORITY
+           max_priority ||= Delayed::MAX_PRIORITY
+
+           check_queue(queue)
+           check_priorities(min_priority, max_priority)
+
+           self.ready_to_run.
+             where(:priority => min_priority..max_priority, :queue => queue).
+             by_priority
+         end
+
+         # used internally by create_singleton to take the appropriate lock
+         # depending on the db driver
+         def self.transaction_for_singleton(strand)
+           self.transaction do
+             connection.execute(sanitize_sql(["SELECT pg_advisory_xact_lock(#{connection.quote_table_name('half_md5_as_bigint')}(?))", strand]))
+             yield
+           end
+         end
+
+         # Create the job on the specified strand, but only if there aren't any
+         # other non-running jobs on that strand.
+         # (in other words, the job will still be created if there's another job
+         # on the strand but it's already running)
+         def self.create_singleton(options)
+           strand = options[:strand]
+           transaction_for_singleton(strand) do
+             job = self.where(:strand => strand, :locked_at => nil).order(:id).first
+             new_job = new(options)
+             if job
+               new_job.initialize_defaults
+               job.run_at = [job.run_at, new_job.run_at].min
+               job.save! if job.changed?
+             else
+               new_job.save!
+             end
+             job || new_job
+           end
+         end
+
+         # Lock this job for this worker.
+         # Returns true if we have the lock, false otherwise.
+         #
+         # It's important to note that for performance reasons, this method does
+         # not re-check the strand constraints -- so you could manually lock a
+         # job using this method that isn't the next to run on its strand.
+         def lock_exclusively!(worker)
+           now = self.class.db_time_now
+           # We don't own this job so we will update the locked_by name and the locked_at
+           affected_rows = self.class.where("id=? AND locked_at IS NULL AND run_at<=?", self, now).update_all(:locked_at => now, :locked_by => worker)
+           if affected_rows == 1
+             mark_as_locked!(now, worker)
+             return true
+           else
+             return false
+           end
+         end
+
+         def mark_as_locked!(time, worker)
+           self.locked_at = time
+           self.locked_by = worker
+           # We cheated ActiveRecord::Dirty with the update_all calls above, so
+           # we'll fix things up here.
+           if respond_to?(:changes_applied)
+             changes_applied
+           else
+             changed_attributes['locked_at'] = time
+             changed_attributes['locked_by'] = worker
+           end
+         end
+         protected :lock_exclusively!, :mark_as_locked!
+
+         def create_and_lock!(worker)
+           raise "job already exists" unless new_record?
+           self.locked_at = Delayed::Job.db_time_now
+           self.locked_by = worker
+           save!
+         end
+
+         def fail!
+           attrs = self.attributes
+           attrs['original_job_id'] = attrs.delete('id')
+           attrs['failed_at'] ||= self.class.db_time_now
+           attrs.delete('next_in_strand')
+           attrs.delete('max_concurrent')
+           self.class.transaction do
+             failed_job = Failed.create(attrs)
+             self.destroy
+             failed_job
+           end
+         rescue
+           # we got an error while failing the job -- we need to at least get
+           # the job out of the queue
+           self.destroy
+           # re-raise so the worker logs the error, at least
+           raise
+         end
+
+         class Failed < Job
+           include Delayed::Backend::Base
+           self.table_name = :failed_jobs
+         end
+       end
+
+     end
+   end
+ end
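
For orientation, here is a minimal usage sketch (not part of the diff above) of how a consumer might drive this ActiveRecord backend: claim a job with get_and_lock_next_available, run its payload, and either destroy or reschedule it. The worker-name format and the surrounding control flow are assumptions for illustration; the gem's real consumer lives in data/lib/delayed/worker.rb.

# Illustrative sketch only -- not code from this gem. Assumes this backend is
# wired up as Delayed::Job and the delayed_jobs table exists.
require 'socket'

worker_name = "#{Socket.gethostname}:#{Process.pid}" # hypothetical lock identifier

# Claim the next runnable job (honoring queue, priority range, and
# next_in_strand), run its payload, then remove it or push it back on failure.
if (job = Delayed::Job.get_and_lock_next_available(worker_name))
  begin
    job.invoke_job
    job.destroy
  rescue => e
    job.reschedule(e)
  end
end

# The same class also exposes the admin-style queries defined above:
Delayed::Job.jobs_count(:current)                     # runnable jobs in the default queue
Delayed::Job.list_jobs(:strand, 20, 0, "s1")          # first 20 jobs on strand "s1"
Delayed::Job.bulk_update(:hold, :flavor => :current)  # hold all unlocked current jobs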

data/lib/delayed/backend/base.rb
@@ -0,0 +1,335 @@
+ module Delayed
+   module Backend
+     class DeserializationError < StandardError
+     end
+
+     class RecordNotFound < DeserializationError
+     end
+
+     class JobExpired < StandardError
+     end
+
+     module Base
+       ON_HOLD_LOCKED_BY = 'on hold'
+       ON_HOLD_COUNT = 50
+
+       def self.included(base)
+         base.extend ClassMethods
+         base.default_priority = Delayed::NORMAL_PRIORITY
+         base.before_save :initialize_defaults
+       end
+
+       module ClassMethods
+         attr_accessor :batches
+         attr_accessor :batch_enqueue_args
+         attr_accessor :default_priority
+
+         # Add a job to the queue
+         # The first argument should be an object that respond_to?(:perform)
+         # The rest should be named arguments, these keys are expected:
+         # :priority, :run_at, :queue, :strand, :singleton
+         # Example: Delayed::Job.enqueue(object, :priority => 0, :run_at => time, :queue => queue)
+         def enqueue(*args)
+           object = args.shift
+           unless object.respond_to?(:perform)
+             raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
+           end
+
+           options = Settings.default_job_options.merge(args.first || {})
+           options[:priority] ||= self.default_priority
+           options[:payload_object] = object
+           options[:queue] = Delayed::Settings.queue unless options.key?(:queue)
+           options[:max_attempts] ||= Delayed::Settings.max_attempts
+           options[:source] = Marginalia::Comment.construct_comment if defined?(Marginalia) && Marginalia::Comment.components
+
+           # If two parameters are given to n_strand, the first param is used
+           # as the strand name for looking up the Setting, while the second
+           # param is appended to make a unique set of strands.
+           #
+           # For instance, you can pass ["my_job_type", # root_account.global_id]
+           # to get a set of n strands per root account, and you can apply the
+           # same default to all.
+           if options[:n_strand]
+             strand_name, ext = options.delete(:n_strand)
+
+             if ext
+               full_strand_name = "#{strand_name}/#{ext}"
+               num_strands = Delayed::Settings.num_strands.call(full_strand_name)
+             else
+               full_strand_name = strand_name
+             end
+
+             num_strands ||= Delayed::Settings.num_strands.call(strand_name)
+             num_strands = num_strands ? num_strands.to_i : 1
+
+             options.merge!(n_strand_options(full_strand_name, num_strands))
+           end
+
+           if options[:singleton]
+             options[:strand] = options.delete :singleton
+             job = self.create_singleton(options)
+           elsif batches && options.slice(:strand, :run_at).empty?
+             batch_enqueue_args = options.slice(*self.batch_enqueue_args)
+             batches[batch_enqueue_args] << options
+             return true
+           else
+             job = self.create(options)
+           end
+
+           JobTracking.job_created(job)
+
+           job
+         end
+
+         # by default creates a new strand name randomly based on num_strands
+         # effectively balancing the load during queueing
+         # overwritten in ActiveRecord::Job to use triggers to balance at run time
+         def n_strand_options(strand_name, num_strands)
+           strand_num = num_strands > 1 ? rand(num_strands) + 1 : 1
+           strand_name += ":#{strand_num}" if strand_num > 1
+           {:strand => strand_name}
+         end
+
+         def in_delayed_job?
+           !!Thread.current[:in_delayed_job]
+         end
+
+         def in_delayed_job=(val)
+           Thread.current[:in_delayed_job] = val
+         end
+
+         def check_queue(queue)
+           raise(ArgumentError, "queue name can't be blank") if queue.blank?
+         end
+
+         def check_priorities(min_priority, max_priority)
+           if min_priority && min_priority < Delayed::MIN_PRIORITY
+             raise(ArgumentError, "min_priority #{min_priority} can't be less than #{Delayed::MIN_PRIORITY}")
+           end
+           if max_priority && max_priority > Delayed::MAX_PRIORITY
+             raise(ArgumentError, "max_priority #{max_priority} can't be greater than #{Delayed::MAX_PRIORITY}")
+           end
+         end
+
+         # Get the current time (UTC)
+         # Note: This does not ping the DB to get the time, so all your clients
+         # must have syncronized clocks.
+         def db_time_now
+           Time.now.utc
+         end
+
+         def unlock_orphaned_jobs(pid = nil, name = nil)
+           begin
+             name ||= Socket.gethostname
+           rescue
+             return 0
+           end
+           pid_regex = pid || '(\d+)'
+           regex = Regexp.new("^#{Regexp.escape(name)}:#{pid_regex}$")
+           unlocked_jobs = 0
+           running = false if pid
+           self.running_jobs.each do |job|
+             next unless job.locked_by =~ regex
+             unless pid
+               job_pid = $1.to_i
+               running = Process.kill(0, job_pid) rescue false
+             end
+             if !running
+               unlocked_jobs += 1
+               job.reschedule("process died")
+             end
+           end
+           unlocked_jobs
+         end
+       end
+
+       def failed?
+         failed_at
+       end
+       alias_method :failed, :failed?
+
+       def expired?
+         expires_at && (self.class.db_time_now >= expires_at)
+       end
+
+       # Reschedule the job in the future (when a job fails).
+       # Uses an exponential scale depending on the number of failed attempts.
+       def reschedule(error = nil, time = nil)
+         begin
+           obj = payload_object
+           obj.on_failure(error) if obj && obj.respond_to?(:on_failure)
+         rescue
+           # don't allow a failed deserialization to prevent rescheduling
+         end
+
+         self.attempts += 1
+         if self.attempts >= (self.max_attempts || Delayed::Settings.max_attempts)
+           permanent_failure error || "max attempts reached"
+         elsif expired?
+           permanent_failure error || "job has expired"
+         else
+           time ||= self.reschedule_at
+           self.run_at = time
+           self.unlock
+           self.save!
+         end
+       end
+
+       def permanent_failure(error)
+         begin
+           # notify the payload_object of a permanent failure
+           obj = payload_object
+           obj.on_permanent_failure(error) if obj && obj.respond_to?(:on_permanent_failure)
+         rescue DeserializationError
+           # don't allow a failed deserialization to prevent destroying the job
+         end
+
+         # optionally destroy the object
+         destroy_self = true
+         if Delayed::Worker.on_max_failures
+           destroy_self = Delayed::Worker.on_max_failures.call(self, error)
+         end
+
+         if destroy_self
+           self.destroy
+         else
+           self.fail!
+         end
+       end
+
+       def payload_object
+         @payload_object ||= deserialize(self['handler'].untaint)
+       end
+
+       def name
+         @name ||= begin
+           payload = payload_object
+           if payload.respond_to?(:display_name)
+             payload.display_name
+           else
+             payload.class.name
+           end
+         end
+       end
+
+       def full_name
+         obj = payload_object rescue nil
+         if obj && obj.respond_to?(:full_name)
+           obj.full_name
+         else
+           name
+         end
+       end
+
+       def payload_object=(object)
+         @payload_object = object
+         self['handler'] = object.to_yaml
+         self['tag'] = if object.respond_to?(:tag)
+           object.tag
+         elsif object.is_a?(Module)
+           "#{object}.perform"
+         else
+           "#{object.class}#perform"
+         end
+       end
+
+       # Moved into its own method so that new_relic can trace it.
+       def invoke_job
+         Delayed::Worker.lifecycle.run_callbacks(:invoke_job, self) do
+           Delayed::Job.in_delayed_job = true
+           begin
+             payload_object.perform
+           ensure
+             Delayed::Job.in_delayed_job = false
+             ::ActiveRecord::Base.clear_active_connections! unless Rails.env.test?
+           end
+         end
+       end
+
+       def batch?
+         payload_object.is_a?(Delayed::Batch::PerformableBatch)
+       end
+
+       # Unlock this job (note: not saved to DB)
+       def unlock
+         self.locked_at = nil
+         self.locked_by = nil
+       end
+
+       def locked?
+         !!(self.locked_at || self.locked_by)
+       end
+
+       def reschedule_at
+         new_time = self.class.db_time_now + (attempts ** 4) + 5
+         begin
+           if payload_object.respond_to?(:reschedule_at)
+             new_time = payload_object.reschedule_at(
+               self.class.db_time_now, attempts)
+           end
+         rescue
+           # TODO: just swallow errors from reschedule_at ?
+         end
+         new_time
+       end
+
+       def hold!
+         self.locked_by = ON_HOLD_LOCKED_BY
+         self.locked_at = self.class.db_time_now
+         self.attempts = ON_HOLD_COUNT
+         self.save!
+       end
+
+       def unhold!
+         self.locked_by = nil
+         self.locked_at = nil
+         self.attempts = 0
+         self.run_at = [self.class.db_time_now, self.run_at].max
+         self.failed_at = nil
+         self.save!
+       end
+
+       def on_hold?
+         self.locked_by == 'on hold' && self.locked_at && self.attempts == ON_HOLD_COUNT
+       end
+
+       private
+
+       ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/
+
+       def deserialize(source)
+         handler = nil
+         begin
+           handler = _yaml_deserialize(source)
+         rescue TypeError, ArgumentError
+           attempt_to_load_from_source(source)
+           handler = _yaml_deserialize(source)
+         end
+
+         return handler if handler.respond_to?(:perform)
+
+         raise DeserializationError,
+           'Job failed to load: Unknown handler. Try to manually require the appropriate file.'
+       rescue TypeError, LoadError, NameError => e
+         raise DeserializationError,
+           "Job failed to load: #{e.message}. Try to manually require the required file."
+       end
+
+       def _yaml_deserialize(source)
+         YAML.respond_to?(:unsafe_load) ? YAML.unsafe_load(source) : YAML.load(source)
+       end
+
+       def attempt_to_load_from_source(source)
+         if md = ParseObjectFromYaml.match(source)
+           md[1].constantize
+         end
+       end
+
+       public
+       def initialize_defaults
+         self.queue ||= Delayed::Settings.queue
+         self.run_at ||= self.class.db_time_now
+       end
+     end
+   end
+ end
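
To tie the shared enqueue interface to a concrete call, here is a hedged usage sketch of Delayed::Job.enqueue with the options documented in the comments above. MailerJob is a hypothetical payload class and the strand/singleton names are made up for illustration; the only contract enqueue imposes is that the payload respond_to?(:perform) and serialize cleanly to YAML.

# Illustrative sketch only -- not code from this gem.
class MailerJob
  def initialize(user_id)
    @user_id = user_id
  end

  def perform
    # deliver mail for @user_id ...
  end

  def on_permanent_failure(error)
    # invoked via permanent_failure once max_attempts is exhausted
  end
end

# Plain enqueue with the documented named options:
Delayed::Job.enqueue(MailerJob.new(42),
                     :priority => Delayed::NORMAL_PRIORITY,
                     :run_at   => Time.now.utc + 300,
                     :queue    => "mailers")

# Jobs sharing a strand run one at a time, in order (enforced by the
# next_in_strand triggers in the ActiveRecord backend):
Delayed::Job.enqueue(MailerJob.new(42), :strand => "mailers/user/42")

# A singleton collapses onto an existing pending job on the same strand
# instead of creating a duplicate (see create_singleton above):
Delayed::Job.enqueue(MailerJob.new(42), :singleton => "mailers/user/42")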