drewda_delayed_job 3.0.3
- data/MIT-LICENSE +20 -0
- data/README.textile +314 -0
- data/contrib/delayed_job.monitrc +14 -0
- data/contrib/delayed_job_multiple.monitrc +23 -0
- data/lib/delayed/backend/base.rb +184 -0
- data/lib/delayed/backend/shared_spec.rb +595 -0
- data/lib/delayed/command.rb +108 -0
- data/lib/delayed/deserialization_error.rb +4 -0
- data/lib/delayed/lifecycle.rb +84 -0
- data/lib/delayed/message_sending.rb +54 -0
- data/lib/delayed/performable_mailer.rb +21 -0
- data/lib/delayed/performable_method.rb +37 -0
- data/lib/delayed/plugin.rb +15 -0
- data/lib/delayed/plugins/clear_locks.rb +15 -0
- data/lib/delayed/psych_ext.rb +132 -0
- data/lib/delayed/railtie.rb +16 -0
- data/lib/delayed/recipes.rb +50 -0
- data/lib/delayed/serialization/active_record.rb +19 -0
- data/lib/delayed/syck_ext.rb +34 -0
- data/lib/delayed/tasks.rb +11 -0
- data/lib/delayed/worker.rb +242 -0
- data/lib/delayed/yaml_ext.rb +10 -0
- data/lib/delayed_job.rb +21 -0
- data/lib/generators/delayed_job/delayed_job_generator.rb +11 -0
- data/lib/generators/delayed_job/templates/script +5 -0
- data/recipes/delayed_job.rb +1 -0
- data/spec/autoloaded/clazz.rb +7 -0
- data/spec/autoloaded/instance_clazz.rb +6 -0
- data/spec/autoloaded/instance_struct.rb +6 -0
- data/spec/autoloaded/struct.rb +7 -0
- data/spec/delayed/backend/test.rb +113 -0
- data/spec/delayed/serialization/test.rb +0 -0
- data/spec/fixtures/bad_alias.yml +1 -0
- data/spec/lifecycle_spec.rb +67 -0
- data/spec/message_sending_spec.rb +113 -0
- data/spec/performable_mailer_spec.rb +44 -0
- data/spec/performable_method_spec.rb +89 -0
- data/spec/sample_jobs.rb +75 -0
- data/spec/spec_helper.rb +53 -0
- data/spec/test_backend_spec.rb +13 -0
- data/spec/worker_spec.rb +19 -0
- data/spec/yaml_ext_spec.rb +41 -0
- metadata +214 -0
data/MIT-LICENSE
ADDED
@@ -0,0 +1,20 @@
Copyright (c) 2005 Tobias Luetke

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
data/README.textile
ADDED
@@ -0,0 +1,314 @@
The key difference in drewda_delayed_job is that jobs can be unique. All the relevant changes are in `lib/delayed/backend/base.rb`.

This should be used with the drewda_delayed_job_active_record gem.
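For example, assuming both forks are published under these gem names, the @Gemfile@ entries would look roughly like this:

<pre>
gem 'drewda_delayed_job'
gem 'drewda_delayed_job_active_record'
</pre>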
Note that I'm assuming you already have your migration set up to create a table that includes `is_locked` and `unique_id` fields:

<pre>
create_table "delayed_jobs", :force => true do |t|
  t.integer  "priority",   :default => 0
  t.integer  "attempts",   :default => 0
  t.text     "handler",    :limit => 2147483647
  t.string   "last_error"
  t.datetime "run_at"
  t.datetime "locked_at"
  t.datetime "failed_at"
  t.string   "locked_by"
  t.datetime "created_at", :null => false
  t.datetime "updated_at", :null => false
  t.string   "unique_id"
  t.boolean  "is_locked",  :default => false
  t.string   "queue"
end
</pre>
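Unique jobs are added with @enqueue_unique@, which skips the insert when an unlocked job with the same @unique_id@ already exists (see @enqueue_unique@ in @lib/delayed/backend/base.rb@ below). A minimal sketch, reusing the @NewsletterJob@ defined later in this README and an arbitrary @unique_id@:

<pre>
Delayed::Job.enqueue_unique NewsletterJob.new('lorem ipsum...', emails),
                            :unique_id => 'weekly-newsletter'
</pre>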
h1. Delayed::Job "!http://travis-ci.org/collectiveidea/delayed_job.png!":http://travis-ci.org/collectiveidea/delayed_job "!https://gemnasium.com/collectiveidea/delayed_job.png?travis!":https://gemnasium.com/collectiveidea/delayed_job

Delayed_job (or DJ) encapsulates the common pattern of asynchronously executing longer tasks in the background.

It is a direct extraction from Shopify where the job table is responsible for a multitude of core tasks. Amongst those tasks are:

* sending massive newsletters
* image resizing
* http downloads
* updating smart collections
* updating solr, our search server, after product changes
* batch imports
* spam checks

"Follow us on Twitter":https://twitter.com/delayedjob to get updates and notices about new releases.

h2. Installation

delayed_job 3.0.0 only supports Rails 3.0+. See the "2.0 branch":https://github.com/collectiveidea/delayed_job/tree/v2.0 for Rails 2.

delayed_job supports multiple backends for storing the job queue. "See the wiki for other backends":http://wiki.github.com/collectiveidea/delayed_job/backends.

If you plan to use delayed_job with Active Record, add @delayed_job_active_record@ to your @Gemfile@.

<pre>
gem 'delayed_job_active_record'
</pre>

If you plan to use delayed_job with Mongoid, add @delayed_job_mongoid@ to your @Gemfile@.

<pre>
gem 'delayed_job_mongoid'
</pre>

Run @bundle install@ to install the backend and delayed_job gems.

The Active Record backend requires a jobs table. You can create that table by running the following command:

<pre>
$ rails generate delayed_job:active_record
$ rake db:migrate
</pre>

h3. Upgrading from 2.x to 3.0.0 on Active Record

Delayed Job 3.0.0 introduces a new column to the delayed_jobs table.

If you're upgrading from Delayed Job 2.x, run the upgrade generator to create a migration to add the column.

<pre>
$ rails generate delayed_job:upgrade
$ rake db:migrate
</pre>
h2. Queuing Jobs

Call @.delay.method(params)@ on any object and it will be processed in the background.

<pre>
# without delayed_job
@user.activate!(@device)

# with delayed_job
@user.delay.activate!(@device)
</pre>

If a method should always be run in the background, you can call @#handle_asynchronously@ after the method declaration:

<pre>
class Device
  def deliver
    # long running method
  end
  handle_asynchronously :deliver
end

device = Device.new
device.deliver
</pre>

handle_asynchronously can take as options anything you can pass to delay. In addition, the values can be Proc objects, allowing call-time evaluation. Some examples:

<pre>
class LongTasks
  def send_mailer
    # Some other code
  end
  handle_asynchronously :send_mailer, :priority => 20

  def in_the_future
    # Some other code
  end
  # 5.minutes.from_now will be evaluated when in_the_future is called
  handle_asynchronously :in_the_future, :run_at => Proc.new { 5.minutes.from_now }

  def self.when_to_run
    2.hours.from_now
  end

  def call_a_class_method
    # Some other code
  end
  handle_asynchronously :call_a_class_method, :run_at => Proc.new { when_to_run }

  attr_reader :how_important

  def call_an_instance_method
    # Some other code
  end
  handle_asynchronously :call_an_instance_method, :priority => Proc.new {|i| i.how_important }
end
</pre>
h3. Rails 3 Mailers

Due to how mailers are implemented in Rails 3, we had to do a little workaround to get delayed_job to work.

<pre>
# without delayed_job
Notifier.signup(@user).deliver

# with delayed_job
Notifier.delay.signup(@user)
</pre>

Remove the @.deliver@ method to make it work. It's not ideal, but it's the best we could do for now.

h3. Named Queues

DJ 3 introduces Resque-style named queues while still retaining DJ-style priority. The goal is to provide a system for grouping tasks to be worked by separate pools of workers, which may be scaled and controlled individually.

Jobs can be assigned to a queue by setting the @queue@ option:

<pre>
object.delay(:queue => 'tracking').method

Delayed::Job.enqueue job, :queue => 'tracking'

handle_asynchronously :tweet_later, :queue => 'tweets'
</pre>
h2. Running Jobs

@script/delayed_job@ can be used to manage a background process which will start working off jobs.

To do so, add @gem "daemons"@ to your @Gemfile@ and make sure you've run @rails generate delayed_job@.

You can then do the following:

<pre>
$ RAILS_ENV=production script/delayed_job start
$ RAILS_ENV=production script/delayed_job stop

# Runs two workers in separate processes.
$ RAILS_ENV=production script/delayed_job -n 2 start
$ RAILS_ENV=production script/delayed_job stop

# Set the --queue or --queues option to work from a particular queue.
$ RAILS_ENV=production script/delayed_job --queue=tracking start
$ RAILS_ENV=production script/delayed_job --queues=mailers,tasks start
</pre>

Workers can be running on any computer, as long as they have access to the database and their clocks are in sync. Keep in mind that each worker will check the database at least every 5 seconds.

You can also invoke @rake jobs:work@, which will start working off jobs. You can cancel the rake task with @CTRL-C@.

Work off queues by setting the @QUEUE@ or @QUEUES@ environment variable.

<pre>
QUEUE=tracking rake jobs:work
QUEUES=mailers,tasks rake jobs:work
</pre>

h2. Custom Jobs

Jobs are simple Ruby objects with a method called perform. Any object which responds to perform can be stuffed into the jobs table. Job objects are serialized to YAML so that they can later be resurrected by the job runner.

<pre>
class NewsletterJob < Struct.new(:text, :emails)
  def perform
    emails.each { |e| NewsletterMailer.deliver_text_to_email(text, e) }
  end
end

Delayed::Job.enqueue NewsletterJob.new('lorem ipsum...', Customers.find(:all).collect(&:email))
</pre>
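@Delayed::Job.enqueue@ also accepts an options hash; per the @enqueue@ method in @lib/delayed/backend/base.rb@ below, @:priority@ and @:run_at@ are passed this way. A small sketch with arbitrary values:

<pre>
Delayed::Job.enqueue NewsletterJob.new('lorem ipsum...', emails),
                     :priority => 10, :run_at => 5.minutes.from_now
</pre>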
h2. Hooks

You can define hooks on your job that will be called at different stages in the process:

<pre>
class ParanoidNewsletterJob < NewsletterJob
  def enqueue(job)
    record_stat 'newsletter_job/enqueue'
  end

  def perform
    emails.each { |e| NewsletterMailer.deliver_text_to_email(text, e) }
  end

  def before(job)
    record_stat 'newsletter_job/start'
  end

  def after(job)
    record_stat 'newsletter_job/after'
  end

  def success(job)
    record_stat 'newsletter_job/success'
  end

  def error(job, exception)
    Airbrake.notify(exception)
  end

  def failure
    page_sysadmin_in_the_middle_of_the_night
  end
end
</pre>

h2. Gory Details

The library revolves around a delayed_jobs table which looks as follows:

<pre>
create_table :delayed_jobs, :force => true do |table|
  table.integer  :priority, :default => 0 # Allows some jobs to jump to the front of the queue
  table.integer  :attempts, :default => 0 # Provides for retries, but still fail eventually.
  table.text     :handler                 # YAML-encoded string of the object that will do work
  table.text     :last_error              # reason for last failure (See Note below)
  table.datetime :run_at                  # When to run. Could be Time.zone.now for immediately, or sometime in the future.
  table.datetime :locked_at               # Set when a client is working on this object
  table.datetime :failed_at               # Set when all retries have failed (actually, by default, the record is deleted instead)
  table.string   :locked_by               # Who is working on this object (if locked)
  table.string   :queue                   # The name of the queue this job is in
  table.timestamps
end
</pre>

On failure, the job is scheduled again in 5 seconds + N ** 4, where N is the number of retries.

The default Worker.max_attempts is 25. After this, the job is either deleted (the default) or left in the database with "failed_at" set.
With the default of 25 attempts, the last retry will be 20 days later, with the last interval being almost 100 hours.
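Both the retry limit and the backoff can be overridden per job class: the backend asks the payload object for @max_attempts@ and @reschedule_at@ when it responds to them (see @lib/delayed/backend/base.rb@ below). A sketch using a hypothetical @SyncJob@:

<pre>
class SyncJob < Struct.new(:account_id)
  def perform
    # talk to a slow external service
  end

  # Retry at a fixed 10-minute interval instead of the default 5 + attempts ** 4 seconds.
  def reschedule_at(current_time, attempts)
    current_time + 10.minutes
  end

  # Give up after 5 attempts instead of Worker.max_attempts.
  def max_attempts
    5
  end
end
</pre>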
The default Worker.max_run_time is 4.hours. If your job takes longer than that, another computer could pick it up. It's up to you to
make sure your job doesn't exceed this time. You should set this to the longest time you think the job could take.

By default, it will delete failed jobs (and it always deletes successful jobs). If you want to keep failed jobs, set
Delayed::Worker.destroy_failed_jobs = false. The failed jobs will be marked with a non-null failed_at.

By default all jobs are scheduled with priority = 0, which is top priority. You can change this by setting Delayed::Worker.default_priority to something else. Lower numbers have higher priority.

The default behavior is to read 5 jobs from the queue when finding an available job. You can configure this by setting Delayed::Worker.read_ahead.

It is possible to disable delayed jobs for testing purposes. Set Delayed::Worker.delay_jobs = false to execute all jobs in real time.

Here is an example of changing job parameters in Rails:

<pre>
# config/initializers/delayed_job_config.rb
Delayed::Worker.destroy_failed_jobs = false
Delayed::Worker.sleep_delay = 60
Delayed::Worker.max_attempts = 3
Delayed::Worker.max_run_time = 5.minutes
Delayed::Worker.read_ahead = 10
Delayed::Worker.delay_jobs = !Rails.env.test?
</pre>

h3. Cleaning up

You can invoke @rake jobs:clear@ to delete all jobs in the queue.

h2. Mailing List

Join us on the "mailing list":http://groups.google.com/group/delayed_job

h2. How to contribute

If you find what looks like a bug:

# Search the "mailing list":http://groups.google.com/group/delayed_job to see if anyone else has had the same issue.
# Check the "GitHub issue tracker":http://github.com/collectiveidea/delayed_job/issues/ to see if anyone else has reported the issue.
# If you don't see anything, create an issue with information on how to reproduce it.

If you want to contribute an enhancement or a fix:

# Fork the project on GitHub.
# Make your changes with tests.
# Commit the changes without making changes to the Rakefile or any other files that aren't related to your enhancement or fix.
# Send a pull request.
data/contrib/delayed_job.monitrc
ADDED
@@ -0,0 +1,14 @@
# an example Monit configuration file for delayed_job
# See: http://stackoverflow.com/questions/1226302/how-to-monitor-delayedjob-with-monit/1285611
#
# To use:
# 1. copy to /var/www/apps/{app_name}/shared/delayed_job.monitrc
# 2. replace {app_name} as appropriate
# 3. add this to your /etc/monit/monitrc
#
# include /var/www/apps/{app_name}/shared/delayed_job.monitrc

check process delayed_job
  with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.pid
  start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start"
  stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop"
data/contrib/delayed_job_multiple.monitrc
ADDED
@@ -0,0 +1,23 @@
# an example Monit configuration file for delayed_job running multiple processes
#
# To use:
# 1. copy to /var/www/apps/{app_name}/shared/delayed_job.monitrc
# 2. replace {app_name} as appropriate
# 3. add this to your /etc/monit/monitrc
#
# include /var/www/apps/{app_name}/shared/delayed_job.monitrc

check process delayed_job_0
  with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.0.pid
  start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 0"
  stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 0"

check process delayed_job_1
  with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.1.pid
  start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 1"
  stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 1"

check process delayed_job_2
  with pidfile /var/www/apps/{app_name}/shared/pids/delayed_job.2.pid
  start program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job start -i 2"
  stop program = "/usr/bin/env RAILS_ENV=production /var/www/apps/{app_name}/current/script/delayed_job stop -i 2"
data/lib/delayed/backend/base.rb
ADDED
@@ -0,0 +1,184 @@
module Delayed
  module Backend
    module Base
      def self.included(base)
        base.extend ClassMethods
      end

      module ClassMethods
        # Add a job to the queue
        def enqueue(*args)
          options = {
            :priority => Delayed::Worker.default_priority
          }.merge!(args.extract_options!)

          options[:payload_object] ||= args.shift

          if args.size > 0
            warn "[DEPRECATION] Passing multiple arguments to `#enqueue` is deprecated. Pass a hash with :priority and :run_at."
            options[:priority] = args.first || options[:priority]
            options[:run_at]   = args[1]
          end

          unless options[:payload_object].respond_to?(:perform)
            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
          end

          if Delayed::Worker.delay_jobs
            self.new(options).tap do |job|
              Delayed::Worker.lifecycle.run_callbacks(:enqueue, job) do
                job.hook(:enqueue)
                job.save
              end
            end
          else
            Delayed::Job.new(:payload_object => options[:payload_object]).tap do |job|
              job.invoke_job
            end
          end
        end

        # Add a unique job to the queue. If an unlocked job with the same
        # unique_id already exists, no new job is created.
        def enqueue_unique(*args)
          options = {
            :priority => Delayed::Worker.default_priority
          }.merge!(args.extract_options!)

          options[:payload_object] ||= args.shift

          if args.size > 0
            warn "[DEPRECATION] Passing multiple arguments to `#enqueue_unique` is deprecated. Pass a hash with :priority and :run_at."
            options[:unique_id] = args.first
            options[:priority]  = args[1] || options[:priority]
            options[:run_at]    = args[2]
          end

          unless options[:payload_object].respond_to?(:perform)
            raise ArgumentError, 'Cannot enqueue items which do not respond to perform'
          end

          uncached do
            if Job.where(:unique_id => options[:unique_id], :is_locked => false).length == 0
              if Delayed::Worker.delay_jobs
                self.new(options).tap do |job|
                  Delayed::Worker.lifecycle.run_callbacks(:enqueue, job) do
                    job.hook(:enqueue)
                    job.save
                  end
                end
              else
                Delayed::Job.new(:payload_object => options[:payload_object]).tap do |job|
                  job.invoke_job
                end
              end
            end
          end
        end

        def reserve(worker, max_run_time = Worker.max_run_time)
          # We get up to 5 jobs from the db. In case we cannot get exclusive access to a job we try the next.
          # This leads to a more even distribution of jobs across the worker processes.
          find_available(worker.name, worker.read_ahead, max_run_time).detect do |job|
            job.lock_exclusively!(max_run_time, worker.name)
          end
        end

        # Hook method that is called before a new worker is forked
        def before_fork
        end

        # Hook method that is called after a new worker is forked
        def after_fork
        end

        def work_off(num = 100)
          warn "[DEPRECATION] `Delayed::Job.work_off` is deprecated. Use `Delayed::Worker.new.work_off` instead."
          Delayed::Worker.new.work_off(num)
        end
      end

      def failed?
        !!failed_at
      end
      alias_method :failed, :failed?

      ParseObjectFromYaml = /\!ruby\/\w+\:([^\s]+)/

      def name
        @name ||= payload_object.respond_to?(:display_name) ?
                    payload_object.display_name :
                    payload_object.class.name
      rescue DeserializationError
        ParseObjectFromYaml.match(handler)[1]
      end

      def payload_object=(object)
        @payload_object = object
        self.handler = object.to_yaml
      end

      def payload_object
        @payload_object ||= YAML.load(self.handler)
      rescue TypeError, LoadError, NameError, ArgumentError => e
        raise DeserializationError,
          "Job failed to load: #{e.message}. Handler: #{handler.inspect}"
      end

      def invoke_job
        Delayed::Worker.lifecycle.run_callbacks(:invoke_job, self) do
          begin
            hook :before
            payload_object.perform
            hook :success
          rescue Exception => e
            hook :error, e
            raise e
          ensure
            hook :after
          end
        end
      end

      # Unlock this job (note: not saved to DB)
      def unlock
        self.locked_at = nil
        self.locked_by = nil
        self.is_locked = false
      end

      def hook(name, *args)
        if payload_object.respond_to?(name)
          method = payload_object.method(name)
          method.arity == 0 ? method.call : method.call(self, *args)
        end
      rescue DeserializationError
        # do nothing
      end

      def reschedule_at
        payload_object.respond_to?(:reschedule_at) ?
          payload_object.reschedule_at(self.class.db_time_now, attempts) :
          self.class.db_time_now + (attempts ** 4) + 5
      end

      def max_attempts
        payload_object.max_attempts if payload_object.respond_to?(:max_attempts)
      end

      def fail!
        update_attributes(:failed_at => self.class.db_time_now)
      end

      protected

      def set_default_run_at
        self.run_at ||= self.class.db_time_now
      end

      # Call during reload operation to clear out internal state
      def reset
        @payload_object = nil
      end
    end
  end
end