inst-jobs 0.11.0
- checksums.yaml +7 -0
- data/bin/inst_job +4 -0
- data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
- data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
- data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
- data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
- data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
- data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
- data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +70 -0
- data/db/migrate/20151123210429_add_expires_at_to_jobs.rb +15 -0
- data/db/migrate/20151210162949_improve_max_concurrent.rb +50 -0
- data/lib/delayed/backend/active_record.rb +340 -0
- data/lib/delayed/backend/base.rb +335 -0
- data/lib/delayed/backend/redis/bulk_update.lua +50 -0
- data/lib/delayed/backend/redis/destroy_job.lua +2 -0
- data/lib/delayed/backend/redis/enqueue.lua +29 -0
- data/lib/delayed/backend/redis/fail_job.lua +5 -0
- data/lib/delayed/backend/redis/find_available.lua +3 -0
- data/lib/delayed/backend/redis/functions.rb +57 -0
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
- data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
- data/lib/delayed/backend/redis/job.rb +497 -0
- data/lib/delayed/backend/redis/set_running.lua +5 -0
- data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
- data/lib/delayed/batch.rb +56 -0
- data/lib/delayed/cli.rb +101 -0
- data/lib/delayed/daemon.rb +103 -0
- data/lib/delayed/engine.rb +4 -0
- data/lib/delayed/job_tracking.rb +31 -0
- data/lib/delayed/lifecycle.rb +90 -0
- data/lib/delayed/log_tailer.rb +22 -0
- data/lib/delayed/message_sending.rb +134 -0
- data/lib/delayed/performable_method.rb +52 -0
- data/lib/delayed/periodic.rb +85 -0
- data/lib/delayed/plugin.rb +22 -0
- data/lib/delayed/pool.rb +161 -0
- data/lib/delayed/server/helpers.rb +28 -0
- data/lib/delayed/server/public/css/app.css +12 -0
- data/lib/delayed/server/public/js/app.js +132 -0
- data/lib/delayed/server/views/index.erb +90 -0
- data/lib/delayed/server/views/layout.erb +47 -0
- data/lib/delayed/server.rb +120 -0
- data/lib/delayed/settings.rb +90 -0
- data/lib/delayed/testing.rb +32 -0
- data/lib/delayed/version.rb +3 -0
- data/lib/delayed/work_queue/in_process.rb +13 -0
- data/lib/delayed/work_queue/parent_process.rb +180 -0
- data/lib/delayed/worker.rb +234 -0
- data/lib/delayed/yaml_extensions.rb +109 -0
- data/lib/delayed_job.rb +46 -0
- data/lib/inst-jobs.rb +1 -0
- data/spec/active_record_job_spec.rb +246 -0
- data/spec/delayed/cli_spec.rb +23 -0
- data/spec/delayed/daemon_spec.rb +35 -0
- data/spec/delayed/server_spec.rb +63 -0
- data/spec/delayed/settings_spec.rb +32 -0
- data/spec/delayed/work_queue/in_process_spec.rb +31 -0
- data/spec/delayed/work_queue/parent_process_spec.rb +159 -0
- data/spec/delayed/worker_spec.rb +16 -0
- data/spec/gemfiles/32.gemfile +6 -0
- data/spec/gemfiles/40.gemfile +5 -0
- data/spec/gemfiles/41.gemfile +5 -0
- data/spec/gemfiles/42.gemfile +5 -0
- data/spec/migrate/20140924140513_add_story_table.rb +7 -0
- data/spec/redis_job_spec.rb +140 -0
- data/spec/sample_jobs.rb +28 -0
- data/spec/shared/delayed_batch.rb +85 -0
- data/spec/shared/delayed_method.rb +419 -0
- data/spec/shared/performable_method.rb +66 -0
- data/spec/shared/shared_backend.rb +819 -0
- data/spec/shared/testing.rb +48 -0
- data/spec/shared/worker.rb +378 -0
- data/spec/shared_jobs_specs.rb +15 -0
- data/spec/spec_helper.rb +97 -0
- metadata +390 -0
data/lib/delayed/backend/redis/job.rb
ADDED
@@ -0,0 +1,497 @@
# This can't currently be made compatible with redis cluster, because the Lua functions
# access keys that aren't in their keys argument list (since they pop jobs off
# a queue and then update the job with that id).

# still TODO:
# * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
# * need a way to migrate between jobs backends
# * we need some auditors:
#   * fail jobs in running_jobs if they've timed out
#   * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
#   * have a master auditor that fails jobs if a whole pool dies
# * audit strands occasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
module Delayed::Backend::Redis
  require 'delayed/backend/redis/functions'

  class Job
    extend ActiveModel::Callbacks
    define_model_callbacks :create, :save
    include ActiveModel::Dirty
    include Delayed::Backend::Base
    # This redis instance needs to be set by the application during jobs configuration
    cattr_accessor :redis

    # An overview of where and when things are stored in redis:
    #
    # Jobs are given a UUID for an id, rather than an incrementing integer id.
    # The job attributes are then stored in a redis hash at job/<id>. Attribute
    # values are generally stored as their json representation, except for
    # timestamps, which are stored as floating point utc-time-since-unix-epoch
    # values, so that we can compare timestamps in Lua without a date parser.
    #
    # Jobs that are scheduled to run immediately (in the present/past) are
    # inserted into the queue named queue/<queue_name>. The queue is a sorted
    # set, with the value being the job id and the weight being a floating point
    # value, <priority>.<run_at>. This formatting is key to efficient
    # querying of the next job to run.
    #
    # Jobs that are scheduled to run in the future are not inserted into the
    # queue, but rather a future queue named queue/<queue_name>/future. This
    # queue is also a sorted set, with the value being the job id, but the weight
    # is just the <run_at> value.
    #
    # If the job is on a strand, the flow is different. First, it's inserted into
    # a list named strand/<strand>. When strand jobs are inserted into the
    # current jobs queue, we check if they're next to run in the strand. If not,
    # we give them a special priority that is greater than MAX_PRIORITY, so that
    # they won't run. When a strand job is finished, failed or deleted,
    # "tickle_strand" is called, which removes that job from the list and if that
    # job was at the front of the list, changes the priority on the next job so
    # that it's eligible to run.
    #
    # For singletons, the flow is the same as for other strand jobs, except that
    # the job is thrown out if there are already any non-running jobs in the
    # strand list.
    #
    # If a job fails, it's removed from the normal queues and inserted into the
    # failed_jobs sorted set, with job id as the value and failure time as the
    # key. The hash of job attributes is also renamed from job/<id> to
    # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
    # with AR jobs.
    #
    # We also insert into some other data structures for admin functionality.
    # tag_counts/current and tag_counts/all are sorted sets storing the count of
    # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
    #
    # Almost all of this happens in Lua functions, for atomicity. See the other
    # files in this directory -- functions.rb is a wrapper to call the lua
    # functions, and the individual functions are defined in .lua files in this
    # directory.

    # these key mappings are duplicated in the redis lua code, in include.lua
    module Keys
      RUNNING_JOBS = "running_jobs"
      FAILED_JOBS = "failed_jobs"
      JOB = proc { |id| "job/#{id}" }
      FAILED_JOB = proc { |id| "failed_job/#{id}" }
      QUEUE = proc { |name| "queue/#{name}" }
      FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
      STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
      TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
      TAG = proc { |tag| "tag/#{tag}" }
    end

    WAITING_STRAND_JOB_PRIORITY = 2000000
    if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
      # if you change this, note that the value is duplicated in include.lua
      raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
    end

    COLUMNS = []

    # We store time attributes in redis as floats so we don't have to do
    # timestamp parsing in lua.
    TIMESTAMP_COLUMNS = []
    INTEGER_COLUMNS = []

    def self.column(name, type)
      COLUMNS << name

      if type == :timestamp
        TIMESTAMP_COLUMNS << name
      elsif type == :integer
        INTEGER_COLUMNS << name
      end

      attr_reader(name)
      define_attribute_methods([name])
      # Custom attr_writer that updates the dirty status.
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{name}=(new_value)
          #{name}_will_change! unless new_value == self.#{name}
          @#{name} = new_value
        end
      EOS
    end

    column(:id, :string)
    column(:priority, :integer)
    column(:attempts, :integer)
    column(:handler, :string)
    column(:last_error, :string)
    column(:queue, :string)
    column(:run_at, :timestamp)
    column(:locked_at, :timestamp)
    column(:failed_at, :timestamp)
    column(:locked_by, :string)
    column(:created_at, :timestamp)
    column(:updated_at, :timestamp)
    column(:tag, :string)
    column(:max_attempts, :integer)
    column(:strand, :string)
    column(:source, :string)
    column(:expires_at, :timestamp)

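    # Illustrative aside (not part of the gem source): given the column class
    # macro above, a call like column(:priority, :integer) behaves roughly like
    # hand-writing
    #
    #   attr_reader :priority
    #   def priority=(new_value)
    #     priority_will_change! unless new_value == self.priority
    #     @priority = new_value
    #   end
    #
    # while also registering :priority in COLUMNS (so it round-trips through
    # the redis hash) and in INTEGER_COLUMNS (so it is cast with to_i on load).
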
    def initialize(attrs = {})
      attrs.each { |k, v| self.send("#{k}=", v) }
      self.priority ||= 0
      self.attempts ||= 0
      @new_record = true
    end

    def self.instantiate(attrs)
      result = new(attrs)
      result.instance_variable_set(:@new_record, false)
      result
    end

    def self.create(attrs = {})
      result = new(attrs)
      result.save
      result
    end

    def self.create!(attrs = {})
      result = new(attrs)
      result.save!
      result
    end

    def [](key)
      send(key)
    end

    def []=(key, value)
      send("#{key}=", value)
    end

    def self.find(ids)
      if Array === ids
        find_some(ids, {})
      else
        find_one(ids, {})
      end
    end

    def new_record?
      !!@new_record
    end

    def destroyed?
      !!@destroyed
    end

    def ==(other)
      other.is_a?(self.class) && id == other.id
    end

    def hash
      id.hash
    end

    def self.reconnect!
      # redis cluster responds to reconnect directly,
      # but individual redis needs it to be called on client
      redis.respond_to?(:reconnect) ?
        redis.reconnect :
        redis.client.reconnect
    end

    def self.functions
      @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
    end

    def self.find_one(id, options)
      job = self.get_with_ids([id]).first
      job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
    end

    def self.find_some(ids, options)
      self.get_with_ids(ids).compact
    end

    def self.get_with_ids(ids)
      ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
    end

    def self.key_for_job_id(job_id)
      Keys::JOB[job_id]
    end

    def self.get_and_lock_next_available(worker_name,
                                         queue = Delayed::Settings.queue,
                                         min_priority = Delayed::MIN_PRIORITY,
                                         max_priority = Delayed::MAX_PRIORITY)

      check_queue(queue)
      check_priorities(min_priority, max_priority)

      # as an optimization this lua function returns the hash of job attributes,
      # rather than just a job id, saving a round trip
      job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, db_time_now)
      instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
    end

    def self.find_available(limit,
                            queue = Delayed::Settings.queue,
                            min_priority = Delayed::MIN_PRIORITY,
                            max_priority = Delayed::MAX_PRIORITY)

      check_queue(queue)
      check_priorities(min_priority, max_priority)

      self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
    end

    # get a list of jobs of the given flavor in the given queue
    # flavor is :current, :future, :failed, :strand or :tag
    # depending on the flavor, query has a different meaning:
    # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
    # for :strand it's the strand name
    # for :tag it's the tag name
    # for :failed it's ignored
    def self.list_jobs(flavor,
                       limit,
                       offset = 0,
                       query = nil)
      case flavor.to_s
      when 'current'
        query ||= Delayed::Settings.queue
        check_queue(query)
        self.find(functions.find_available(query, limit, offset, 0, "+inf", db_time_now))
      when 'future'
        query ||= Delayed::Settings.queue
        check_queue(query)
        self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
      when 'failed'
        Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
      when 'strand'
        self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
      when 'tag'
        # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
        ids = redis.smembers(Keys::TAG[query])
        self.find(ids[offset, limit])
      else
        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
      end
    end

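    # Illustrative usage of the flavors documented above (not part of the gem
    # source; the strand and tag names are hypothetical):
    #
    #   Delayed::Job.list_jobs(:current, 25)                   # first 25 runnable jobs, default queue
    #   Delayed::Job.list_jobs(:strand, 10, 0, "user:42")      # first 10 jobs on that strand
    #   Delayed::Job.list_jobs(:tag, 50, 0, "Mailer#deliver")  # up to 50 jobs with that tag
    #   Delayed::Job.list_jobs(:failed, 20)                    # 20 most recently failed jobs
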
    # get the total job count for the given flavor
    # flavor is :current, :future or :failed
    # for the :failed flavor, queue is currently ignored
    def self.jobs_count(flavor,
                        queue = Delayed::Settings.queue)
      case flavor.to_s
      when 'current'
        check_queue(queue)
        redis.zcard(Keys::QUEUE[queue])
      when 'future'
        check_queue(queue)
        redis.zcard(Keys::FUTURE_QUEUE[queue])
      when 'failed'
        redis.zcard(Keys::FAILED_JOBS)
      else
        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
      end
    end

    def self.strand_size(strand)
      redis.llen(Keys::STRAND[strand])
    end

    def self.running_jobs()
      self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
    end

    def self.clear_locks!(worker_name)
      self.running_jobs.each do |job|
        # TODO: mark the job as failed one attempt
        job.unlock! if job.locked_by == worker_name
      end
      nil
    end

    # returns a list of hashes { :tag => tag_name, :count => current_count }
    # in descending count order
    # flavor is :current or :all
    def self.tag_counts(flavor,
                        limit,
                        offset = 0)
      raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
      key = Keys::TAG_COUNTS[flavor]
      redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
    end

    # perform a bulk update of a set of jobs
    # action is :hold, :unhold, or :destroy
    # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
    # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
    #
    # see the list_jobs action for the list of available flavors and the meaning
    # of opts[:query] for each
    def self.bulk_update(action, opts)
      if %w(current future).include?(opts[:flavor].to_s)
        opts[:query] ||= Delayed::Settings.queue
      end
      functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
    end

    def self.create_singleton(options)
      self.create!(options.merge(:singleton => true))
    end

    # not saved, just used as a marker when creating
    attr_accessor :singleton

    def lock_in_redis!(worker_name)
      self.locked_at = self.class.db_time_now
      self.locked_by = worker_name
      save
    end

    def unlock!
      self.locked_at = nil
      self.locked_by = nil
      save!
    end

    def save(*a)
      return false if destroyed?
      result = run_callbacks(:save) do
        if new_record?
          run_callbacks(:create) { create }
        else
          update
        end
      end
      changes_applied
      result
    end

    if Rails.version < "4.1"
      def changes_applied
        @previously_changed = changes
        @changed_attributes.clear
      end
    end

    def save!(*a)
      save(*a) || raise(RecordNotSaved)
    end

    def destroy
      self.class.functions.destroy_job(id, self.class.db_time_now)
      @destroyed = true
      freeze
    end

    # take this job off the strand, and queue up the next strand job if this job
    # was at the front
    def tickle_strand
      if strand.present?
        self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
      end
    end

    def create_and_lock!(worker_name)
      raise "job already exists" unless new_record?
      lock_in_redis!(worker_name)
    end

    def fail!
      self.failed_at = self.class.db_time_now
      save!
      redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
      tickle_strand
      self
    end

    protected

    def update_queues
      if failed_at
        self.class.functions.fail_job(id)
      elsif locked_at
        self.class.functions.set_running(id)
      elsif singleton
        job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
        # if create_singleton returns a different job id, that means this job got
        # deleted because there was already that other job on the strand. so
        # replace this job with the other for returning.
        if job_id != self.id
          singleton = self.class.find(job_id)
          singleton.run_at = [singleton.run_at, run_at].min
          singleton.save! if singleton.changed?
          COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
        end
      else
        self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
      end
    end

    def create
      self.id ||= SecureRandom.hex(16)
      self.created_at = self.updated_at = Time.now.utc
      save_job_to_redis
      update_queues

      @new_record = false
      self.id
    end

    def update
      self.updated_at = Time.now.utc
      save_job_to_redis
      update_queues
      true
    end

    def queue_score
      "#{priority}.#{run_at.to_i}".to_f
    end

    def save_job_to_redis
      to_delete = []
      attrs = {}
      COLUMNS.each do |k|
        v = send(k)
        if v.nil?
          to_delete << k if !new_record? && changed.include?(k.to_s)
        elsif v.is_a?(ActiveSupport::TimeWithZone) || v.is_a?(Time)
          attrs[k] = v.utc.to_f
        else
          attrs[k] = v.as_json
        end
      end
      key = Keys::JOB[id]
      redis.mapped_hmset(key, attrs)
      redis.hdel(key, to_delete) unless to_delete.empty?
    end

    def self.instantiate_from_attrs(redis_attrs)
      if redis_attrs['id'].present?
        attrs = redis_attrs.with_indifferent_access
        TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
        INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
        instantiate(attrs)
      else
        nil
      end
    end

    def global_id
      id
    end

    class Failed < Job
      include Delayed::Backend::Base
      def self.key_for_job_id(job_id)
        Keys::FAILED_JOB[job_id]
      end

      def original_job_id
        id
      end
    end
  end
end
data/lib/delayed/batch.rb
ADDED
@@ -0,0 +1,56 @@
module Delayed
  module Batch
    class PerformableBatch < Struct.new(:mode, :items)
      def initialize(mode, items)
        raise "unsupported mode" unless mode == :serial
        self.mode = mode
        self.items = items
      end

      def display_name
        "Delayed::Batch.#{mode}"
      end
      alias_method :tag, :display_name
      alias_method :full_name, :display_name

      def perform
        raise "can't perform a batch directly"
      end

      def jobs
        items.map { |opts| Delayed::Job.new(opts) }
      end
    end

    class << self
      def serial_batch(opts = {})
        prepare_batches(:serial, opts){ yield }
      end

      private
      def prepare_batches(mode, opts)
        raise "nested batching is not supported" if Delayed::Job.batches
        Delayed::Job.batches = Hash.new { |h,k| h[k] = [] }
        batch_enqueue_args = [:queue]
        batch_enqueue_args << :priority unless opts[:priority]
        Delayed::Job.batch_enqueue_args = batch_enqueue_args
        yield
      ensure
        batches = Delayed::Job.batches
        Delayed::Job.batches = nil
        batch_args = opts.slice(:priority)
        batches.each do |enqueue_args, batch|
          if batch.size == 0
            next
          elsif batch.size == 1
            args = batch.first.merge(batch_args)
            payload_object = args.delete(:payload_object)
            Delayed::Job.enqueue(payload_object, args)
          else
            Delayed::Job.enqueue(Delayed::Batch::PerformableBatch.new(mode, batch), enqueue_args.merge(batch_args))
          end
        end
      end
    end
  end
end
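
For context, a hedged usage sketch of the batching API above. It assumes the send_later-style enqueue helpers defined in message_sending.rb (listed in this gem); the report object and its methods are hypothetical.

    # Both enqueues are captured into Delayed::Job.batches instead of being
    # written immediately; when the block exits, prepare_batches enqueues one
    # PerformableBatch job that performs the captured items serially.
    Delayed::Batch.serial_batch do
      report.send_later(:generate)
      report.send_later(:deliver)
    end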
data/lib/delayed/cli.rb
ADDED
@@ -0,0 +1,101 @@
require 'optparse'

module Delayed
  class CLI
    def initialize(args = ARGV)
      @args = args
      # config that will be applied on Settings and passed to the created Pool
      @config = {}
      # CLI options that will be kept to this class
      @options = {
        :config_file => Settings.default_worker_config_name,
        :pid_folder => Settings.expand_rails_path("tmp/pids"),
        :tail_logs => true, # only in FG mode
      }
    end

    def run
      parse_cli_options!
      load_and_apply_config!

      command = @args.shift
      case command
      when 'start'
        exit 1 if daemon.status(print: :alive) == :running
        daemon.daemonize!
        start
      when 'stop'
        daemon.stop(kill: @options[:kill])
      when 'run'
        start
      when 'status'
        if daemon.status
          exit 0
        else
          exit 1
        end
      when 'restart'
        daemon.stop(kill: @options[:kill])
        daemon.daemonize!
        start
      when nil
        puts option_parser.to_s
      else
        raise("Unknown command: #{command.inspect}")
      end
    end

    def parse_cli_options!
      option_parser.parse!(@args)
      @options
    end

    protected

    def load_and_apply_config!
      @config = Settings.worker_config(@options[:config_file])
      Settings.apply_worker_config!(@config)
    end

    def option_parser
      @option_parser ||= OptionParser.new do |opts|
        opts.banner = "Usage #{$0} <command> <options>"
        opts.separator %{\nWhere <command> is one of:
  start      start the jobs daemon
  stop       stop the jobs daemon
  run        start and run in the foreground
  restart    stop and then start the jobs daemon
  status     show daemon status
}

        opts.separator "\n<options>"
        opts.on("-c", "--config [CONFIG_PATH]", "Use alternate config file (default #{@options[:config_file]})") { |c| @options[:config_file] = c }
        opts.on("-p", "--pid", "Use alternate folder for PID files (default #{@options[:pid_folder]})") { |p| @options[:pid_folder] = p }
        opts.on("--no-tail", "Don't tail the logs (only affects non-daemon mode)") { @options[:tail_logs] = false }
        opts.on("--with-prejudice", "When stopping, interrupt jobs in progress, instead of letting them drain") { @options[:kill] ||= true }
        opts.on("--with-extreme-prejudice", "When stopping, immediately kill jobs in progress, instead of letting them drain") { @options[:kill] = 9 }
        opts.on_tail("-h", "--help", "Show this message") { puts opts; exit }
      end
    end

    def daemon
      @daemon ||= Delayed::Daemon.new(@options[:pid_folder])
    end

    def start
      load_rails
      tail_rails_log unless daemon.daemonized?
      Delayed::Pool.new(@config).start
    end

    def load_rails
      require(Settings.expand_rails_path("config/environment.rb"))
      Dir.chdir(Rails.root)
    end

    def tail_rails_log
      return if !@options[:tail_logs]
      Delayed::LogTailer.new.run
    end
  end
end
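
To close the loop, a sketch of driving the CLI above programmatically from inside a Rails app, equivalent to invoking the bundled bin/inst_job script; the argument vector is illustrative, and start/stop/status/restart are dispatched the same way.

    require 'delayed/cli'

    # Same effect as `inst_job run --no-tail` from a shell: parse_cli_options!
    # consumes the flag, @args.shift picks up the "run" command, and start
    # boots a Delayed::Pool in the foreground without tailing the Rails log.
    Delayed::CLI.new(["run", "--no-tail"]).run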