canvas-jobs 0.9.0

This diff represents the content of publicly available package versions that have been released to one of the supported registries. The information contained in this diff is provided for informational purposes only and reflects changes between package versions as they appear in their respective public registries.
Files changed (61) hide show
  1. checksums.yaml +7 -0
  2. data/db/migrate/20101216224513_create_delayed_jobs.rb +40 -0
  3. data/db/migrate/20110208031356_add_delayed_jobs_tag.rb +14 -0
  4. data/db/migrate/20110426161613_add_delayed_jobs_max_attempts.rb +13 -0
  5. data/db/migrate/20110516225834_add_delayed_jobs_strand.rb +14 -0
  6. data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +26 -0
  7. data/db/migrate/20110610213249_optimize_delayed_jobs.rb +40 -0
  8. data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +52 -0
  9. data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +31 -0
  10. data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +15 -0
  11. data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +80 -0
  12. data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +15 -0
  13. data/db/migrate/20120608191051_add_jobs_run_at_index.rb +15 -0
  14. data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +13 -0
  15. data/db/migrate/20140505215131_add_failed_jobs_original_job_id.rb +13 -0
  16. data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +13 -0
  17. data/db/migrate/20140505223637_drop_failed_jobs_original_id.rb +13 -0
  18. data/db/migrate/20140512213941_add_source_to_jobs.rb +15 -0
  19. data/lib/canvas-jobs.rb +1 -0
  20. data/lib/delayed/backend/active_record.rb +297 -0
  21. data/lib/delayed/backend/base.rb +317 -0
  22. data/lib/delayed/backend/redis/bulk_update.lua +40 -0
  23. data/lib/delayed/backend/redis/destroy_job.lua +2 -0
  24. data/lib/delayed/backend/redis/enqueue.lua +29 -0
  25. data/lib/delayed/backend/redis/fail_job.lua +5 -0
  26. data/lib/delayed/backend/redis/find_available.lua +3 -0
  27. data/lib/delayed/backend/redis/functions.rb +57 -0
  28. data/lib/delayed/backend/redis/get_and_lock_next_available.lua +17 -0
  29. data/lib/delayed/backend/redis/includes/jobs_common.lua +203 -0
  30. data/lib/delayed/backend/redis/job.rb +481 -0
  31. data/lib/delayed/backend/redis/set_running.lua +5 -0
  32. data/lib/delayed/backend/redis/tickle_strand.lua +2 -0
  33. data/lib/delayed/batch.rb +56 -0
  34. data/lib/delayed/engine.rb +4 -0
  35. data/lib/delayed/job_tracking.rb +31 -0
  36. data/lib/delayed/lifecycle.rb +83 -0
  37. data/lib/delayed/message_sending.rb +130 -0
  38. data/lib/delayed/performable_method.rb +42 -0
  39. data/lib/delayed/periodic.rb +81 -0
  40. data/lib/delayed/pool.rb +335 -0
  41. data/lib/delayed/settings.rb +32 -0
  42. data/lib/delayed/version.rb +3 -0
  43. data/lib/delayed/worker.rb +213 -0
  44. data/lib/delayed/yaml_extensions.rb +63 -0
  45. data/lib/delayed_job.rb +40 -0
  46. data/spec/active_record_job_spec.rb +61 -0
  47. data/spec/gemfiles/32.gemfile +6 -0
  48. data/spec/gemfiles/40.gemfile +6 -0
  49. data/spec/gemfiles/41.gemfile +6 -0
  50. data/spec/gemfiles/42.gemfile +6 -0
  51. data/spec/migrate/20140924140513_add_story_table.rb +7 -0
  52. data/spec/redis_job_spec.rb +77 -0
  53. data/spec/sample_jobs.rb +26 -0
  54. data/spec/shared/delayed_batch.rb +85 -0
  55. data/spec/shared/delayed_method.rb +419 -0
  56. data/spec/shared/performable_method.rb +52 -0
  57. data/spec/shared/shared_backend.rb +836 -0
  58. data/spec/shared/worker.rb +291 -0
  59. data/spec/shared_jobs_specs.rb +13 -0
  60. data/spec/spec_helper.rb +91 -0
  61. metadata +329 -0
@@ -0,0 +1,40 @@
-- Bulk job state changes: hold, unhold, or destroy a set of jobs.
-- ARGV: action ('hold'|'unhold'|'destroy'),
--       id_string (comma-separated job ids; used when flavor is empty),
--       flavor ('current'|'future'|'strand'|'tag' or '' for explicit ids),
--       query (queue/strand/tag name, meaning depends on flavor),
--       now (epoch time as a float)
local action, id_string, flavor, query, now = unpack(ARGV)

local ids = {}

if string.len(flavor) > 0 then
  -- select all job ids of the given flavor
  if flavor == 'current' then
    ids = redis.call('ZRANGE', Keys.queue(query), 0, -1)
  elseif flavor == 'future' then
    ids = redis.call('ZRANGE', Keys.future_queue(query), 0, -1)
  elseif flavor == 'strand' then
    ids = redis.call('LRANGE', Keys.strand(query), 0, -1)
  elseif flavor == 'tag' then
    ids = redis.call('SMEMBERS', Keys.tag(query))
  end
else
  -- can't pass an array to redis/lua, so we split the string here
  for id in string.gmatch(id_string, "([%w-]+)") do
    if job_exists(id) then
      table.insert(ids, id)
    end
  end
end

for _, job_id in ipairs(ids) do
  if action == 'hold' then
    local queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'strand'))
    remove_from_queues(job_id, queue, strand)
    -- mark the job as locked "on hold" with attempts maxed out so it won't
    -- be picked up by workers until unheld
    redis.call('HMSET', Keys.job(job_id), 'locked_at', now, 'locked_by', 'on hold', 'attempts', 50)
  elseif action == 'unhold' then
    -- only the queue is needed here (the original fetch of locked_by was unused)
    local queue = redis.call('HGET', Keys.job(job_id), 'queue')
    add_to_queues(job_id, queue, now)
    redis.call('HDEL', Keys.job(job_id), 'locked_at', 'locked_by')
    redis.call('HMSET', Keys.job(job_id), 'attempts', 0)
  elseif action == 'destroy' then
    destroy_job(job_id, now)
  end
end

-- returns the # of jobs matching the query, not necessarily the # whose state was changed
return table.getn(ids)
@@ -0,0 +1,2 @@
-- Destroy a single job.
-- ARGV: job_id, now (epoch time as a float)
destroy_job(ARGV[1], ARGV[2])
@@ -0,0 +1,29 @@
-- Enqueue a job, optionally as a strand singleton.
-- ARGV: job_id, queue, strand, now (epoch float), for_singleton (optional;
--       any supplied value is truthy, absent for normal enqueues)
-- Returns the enqueued job's id -- for singletons this may be the id of a
-- pre-existing job on the strand instead of the new job.
local job_id, queue, strand, now, for_singleton = unpack(ARGV)
local strand_key = Keys.strand(strand)

-- if this is a singleton job, only queue it up if another doesn't exist on the strand
-- otherwise, delete it and return the other job id
if for_singleton then
  -- look at the first two jobs on the strand: a running (locked) job at the
  -- front doesn't count as a duplicate, so check the second slot instead
  local job_ids = redis.call('LRANGE', strand_key, 0, 1)
  local job_to_check = 1
  if job_exists(job_ids[1]) and redis.call('HGET', Keys.job(job_ids[1]), 'locked_at') then
    job_to_check = 2
  end

  local job_to_check_id = job_ids[job_to_check]
  if job_exists(job_to_check_id) then
    -- delete the new job, we found a match
    redis.call('DEL', Keys.job(job_id))
    return job_to_check_id
  end
end

-- if this job is in a strand, add it to the strand queue first
-- if it's not at the front of the strand, add_to_queues below will give it a
-- waiting priority so it won't actually run yet
if strand_key then
  add_to_strand(job_id, strand)
end

add_to_queues(job_id, queue, now)

return job_id
@@ -0,0 +1,5 @@
-- Move a job out of the normal queues and into the failed_jobs sorted set,
-- scored by its failure time. ARGV: job_id (failed_at must already be set
-- on the job hash).
local job_id = ARGV[1]
local failed_at, queue, strand =
    unpack(redis.call('HMGET', Keys.job(job_id), 'failed_at', 'queue', 'strand'))

remove_from_queues(job_id, queue, strand)
redis.call('ZADD', Keys.failed_jobs(), failed_at, job_id)
@@ -0,0 +1,3 @@
-- Thin wrapper around the shared find_available helper (jobs_common.lua).
-- ARGV: queue, limit, offset, min_priority, max_priority, now
return find_available(ARGV[1], ARGV[2], ARGV[3], ARGV[4], ARGV[5], ARGV[6])
@@ -0,0 +1,57 @@
require 'redis/scripting'

# This module handles loading the Lua functions into Redis and running them.
# Each public method is a thin wrapper around one of the .lua scripts in this
# directory; timestamps are converted to UTC epoch floats before being sent.
module Delayed::Backend::Redis
  class Functions < ::Redis::Scripting::Module
    def initialize(redis)
      super(redis, File.dirname(__FILE__))
    end

    # Run a script, logging how long the round trip took.
    def run_script(script, keys, argv)
      result = nil
      elapsed = Benchmark.ms { result = super }
      ActiveRecord::Base.logger.debug('Redis Jobs Timing: %s (%.1fms)' % [script.name, elapsed])
      result
    end

    def find_available(queue, limit, offset, min_priority, max_priority, now)
      run(:find_available, [], [queue, limit, offset, min_priority, max_priority, now.utc.to_f])
    end

    # Returns a Hash of job attributes (empty when no job was available).
    def get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
      job_attrs = run(:get_and_lock_next_available, [], [queue, min_priority, max_priority, worker_name, now.utc.to_f])
      Hash[*job_attrs]
    end

    def enqueue(job_id, queue, strand, now)
      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f])
    end

    # Same script as enqueue, but with the singleton flag set.
    def create_singleton(job_id, queue, strand, now)
      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f, true])
    end

    def destroy_job(job_id, now)
      run(:destroy_job, [], [job_id, now.utc.to_f])
    end

    def tickle_strand(job_id, strand, now)
      run(:tickle_strand, [], [job_id, strand, now.utc.to_f])
    end

    def fail_job(job_id)
      run(:fail_job, [], [job_id])
    end

    def set_running(job_id)
      run(:set_running, [], [job_id])
    end

    # Lua can't receive an array argument, so the ids are joined into a
    # comma-separated string and re-split inside the script.
    def bulk_update(action, ids, flavor, query, now)
      id_string = (ids || []).join(",")
      run(:bulk_update, [], [action, id_string, flavor, query, now.utc.to_f])
    end
  end
end
@@ -0,0 +1,17 @@
-- Pop the single next runnable job off the queue and lock it for the worker.
-- ARGV: queue, min_priority, max_priority, worker_name, now
-- Returns the job's full attribute list (HGETALL pairs), or {} if none.
local queue, min_priority, max_priority, worker_name, now = unpack(ARGV)
local job_id = find_available(queue, 1, 0, min_priority, max_priority, now)[1]

if not job_exists(job_id) then
  return {}
end

-- update the job with locked_by and locked_at
redis.call('HMSET', Keys.job(job_id), 'locked_by', worker_name, 'locked_at', now)
-- add the job to the running_jobs set
redis.call('ZADD', Keys.running_jobs(), now, job_id)
-- remove the job from the pending jobs queue
redis.call('ZREM', Keys.queue(queue), job_id)

-- return the list of job attributes
return redis.call('HGETALL', Keys.job(job_id))
@@ -0,0 +1,203 @@
-- Keys holds the various functions to map to redis keys
-- These are duplicated from job.rb
local Keys = {}

function Keys.job(id)
  return "job/" .. id
end

function Keys.running_jobs()
  return "running_jobs"
end

function Keys.failed_jobs()
  return "failed_jobs"
end

function Keys.queue(queue)
  return "queue/" .. (queue or '')
end

function Keys.future_queue(queue)
  return Keys.queue(queue) .. "/future"
end

-- strands are optional; returns nil when no strand name was given
function Keys.strand(strand_name)
  if strand_name and string.len(strand_name) > 0 then
    return "strand/" .. strand_name
  end
  return nil
end

function Keys.tag_counts(flavor)
  return "tag_counts/" .. flavor
end

function Keys.tag(tag)
  return "tag/" .. tag
end

-- sentinel priority for strand jobs that must wait their turn; keep in sync
-- with WAITING_STRAND_JOB_PRIORITY in job.rb
function Keys.waiting_strand_job_priority()
  return 2000000
end
-- remove the given job from the various queues
local remove_from_queues = function(job_id, queue, strand)
  local tag = redis.call('HGET', Keys.job(job_id), 'tag')

  redis.call("SREM", Keys.tag(tag), job_id)

  -- ZREM returns 1 when the member was present, so these become -1/0
  -- deltas to apply to the tag counts
  local removed_current = -redis.call('ZREM', Keys.queue(queue), job_id)
  redis.call('ZREM', Keys.running_jobs(), job_id)
  local removed_future = -redis.call('ZREM', Keys.future_queue(queue), job_id)

  if removed_current ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('current'), removed_current, tag)
  end

  local removed_total = removed_current + removed_future
  if removed_total ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('all'), removed_total, tag)
  end

  local strand_key = Keys.strand(strand)
  if strand_key then
    redis.call('LREM', strand_key, 1, job_id)
  end
end
-- returns the id for the first job on the strand, or nil if none
local strand_next_job_id = function(strand)
  local strand_key = Keys.strand(strand)
  if strand_key then
    return redis.call('LRANGE', strand_key, 0, 0)[1]
  end
  return nil
end
-- append the job to its strand list (no-op when the job has no strand)
-- returns next_in_strand -- whether this added job is at the front of the strand
local add_to_strand = function(job_id, strand)
  local strand_key = Keys.strand(strand)
  if not strand_key then return end
  redis.call('RPUSH', strand_key, job_id)
  return strand_next_job_id(strand) == job_id
end
-- add this given job to the correct queues based on its state and the current time
-- also updates the tag counts and tag job lists
local add_to_queues = function(job_id, queue, now)
  local run_at, priority, tag, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'run_at', 'priority', 'tag', 'strand'))

  redis.call("SADD", Keys.tag(tag), job_id)

  if strand then
    -- a strand job that isn't at the front of its strand gets a sentinel
    -- priority that sorts behind every runnable priority, so workers skip it
    local next_job_id = strand_next_job_id(strand)
    if next_job_id and next_job_id ~= job_id then
      priority = Keys.waiting_strand_job_priority()
    end
  end

  local current_delta = 0
  local future_delta = 0

  -- compare the timestamps numerically: run_at comes back from redis as a
  -- string and now is an ARGV string, and Lua's relational operators do NOT
  -- coerce, so a bare `run_at > now` would compare lexicographically
  if tonumber(run_at) > tonumber(now) then
    future_delta = future_delta + redis.call('ZADD', Keys.future_queue(queue), run_at, job_id)
    current_delta = current_delta - redis.call('ZREM', Keys.queue(queue), job_id)
  else
    -- floor the run_at so we don't have a float in our float
    local sort_key = priority .. '.' .. math.floor(run_at)
    current_delta = current_delta + redis.call('ZADD', Keys.queue(queue), sort_key, job_id)
    future_delta = future_delta - redis.call('ZREM', Keys.future_queue(queue), job_id)
  end

  if current_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
  end

  local total_delta = current_delta + future_delta

  if total_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
  end
end
-- a job "exists" when its hash is still present (checked via the stored id
-- field); a nil/false job_id short-circuits to that falsy value
local job_exists = function(job_id)
  if not job_id then
    return job_id
  end
  return redis.call('HGET', Keys.job(job_id), 'id')
end
-- find jobs available for running
-- checks the future queue too, and moves any now-ready jobs
-- into the current queue
local find_available = function(queue, limit, offset, min_priority, max_priority, now)
  -- promote future jobs whose run_at has arrived into the current queue
  local ready_future_jobs = redis.call('ZRANGEBYSCORE', Keys.future_queue(queue), 0, now, 'limit', 0, limit)
  for i, job_id in ipairs(ready_future_jobs) do
    add_to_queues(job_id, queue, now)
  end

  if not min_priority or min_priority == '' then
    min_priority = '0'
  end

  if not max_priority or max_priority == '' then
    max_priority = "+inf"
  else
    -- queue scores are <priority>.<run_at>, so "(max+1" makes the bound
    -- exclusive while still admitting every fractional score at max_priority
    max_priority = "(" .. (max_priority + 1)
  end
  local job_ids = redis.call('ZRANGEBYSCORE', Keys.queue(queue), min_priority, max_priority, 'limit', offset, limit)
  -- walk backwards so table.remove doesn't skip entries: prune any ids whose
  -- job hash no longer exists, removing them from the queue as well
  for idx = table.getn(job_ids), 1, -1 do
    local job_id = job_ids[idx]
    if not job_exists(job_id) then
      table.remove(job_ids, idx)
      redis.call('ZREM', Keys.queue(queue), job_id)
    end
  end
  return job_ids
end
-- "tickle" the strand, removing the given job_id and setting the job at the
-- front of the strand as eligible to run, if it's not already
local tickle_strand = function(job_id, strand, now)
  local strand_key = Keys.strand(strand)

  -- this LREM could be (relatively) slow if the strand is very large and this
  -- job isn't near the front. however, in normal usage, we only delete from the
  -- front. also the linked list is in memory, so even with thousands of jobs on
  -- the strand it'll be quite fast.
  --
  -- alternatively we could make strands sorted sets, which would avoid a
  -- linear search to delete this job. jobs need to be sorted on insertion
  -- order though, and we're using GUIDs for keys here rather than an
  -- incrementing integer, so we'd have to use an artificial counter as the
  -- sort key (through `incrby strand_name` probably).
  redis.call('LREM', strand_key, 1, job_id)
  -- normally this loop will only run once, but we loop so that if there's any
  -- job ids on the strand that don't actually exist anymore, we'll throw them
  -- out and keep searching until we find a legit job or the strand is empty
  while true do
    local next_id = redis.call('LRANGE', strand_key, 0, 0)[1]
    if next_id == nil then
      break
    elseif job_exists(next_id) then
      -- technically jobs on the same strand can be in different queues,
      -- though that functionality isn't currently used
      local queue = redis.call('HGET', Keys.job(next_id), 'queue')
      add_to_queues(next_id, queue, now)
      break
    else
      -- stale id at the front of the strand: drop it and keep looking
      redis.call('LPOP', strand_key)
    end
  end
end
-- tear a job down completely: pull it out of every queue, advance its strand
-- (if any), and delete its hash and any failed-jobs entry
local destroy_job = function(job_id, now)
  local queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'strand'))
  remove_from_queues(job_id, queue, strand)

  if Keys.strand(strand) then
    tickle_strand(job_id, strand, now)
  end

  redis.call('ZREM', Keys.failed_jobs(), job_id)
  redis.call('DEL', Keys.job(job_id))
end
@@ -0,0 +1,481 @@
# This can't currently be made compatible with redis cluster, because the Lua functions
# access keys that aren't in their keys argument list (since they pop jobs off
# a queue and then update the job with that id).

# still TODO:
#  * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
#  * need a way to migrate between jobs backends
#  * we need some auditors:
#    * fail jobs in running_jobs if they've timed out
#    * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
#    * have a master auditor that fails jobs if a whole pool dies
#    * audit strands occasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
module Delayed::Backend::Redis
  require 'delayed/backend/redis/functions'

  class Job
    extend ActiveModel::Callbacks
    define_model_callbacks :create, :save
    include ActiveModel::Dirty
    include Delayed::Backend::Base
    # This redis instance needs to be set by the application during jobs configuration
    cattr_accessor :redis

    # An overview of where and when things are stored in redis:
    #
    # Jobs are given a UUID for an id, rather than an incrementing integer id.
    # The job attributes are then stored in a redis hash at job/<id>. Attribute
    # values are generally stored as their json representation, except for
    # timestamps, which as stored as floating point utc-time-since-unix-epoch
    # values, so that we can compare timestamps in Lua without a date parser.
    #
    # Jobs that are scheduled to run immediately (in the present/past) are
    # inserted into the queue named queue/<queue_name>. The queue is a sorted
    # set, with the value being the job id and the weight being a floating point
    # value, <priority>.<run_at>. This formatting is key to efficient
    # querying of the next job to run.
    #
    # Jobs that are scheduled to run in the future are not inserted into the
    # queue, but rather a future queue named queue/<queue_name>/future. This
    # queue is also a sorted set, with the value being the job id, but the weight
    # is just the <run_at> value.
    #
    # If the job is on a strand, the flow is different. First, it's inserted into
    # a list named strand/<strand>. When strand jobs are inserted into the
    # current jobs queue, we check if they're next to run in the strand. If not,
    # we give them a special priority that is greater than MAX_PRIORITY, so that
    # they won't run. When a strand job is finished, failed or deleted,
    # "tickle_strand" is called, which removes that job from the list and if that
    # job was at the front of the list, changes the priority on the next job so
    # that it's eligible to run.
    #
    # For singletons, the flow is the same as for other strand jobs, except that
    # the job is thrown out if there are already any non-running jobs in the
    # strand list.
    #
    # If a job fails, it's removed from the normal queues and inserted into the
    # failed_jobs sorted set, with job id as the value and failure time as the
    # key. The hash of job attributes is also renamed from job/<id> to
    # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
    # with AR jobs.
    #
    # We also insert into some other data structures for admin functionality.
    # tag_counts/current and tag_counts/all are sorted sets storing the count of
    # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
    #
    # Most all of this happens in Lua functions, for atomicity. See the other
    # files in this directory -- functions.rb is a wrapper to call the lua
    # functions, and the individual functions are defined in .lua files in this
    # directory.

    # these key mappings are duplicated in the redis lua code, in include.lua
    module Keys
      RUNNING_JOBS = "running_jobs"
      FAILED_JOBS = "failed_jobs"
      JOB = proc { |id| "job/#{id}" }
      FAILED_JOB = proc { |id| "failed_job/#{id}" }
      QUEUE = proc { |name| "queue/#{name}" }
      FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
      STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
      TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
      TAG = proc { |tag| "tag/#{tag}" }
    end

    WAITING_STRAND_JOB_PRIORITY = 2000000
    if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
      # if you change this, note that the value is duplicated in include.lua
      raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
    end

    COLUMNS = [
      :id,
      :priority,
      :attempts,
      :handler,
      :last_error,
      :queue,
      :run_at,
      :locked_at,
      :failed_at,
      :locked_by,
      :created_at,
      :updated_at,
      :tag,
      :max_attempts,
      :strand,
      :source,
    ]

    # We store time attributes in redis as floats so we don't have to do
    # timestamp parsing in lua.
    TIMESTAMP_COLUMNS = [:run_at, :locked_at, :failed_at, :created_at, :updated_at]
    INTEGER_COLUMNS = [:priority, :attempts, :max_attempts]

    attr_reader(*COLUMNS)
    define_attribute_methods(COLUMNS)
    COLUMNS.each do |c|
      # Custom attr_writer that updates the dirty status.
      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
        def #{c}=(new_value)
          #{c}_will_change! unless new_value == self.#{c}
          @#{c} = new_value
        end
      EOS
    end

    # Build an in-memory (unsaved) job from the given attribute hash.
    def initialize(attrs = {})
      attrs.each { |k, v| self.send("#{k}=", v) }
      self.priority ||= 0
      self.attempts ||= 0
      @new_record = true
    end

    # Like new, but marks the job as already persisted (used when loading
    # attributes back out of redis).
    def self.instantiate(attrs)
      result = new(attrs)
      result.instance_variable_set(:@new_record, false)
      result
    end

    def self.create(attrs = {})
      result = new(attrs)
      result.save
      result
    end

    def self.create!(attrs = {})
      result = new(attrs)
      result.save!
      result
    end

    # Hash-style attribute access, for parity with the AR backend.
    def [](key)
      send(key)
    end

    def []=(key, value)
      send("#{key}=", value)
    end

    def self.find(ids)
      if Array === ids
        find_some(ids, {})
      else
        find_one(ids, {})
      end
    end

    def new_record?
      !!@new_record
    end

    def destroyed?
      !!@destroyed
    end

    # Jobs compare by id, not by attribute equality.
    def ==(other)
      other.is_a?(self.class) && id == other.id
    end

    def hash
      id.hash
    end

    def self.reconnect!
      self.redis.reconnect
    end

    # Memoized wrapper around the Lua script runner (see functions.rb).
    def self.functions
      @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
    end

    def self.find_one(id, options)
      job = self.get_with_ids([id]).first
      job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
    end

    def self.find_some(ids, options)
      self.get_with_ids(ids).compact
    end

    # Fetch each job hash from redis; entries for missing jobs come back nil.
    def self.get_with_ids(ids)
      ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
    end

    # Overridden in Failed to look under failed_job/<id> instead.
    def self.key_for_job_id(job_id)
      Keys::JOB[job_id]
    end

    def self.get_and_lock_next_available(worker_name,
                                         queue = Delayed::Settings.queue,
                                         min_priority = Delayed::MIN_PRIORITY,
                                         max_priority = Delayed::MAX_PRIORITY)

      check_queue(queue)
      check_priorities(min_priority, max_priority)

      # as an optimization this lua function returns the hash of job attributes,
      # rather than just a job id, saving a round trip
      job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, db_time_now)
      instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
    end

    def self.find_available(limit,
                            queue = Delayed::Settings.queue,
                            min_priority = Delayed::MIN_PRIORITY,
                            max_priority = Delayed::MAX_PRIORITY)

      check_queue(queue)
      check_priorities(min_priority, max_priority)

      self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
    end

    # get a list of jobs of the given flavor in the given queue
    # flavor is :current, :future, :failed, :strand or :tag
    # depending on the flavor, query has a different meaning:
    # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
    # for :strand it's the strand name
    # for :tag it's the tag name
    # for :failed it's ignored
    def self.list_jobs(flavor,
                       limit,
                       offset = 0,
                       query = nil)
      case flavor.to_s
      when 'current'
        query ||= Delayed::Settings.queue
        check_queue(query)
        self.find(functions.find_available(query, limit, offset, nil, nil, db_time_now))
      when 'future'
        query ||= Delayed::Settings.queue
        check_queue(query)
        self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
      when 'failed'
        Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
      when 'strand'
        self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
      when 'tag'
        # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
        ids = redis.smembers(Keys::TAG[query])
        self.find(ids[offset, limit])
      else
        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
      end
    end

    # get the total job count for the given flavor
    # flavor is :current, :future or :failed
    # for the :failed flavor, queue is currently ignored
    def self.jobs_count(flavor,
                       queue = Delayed::Settings.queue)
      case flavor.to_s
      when 'current'
        check_queue(queue)
        redis.zcard(Keys::QUEUE[queue])
      when 'future'
        check_queue(queue)
        redis.zcard(Keys::FUTURE_QUEUE[queue])
      when 'failed'
        redis.zcard(Keys::FAILED_JOBS)
      else
        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
      end
    end

    def self.strand_size(strand)
      redis.llen(Keys::STRAND[strand])
    end

    def self.running_jobs()
      self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
    end

    # Unlock every running job locked by the given worker (e.g. on restart).
    def self.clear_locks!(worker_name)
      self.running_jobs.each do |job|
        # TODO: mark the job as failed one attempt
        job.unlock! if job.locked_by == worker_name
      end
    end

    # returns a list of hashes { :tag => tag_name, :count => current_count }
    # in descending count order
    # flavor is :current or :all
    def self.tag_counts(flavor,
                        limit,
                        offset = 0)
      raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
      key = Keys::TAG_COUNTS[flavor]
      redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
    end

    # perform a bulk update of a set of jobs
    # action is :hold, :unhold, or :destroy
    # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
    # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
    #
    # see the list_jobs action for the list of available flavors and the meaning
    # of opts[:query] for each
    def self.bulk_update(action, opts)
      if %w(current future).include?(opts[:flavor].to_s)
        opts[:query] ||= Delayed::Settings.queue
      end
      functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
    end

    def self.create_singleton(options)
      self.create!(options.merge(:singleton => true))
    end

    # not saved, just used as a marker when creating
    attr_accessor :singleton

    def lock_in_redis!(worker_name)
      self.locked_at = self.class.db_time_now
      self.locked_by = worker_name
      save
    end

    def unlock!
      self.locked_at = nil
      self.locked_by = nil
      save!
    end

    # Persist the job, running :save (and :create for new records) callbacks,
    # then reset the dirty-tracking state. Returns false if already destroyed.
    def save(*a)
      return false if destroyed?
      result = run_callbacks(:save) do
        if new_record?
          run_callbacks(:create) { create }
        else
          update
        end
      end
      changes_applied
      result
    end

    # Backfill ActiveModel::Dirty#changes_applied for older Rails.
    # NOTE(review): this is a lexicographic string comparison of the version --
    # presumably fine for the 3.2-4.x range this gem targets, but verify.
    if Rails.version < "4.1"
      def changes_applied
        @previously_changed = changes
        @changed_attributes.clear
      end
    end

    # NOTE(review): RecordNotSaved is referenced unqualified here; confirm it
    # is defined/required in this namespace (vs. ActiveRecord::RecordNotSaved).
    def save!(*a)
      save(*a) || raise(RecordNotSaved)
    end

    def destroy
      self.class.functions.destroy_job(id, self.class.db_time_now)
      @destroyed = true
      freeze
    end

    # take this job off the strand, and queue up the next strand job if this job
    # was at the front
    def tickle_strand
      if strand.present?
        self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
      end
    end

    def create_and_lock!(worker_name)
      raise "job already exists" unless new_record?
      lock_in_redis!(worker_name)
    end

    # Mark the job failed: set failed_at, move the hash from job/<id> to
    # failed_job/<id>, and advance the strand.
    def fail!
      self.failed_at = self.class.db_time_now
      save!
      redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
      tickle_strand
      self
    end

    protected

    # Route the job into the right redis structures based on its state
    # (failed / locked / singleton / normal). Called after every save.
    def update_queues
      if failed_at
        self.class.functions.fail_job(id)
      elsif locked_at
        self.class.functions.set_running(id)
      elsif singleton
        job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
        # if create_singleton returns a different job id, that means this job got
        # deleted because there was already that other job on the strand. so
        # replace this job with the other for returning.
        if job_id != self.id
          singleton = self.class.find(job_id)
          COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
        end
      else
        self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
      end
    end

    # First-time persist: assign a random id, stamp timestamps, write the
    # hash, and enqueue.
    def create
      self.id ||= SecureRandom.hex(16)
      self.created_at = self.updated_at = Time.now.utc
      save_job_to_redis
      update_queues

      @new_record = false
      self.id
    end

    def update
      self.updated_at = Time.now.utc
      save_job_to_redis
      update_queues
      true
    end

    def queue_score
      "#{priority}.#{run_at.to_i}".to_f
    end

    # Serialize all columns into the job hash: timestamps as epoch floats,
    # everything else as json; nil'd-out attributes are HDEL'd.
    def save_job_to_redis
      to_delete = []
      attrs = {}
      COLUMNS.each do |k|
        v = send(k)
        if v.nil?
          to_delete << k if !new_record? && changed.include?(k.to_s)
        elsif v.is_a?(ActiveSupport::TimeWithZone)
          attrs[k] = v.utc.to_f
        else
          attrs[k] = v.as_json
        end
      end
      key = Keys::JOB[id]
      redis.mapped_hmset(key, attrs)
      redis.hdel(key, to_delete) unless to_delete.empty?
    end

    # Rehydrate a job from a raw redis hash; returns nil when the hash is
    # empty/missing. (Class method, so the `protected` above doesn't apply.)
    def self.instantiate_from_attrs(redis_attrs)
      if redis_attrs['id'].present?
        attrs = redis_attrs.with_indifferent_access
        TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
        INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
        instantiate(attrs)
      else
        nil
      end
    end

    # NOTE(review): defined after `protected`, so this is a protected method --
    # confirm callers expect that.
    def global_id
      id
    end

    # Failed jobs live under failed_job/<id> but otherwise behave like jobs.
    class Failed < Job
      include Delayed::Backend::Base
      def self.key_for_job_id(job_id)
        Keys::FAILED_JOB[job_id]
      end

      def original_job_id
        id
      end
    end
  end
end