inst-jobs 2.2.1 → 2.4.0
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +187 -159
- data/lib/delayed/backend/base.rb +80 -69
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +42 -51
- data/lib/delayed/performable_method.rb +5 -7
- data/lib/delayed/periodic.rb +66 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -192
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -17
- data/lib/delayed/work_queue/parent_process/client.rb +55 -53
- data/lib/delayed/work_queue/parent_process/server.rb +215 -209
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +21 -12
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -261
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -37
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +142 -138
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +8 -8
- data/spec/delayed/message_sending_spec.rb +8 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -7
- data/spec/delayed/work_queue/parent_process/client_spec.rb +16 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +51 -49
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/gemfiles/52.gemfile +5 -3
- data/spec/gemfiles/52.gemfile.lock +240 -0
- data/spec/gemfiles/60.gemfile +5 -3
- data/spec/gemfiles/60.gemfile.lock +246 -0
- data/spec/gemfiles/61.gemfile +9 -0
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +520 -437
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +156 -148
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +43 -51
- metadata +101 -70
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -535
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/42.gemfile +0 -7
- data/spec/gemfiles/50.gemfile +0 -7
- data/spec/gemfiles/51.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148

data/lib/delayed/backend/redis/bulk_update.lua
@@ -1,50 +0,0 @@
-local action, id_string, flavor, query, now = unpack(ARGV)
-
-local ids = {}
-
-if string.len(flavor) > 0 then
-  if flavor == 'current' then
-    ids = redis.call('ZRANGE', Keys.queue(query), 0, -1)
-  elseif flavor == 'future' then
-    ids = redis.call('ZRANGE', Keys.future_queue(query), 0, -1)
-  elseif flavor == 'strand' then
-    ids = redis.call('LRANGE', Keys.strand(query), 0, -1)
-  elseif flavor == 'tag' then
-    ids = redis.call('SMEMBERS', Keys.tag(query))
-  end
-else
-  -- can't pass an array to redis/lua, so we split the string here
-  for id in string.gmatch(id_string, "([%w-]+)") do
-    if job_exists(id) then
-      table.insert(ids, id)
-    end
-  end
-end
-
-local count = 0
-for idx, job_id in ipairs(ids) do
-  if action == 'hold' then
-    local queue, strand, locked_by = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'strand', 'locked_by'))
-    if not locked_by then
-      count = count + 1
-      remove_from_queues(job_id, queue, strand)
-      redis.call('HMSET', Keys.job(job_id), 'locked_at', now, 'locked_by', 'on hold', 'attempts', 50)
-    end
-  elseif action == 'unhold' then
-    local queue, locked_by = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'locked_by'))
-    if locked_by == 'on hold' then
-      count = count + 1
-      add_to_queues(job_id, queue, now)
-      redis.call('HDEL', Keys.job(job_id), 'locked_at', 'locked_by')
-      redis.call('HMSET', Keys.job(job_id), 'attempts', 0)
-    end
-  elseif action == 'destroy' then
-    local locked_by = redis.call('HGET', Keys.job(job_id), 'locked_by')
-    if not locked_by or locked_by == 'on hold' then
-      count = count + 1
-      destroy_job(job_id, now)
-    end
-  end
-end
-
-return count
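
The Ruby entry point for this script survives further down in the removed job.rb (self.bulk_update, with the ids joined into a comma-separated string by functions.rb). A minimal sketch of how it was driven, assuming the 2.2.x Redis backend is still configured:

    # Hold every currently-runnable job in the default queue; the 'hold' action
    # locks matching jobs with locked_by = 'on hold' and attempts = 50.
    Delayed::Backend::Redis::Job.bulk_update('hold', { flavor: 'current' })

    # Destroy two specific jobs by id (the ids are joined into one string
    # before being handed to this Lua script).
    Delayed::Backend::Redis::Job.bulk_update('destroy', { ids: [job1.id, job2.id] })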

data/lib/delayed/backend/redis/enqueue.lua
@@ -1,29 +0,0 @@
-local job_id, queue, strand, now, for_singleton = unpack(ARGV)
-local strand_key = Keys.strand(strand)
-
--- if this is a singleton job, only queue it up if another doesn't exist on the strand
--- otherwise, delete it and return the other job id
-if for_singleton then
-  local job_ids = redis.call('LRANGE', strand_key, 0, 1)
-  local job_to_check = 1
-  if job_exists(job_ids[1]) and redis.call('HGET', Keys.job(job_ids[1]), 'locked_at') then
-    job_to_check = 2
-  end
-
-  local job_to_check_id = job_ids[job_to_check]
-  if job_exists(job_to_check_id) then
-    -- delete the new job, we found a match
-    redis.call('DEL', Keys.job(job_id))
-    return job_to_check_id
-  end
-end
-
--- if this job is in a strand, add it to the strand queue first
--- if it's not at the front of the strand, we won't enqueue it below
-if strand_key then
-  add_to_strand(job_id, strand)
-end
-
-add_to_queues(job_id, queue, now)
-
-return job_id
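
The for_singleton branch above backed Job.create_singleton on the Ruby side (see job.rb below): if a non-running job already sits at the head of the strand, the new record is deleted and the existing job's id is returned, which update_queues then reconciles according to on_conflict. A rough sketch, with an illustrative strand name and the handler/payload attributes (normally filled in by the standard enqueue path) omitted:

    # At most one pending copy of this work per strand (sketch only).
    Delayed::Backend::Redis::Job.create_singleton(
      strand: 'grade_course_42',      # illustrative strand name
      run_at: Time.now.utc + 300      # stored as the float score in the future queue
    )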

data/lib/delayed/backend/redis/functions.rb
@@ -1,59 +0,0 @@
-# frozen_string_literal: true
-
-require 'redis/scripting'
-
-# This module handles loading the Lua functions into Redis and running them
-module Delayed::Backend::Redis
-  class Functions < ::Redis::Scripting::Module
-    def initialize(redis)
-      super(redis, File.dirname(__FILE__))
-    end
-
-    def run_script(script, keys, argv)
-      result = nil
-      ms = Benchmark.ms { result = super }
-      line = 'Redis Jobs Timing: %s (%.1fms)' % [script.name, ms]
-      ActiveRecord::Base.logger.debug(line)
-      result
-    end
-
-    def find_available(queue, limit, offset, min_priority, max_priority, now)
-      run(:find_available, [], [queue, limit, offset, min_priority, max_priority, now.utc.to_f])
-    end
-
-    def get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
-      attrs = run(:get_and_lock_next_available, [], [queue, min_priority, max_priority, worker_name, now.utc.to_f])
-      Hash[*attrs]
-    end
-
-    def enqueue(job_id, queue, strand, now)
-      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f])
-    end
-
-    def create_singleton(job_id, queue, strand, now)
-      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f, true])
-    end
-
-    def destroy_job(job_id, now)
-      run(:destroy_job, [], [job_id, now.utc.to_f])
-    end
-
-    def tickle_strand(job_id, strand, now)
-      run(:tickle_strand, [], [job_id, strand, now.utc.to_f])
-    end
-
-    def fail_job(job_id)
-      run(:fail_job, [], [job_id])
-    end
-
-    def set_running(job_id)
-      run(:set_running, [], [job_id])
-    end
-
-    def bulk_update(action, ids, flavor, query, now)
-      ids = (ids || []).join(",")
-      run(:bulk_update, [], [action, ids, flavor, query, now.utc.to_f])
-    end
-
-  end
-end
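
Each method above maps one-to-one onto a .lua file in this directory, and job.rb (below) memoized a single instance per backend in self.functions. A minimal usage sketch, assuming a plain redis client and that the job/<id> hash has already been written (as job.rb's save_job_to_redis did); the queue and worker names are illustrative:

    require 'redis'

    redis     = Redis.new                    # local Redis, for illustration only
    functions = Delayed::Backend::Redis::Functions.new(redis)

    # Queue an existing job hash, then pop the next runnable job's attributes.
    functions.enqueue(job_id, 'default_queue', nil, Time.now)
    attrs = functions.get_and_lock_next_available('worker-1', 'default_queue', 0, 1_000_000, Time.now)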

data/lib/delayed/backend/redis/get_and_lock_next_available.lua
@@ -1,17 +0,0 @@
-local queue, min_priority, max_priority, worker_name, now = unpack(ARGV)
-local job_id = find_available(queue, 1, 0, min_priority, max_priority, now)[1]
-
-if job_exists(job_id) then
-  -- update the job with locked_by and locked_at
-  redis.call('HMSET', Keys.job(job_id), 'locked_by', worker_name, 'locked_at', now)
-
-  -- add the job to the running_jobs set
-  redis.call('ZADD', Keys.running_jobs(), now, job_id)
-  -- remove the job from the pending jobs queue
-  redis.call('ZREM', Keys.queue(queue), job_id)
-
-  -- return the list of job attributes
-  return redis.call('HGETALL', Keys.job(job_id))
-else
-  return {}
-end
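
Workers reached this script through Job.get_and_lock_next_available (job.rb below), which returns the full attribute hash in one round trip rather than just an id. A sketch of a bare polling loop, with illustrative names and none of Delayed::Worker's retry or max_attempts handling:

    loop do
      job = Delayed::Backend::Redis::Job.get_and_lock_next_available('worker-1')
      break unless job   # nil when the queue is empty

      begin
        job.invoke_job   # provided by Delayed::Backend::Base
        job.destroy
      rescue StandardError
        job.fail!        # moves the hash to failed_job/<id> and tickles the strand
      end
    end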

data/lib/delayed/backend/redis/includes/jobs_common.lua
@@ -1,203 +0,0 @@
--- Keys holds the various functions to map to redis keys
--- These are duplicated from job.rb
-local Keys = {}
-
-Keys.job = function(id)
-  return "job/" .. id
-end
-
-Keys.running_jobs = function()
-  return "running_jobs"
-end
-
-Keys.failed_jobs = function()
-  return "failed_jobs"
-end
-
-Keys.queue = function(queue)
-  return "queue/" .. (queue or '')
-end
-
-Keys.future_queue = function(queue)
-  return Keys.queue(queue) .. "/future"
-end
-
-Keys.strand = function(strand_name)
-  if strand_name and string.len(strand_name) > 0 then
-    return "strand/" .. strand_name
-  else
-    return nil
-  end
-end
-
-Keys.tag_counts = function(flavor)
-  return "tag_counts/" .. flavor
-end
-
-Keys.tag = function(tag)
-  return "tag/" .. tag
-end
-
-Keys.waiting_strand_job_priority = function()
-  return 2000000
-end
-
--- remove the given job from the various queues
-local remove_from_queues = function(job_id, queue, strand)
-  local tag = unpack(redis.call('HMGET', Keys.job(job_id), 'tag'))
-
-  redis.call("SREM", Keys.tag(tag), job_id)
-
-  local current_delta = -redis.call('ZREM', Keys.queue(queue), job_id)
-  redis.call('ZREM', Keys.running_jobs(), job_id)
-  local future_delta = -redis.call('ZREM', Keys.future_queue(queue), job_id)
-
-  if current_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
-  end
-
-  local total_delta = current_delta + future_delta
-
-  if total_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
-  end
-
-  local strand_key = Keys.strand(strand)
-  if strand_key then
-    redis.call('LREM', strand_key, 1, job_id)
-  end
-end
-
--- returns the id for the first job on the strand, or nil if none
-local strand_next_job_id = function(strand)
-  local strand_key = Keys.strand(strand)
-  if not strand_key then return nil end
-  return redis.call('LRANGE', strand_key, 0, 0)[1]
-end
-
--- returns next_in_strand -- whether this added job is at the front of the strand
-local add_to_strand = function(job_id, strand)
-  local strand_key = Keys.strand(strand)
-  if not strand_key then return end
-  redis.call('RPUSH', strand_key, job_id) -- add to strand list
-  local next_id = strand_next_job_id(strand)
-  return next_id == job_id
-end
-
--- add this given job to the correct queues based on its state and the current time
--- also updates the tag counts and tag job lists
-local add_to_queues = function(job_id, queue, now)
-  local run_at, priority, tag, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'run_at', 'priority', 'tag', 'strand'))
-
-  redis.call("SADD", Keys.tag(tag), job_id)
-
-  if strand then
-    local next_job_id = strand_next_job_id(strand)
-    if next_job_id and next_job_id ~= job_id then
-      priority = Keys.waiting_strand_job_priority()
-    end
-  end
-
-  local current_delta = 0
-  local future_delta = 0
-
-  if run_at > now then
-    future_delta = future_delta + redis.call('ZADD', Keys.future_queue(queue), run_at, job_id)
-    current_delta = current_delta - redis.call('ZREM', Keys.queue(queue), job_id)
-  else
-    -- floor the run_at so we don't have a float in our float
-    local sort_key = priority .. '.' .. math.floor(run_at)
-    current_delta = current_delta + redis.call('ZADD', Keys.queue(queue), sort_key, job_id)
-    future_delta = future_delta - redis.call('ZREM', Keys.future_queue(queue), job_id)
-  end
-
-  if current_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
-  end
-
-  local total_delta = current_delta + future_delta
-
-  if total_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
-  end
-end
-
-local job_exists = function(job_id)
-  return job_id and redis.call('HGET', Keys.job(job_id), 'id')
-end
-
--- find jobs available for running
--- checks the future queue too, and moves and now-ready jobs
--- into the current queue
-local find_available = function(queue, limit, offset, min_priority, max_priority, now)
-  local ready_future_jobs = redis.call('ZRANGEBYSCORE', Keys.future_queue(queue), 0, now, 'limit', 0, limit)
-  for i, job_id in ipairs(ready_future_jobs) do
-    add_to_queues(job_id, queue, now)
-  end
-
-  if not min_priority or min_priority == '' then
-    min_priority = '0'
-  end
-
-  if not max_priority or max_priority == '' then
-    max_priority = "(" .. Keys.waiting_strand_job_priority()
-  else
-    max_priority = "(" .. (max_priority + 1)
-  end
-  local job_ids = redis.call('ZRANGEBYSCORE', Keys.queue(queue), min_priority, max_priority, 'limit', offset, limit)
-  for idx = table.getn(job_ids), 1, -1 do
-    local job_id = job_ids[idx]
-    if not job_exists(job_id) then
-      table.remove(job_ids, idx)
-      redis.call('ZREM', Keys.queue(queue), job_id)
-    end
-  end
-  return job_ids
-end
-
--- "tickle" the strand, removing the given job_id and setting the job at the
--- front of the strand as eligible to run, if it's not already
-local tickle_strand = function(job_id, strand, now)
-  local strand_key = Keys.strand(strand)
-
-  -- this LREM could be (relatively) slow if the strand is very large and this
-  -- job isn't near the front. however, in normal usage, we only delete from the
-  -- front. also the linked list is in memory, so even with thousands of jobs on
-  -- the strand it'll be quite fast.
-  --
-  -- alternatively we could make strands sorted sets, which would avoid a
-  -- linear search to delete this job. jobs need to be sorted on insertion
-  -- order though, and we're using GUIDs for keys here rather than an
-  -- incrementing integer, so we'd have to use an artificial counter as the
-  -- sort key (through `incrby strand_name` probably).
-  redis.call('LREM', strand_key, 1, job_id)
-  -- normally this loop will only run once, but we loop so that if there's any
-  -- job ids on the strand that don't actually exist anymore, we'll throw them
-  -- out and keep searching until we find a legit job or the strand is empty
-  while true do
-    local next_id = redis.call('LRANGE', strand_key, 0, 0)[1]
-    if next_id == nil then
-      break
-    elseif job_exists(next_id) then
-      -- technically jobs on the same strand can be in different queues,
-      -- though that functionality isn't currently used
-      local queue = redis.call('HGET', Keys.job(next_id), 'queue')
-      add_to_queues(next_id, queue, now)
-      break
-    else
-      redis.call('LPOP', strand_key)
-    end
-  end
-end
-
-local destroy_job = function(job_id, now)
-  local queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'strand'))
-  remove_from_queues(job_id, queue, strand)
-
-  if Keys.strand(strand) then
-    tickle_strand(job_id, strand, now)
-  end
-
-  redis.call('ZREM', Keys.failed_jobs(), job_id)
-  redis.call('DEL', Keys.job(job_id))
-end
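
The sort_key built in add_to_queues is the same "<priority>.<floor(run_at)>" packing that job.rb's queue_score produces, so a single ZRANGEBYSCORE returns lower priority numbers first and, within one priority, earlier run_at values first. A worked example in Ruby:

    priority = 10
    run_at   = Time.utc(2021, 8, 1, 12, 0, 0)
    score    = "#{priority}.#{run_at.to_i}".to_f   # => 10.16278192
    # A priority-20 job, or a priority-10 job with a later run_at, scores higher
    # and therefore sorts after this one in queue/<name>.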

data/lib/delayed/backend/redis/job.rb
@@ -1,535 +0,0 @@
-# frozen_string_literal: true
-
-# This can't currently be made compatible with redis cluster, because the Lua functions
-# access keys that aren't in their keys argument list (since they pop jobs off
-# a queue and then update the job with that id).
-
-# still TODO:
-# * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
-# * need a way to migrate between jobs backends
-# * we need some auditors:
-#   * fail jobs in running_jobs if they've timed out
-#   * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
-#   * have a master auditor that fails jobs if a whole pool dies
-# * audit strands ocasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
-module Delayed::Backend::Redis
-  require 'delayed/backend/redis/functions'
-
-  class Job
-    extend ActiveModel::Callbacks
-    define_model_callbacks :create, :save
-    include ActiveModel::Dirty
-    include Delayed::Backend::Base
-    # This redis instance needs to be set by the application during jobs configuration
-    cattr_accessor :redis
-
-    # An overview of where and when things are stored in redis:
-    #
-    # Jobs are given a UUID for an id, rather than an incrementing integer id.
-    # The job attributes are then stored in a redis hash at job/<id>. Attribute
-    # values are generally stored as their json representation, except for
-    # timestamps, which as stored as floating point utc-time-since-unix-epoch
-    # values, so that we can compare timestamps in Lua without a date parser.
-    #
-    # Jobs that are schedule to run immediately (in the present/past) are
-    # inserted into the queue named queue/<queue_name>. The queue is a sorted
-    # set, with the value being the job id and the weight being a floating point
-    # value, <priority>.<run_at>. This formatting is key to efficient
-    # querying of the next job to run.
-    #
-    # Jobs that are scheduled to run in the future are not inserted into the
-    # queue, but rather a future queue named queue/<queue_name>/future. This
-    # queue is also a sorted set, with the value being the job id, but the weight
-    # is just the <run_at> value.
-    #
-    # If the job is on a strand, the flow is different. First, it's inserted into
-    # a list named strand/<strand>. When strand jobs are inserted into the
-    # current jobs queue, we check if they're next to run in the strand. If not,
-    # we give them a special priority that is greater than MAX_PRIORITY, so that
-    # they won't run. When a strand job is finished, failed or deleted,
-    # "tickle_strand" is called, which removes that job from the list and if that
-    # job was at the front of the list, changes the priority on the next job so
-    # that it's eligible to run.
-    #
-    # For singletons, the flow is the same as for other strand jobs, except that
-    # the job is thrown out if there are already any non-running jobs in the
-    # strand list.
-    #
-    # If a job fails, it's removed from the normal queues and inserted into the
-    # failed_jobs sorted set, with job id as the value and failure time as the
-    # key. The hash of job attributes is also renamed from job/<id> to
-    # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
-    # with AR jobs.
-    #
-    # We also insert into some other data structures for admin functionality.
-    # tag_counts/current and tag_counts/all are sorted sets storing the count of
-    # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
-    #
-    # Most all of this happens in Lua functions, for atomicity. See the other
-    # files in this directory -- functions.rb is a wrapper to call the lua
-    # functions, and the individual functions are defined in .lua files in this
-    # directory.
-
-    # these key mappings are duplicated in the redis lua code, in include.lua
-    module Keys
-      RUNNING_JOBS = "running_jobs"
-      FAILED_JOBS = "failed_jobs"
-      JOB = proc { |id| "job/#{id}" }
-      FAILED_JOB = proc { |id| "failed_job/#{id}" }
-      QUEUE = proc { |name| "queue/#{name}" }
-      FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
-      STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
-      TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
-      TAG = proc { |tag| "tag/#{tag}" }
-    end
-
-    WAITING_STRAND_JOB_PRIORITY = 2000000
-    if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
-      # if you change this, note that the value is duplicated in include.lua
-      raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
-    end
-
-    COLUMNS = []
-
-    # We store time attributes in redis as floats so we don't have to do
-    # timestamp parsing in lua.
-    TIMESTAMP_COLUMNS = []
-    INTEGER_COLUMNS = []
-
-    def self.column(name, type)
-      COLUMNS << name
-
-      if type == :timestamp
-        TIMESTAMP_COLUMNS << name
-      elsif type == :integer
-        INTEGER_COLUMNS << name
-      end
-
-      attr_reader(name)
-      define_attribute_methods([name])
-      # Custom attr_writer that updates the dirty status.
-      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
-        def #{name}=(new_value)
-          #{name}_will_change! unless new_value == self.#{name}
-          @#{name} = new_value
-        end
-      EOS
-    end
-
-    column(:id, :string)
-    column(:priority, :integer)
-    column(:attempts, :integer)
-    column(:handler, :string)
-    column(:last_error, :string)
-    column(:queue, :string)
-    column(:run_at, :timestamp)
-    column(:locked_at, :timestamp)
-    column(:failed_at, :timestamp)
-    column(:locked_by, :string)
-    column(:created_at, :timestamp)
-    column(:updated_at, :timestamp)
-    column(:tag, :string)
-    column(:max_attempts, :integer)
-    column(:strand, :string)
-    column(:source, :string)
-    column(:expires_at, :timestamp)
-
-    def initialize(attrs = {})
-      attrs.each { |k, v| self.send("#{k}=", v) }
-      self.priority ||= 0
-      self.attempts ||= 0
-      @new_record = true
-    end
-
-    def self.instantiate(attrs)
-      result = new(attrs)
-      result.instance_variable_set(:@new_record, false)
-      result
-    end
-
-    def self.create(attrs = {})
-      result = new(attrs)
-      result.save
-      result
-    end
-
-    def self.create!(attrs = {})
-      result = new(attrs)
-      result.save!
-      result
-    end
-
-    def [](key)
-      send(key)
-    end
-
-    def []=(key, value)
-      send("#{key}=", value)
-    end
-
-    def self.find(ids)
-      if Array === ids
-        find_some(ids, {})
-      else
-        find_one(ids, {})
-      end
-    end
-
-    def new_record?
-      !!@new_record
-    end
-
-    def destroyed?
-      !!@destroyed
-    end
-
-    def ==(other)
-      other.is_a?(self.class) && id == other.id
-    end
-
-    def hash
-      id.hash
-    end
-
-    def self.reconnect!
-      # redis cluster responds to reconnect directly,
-      # but individual redis needs it to be called on client
-      redis.respond_to?(:reconnect) ?
-        redis.reconnect :
-        redis.client.reconnect
-    end
-
-    def self.functions
-      @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
-    end
-
-    def self.find_one(id, options)
-      job = self.get_with_ids([id]).first
-      job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
-    end
-
-    def self.find_some(ids, options)
-      self.get_with_ids(ids).compact
-    end
-
-    def self.get_with_ids(ids)
-      ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
-    end
-
-    def self.key_for_job_id(job_id)
-      Keys::JOB[job_id]
-    end
-
-    def self.get_and_lock_next_available(worker_name,
-                                         queue = Delayed::Settings.queue,
-                                         min_priority = Delayed::MIN_PRIORITY,
-                                         max_priority = Delayed::MAX_PRIORITY,
-                                         prefetch: nil,
-                                         prefetch_owner: nil,
-                                         forced_latency: nil)
-
-      check_queue(queue)
-      check_priorities(min_priority, max_priority)
-      if worker_name.is_a?(Array)
-        multiple_workers = true
-        worker_name = worker_name.first
-      end
-
-      # as an optimization this lua function returns the hash of job attributes,
-      # rather than just a job id, saving a round trip
-      now = db_time_now
-      now -= forced_latency if forced_latency
-      job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
-      job = instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
-      if multiple_workers
-        if job.nil?
-          job = {}
-        else
-          job = { worker_name => job }
-        end
-      end
-      job
-    end
-
-    def self.find_available(limit,
-                            queue = Delayed::Settings.queue,
-                            min_priority = Delayed::MIN_PRIORITY,
-                            max_priority = Delayed::MAX_PRIORITY)
-
-      check_queue(queue)
-      check_priorities(min_priority, max_priority)
-
-      self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
-    end
-
-    # get a list of jobs of the given flavor in the given queue
-    # flavor is :current, :future, :failed, :strand or :tag
-    # depending on the flavor, query has a different meaning:
-    # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
-    # for :strand it's the strand name
-    # for :tag it's the tag name
-    # for :failed it's ignored
-    def self.list_jobs(flavor,
-                       limit,
-                       offset = 0,
-                       query = nil)
-      case flavor.to_s
-      when 'current'
-        query ||= Delayed::Settings.queue
-        check_queue(query)
-        self.find(functions.find_available(query, limit, offset, 0, "+inf", db_time_now))
-      when 'future'
-        query ||= Delayed::Settings.queue
-        check_queue(query)
-        self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
-      when 'failed'
-        Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
-      when 'strand'
-        self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
-      when 'tag'
-        # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
-        ids = redis.smembers(Keys::TAG[query])
-        self.find(ids[offset, limit])
-      else
-        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-      end
-    end
-
-    # get the total job count for the given flavor
-    # flavor is :current, :future or :failed
-    # for the :failed flavor, queue is currently ignored
-    def self.jobs_count(flavor,
-                        queue = Delayed::Settings.queue)
-      case flavor.to_s
-      when 'current'
-        check_queue(queue)
-        redis.zcard(Keys::QUEUE[queue])
-      when 'future'
-        check_queue(queue)
-        redis.zcard(Keys::FUTURE_QUEUE[queue])
-      when 'failed'
-        redis.zcard(Keys::FAILED_JOBS)
-      else
-        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-      end
-    end
-
-    def self.strand_size(strand)
-      redis.llen(Keys::STRAND[strand])
-    end
-
-    def self.running_jobs()
-      self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
-    end
-
-    def self.clear_locks!(worker_name)
-      self.running_jobs.each do |job|
-        # TODO: mark the job as failed one attempt
-        job.unlock! if job.locked_by == worker_name
-      end
-      nil
-    end
-
-    # returns a list of hashes { :tag => tag_name, :count => current_count }
-    # in descending count order
-    # flavor is :current or :all
-    def self.tag_counts(flavor,
-                        limit,
-                        offset = 0)
-      raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
-      key = Keys::TAG_COUNTS[flavor]
-      redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
-    end
-
-    # perform a bulk update of a set of jobs
-    # action is :hold, :unhold, or :destroy
-    # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
-    # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
-    #
-    # see the list_jobs action for the list of available flavors and the meaning
-    # of opts[:query] for each
-    def self.bulk_update(action, opts)
-      if %w(current future).include?(opts[:flavor].to_s)
-        opts[:query] ||= Delayed::Settings.queue
-      end
-      functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
-    end
-
-    def self.create_singleton(options)
-      self.create!(options.merge(:singleton => true))
-    end
-
-    def self.unlock(jobs)
-      jobs.each(&:unlock!)
-      jobs.length
-    end
-
-    # not saved, just used as a marker when creating
-    attr_accessor :singleton, :on_conflict
-
-    def transfer_lock!(from:, to:)
-      lock_in_redis!(to)
-    end
-
-    def lock_in_redis!(worker_name)
-      self.locked_at = self.class.db_time_now
-      self.locked_by = worker_name
-      save
-    end
-
-    def unlock!
-      unlock
-      save!
-    end
-
-    def save(*a)
-      return false if destroyed?
-      result = run_callbacks(:save) do
-        if new_record?
-          run_callbacks(:create) { create }
-        else
-          update
-        end
-      end
-      changes_applied
-      result
-    end
-
-    if Rails.version < "4.1"
-      def changes_applied
-        @previously_changed = changes
-        @changed_attributes.clear
-      end
-    end
-
-    def save!(*a)
-      save(*a) || raise(RecordNotSaved)
-    end
-
-    def destroy
-      self.class.functions.destroy_job(id, self.class.db_time_now)
-      @destroyed = true
-      freeze
-    end
-
-    # take this job off the strand, and queue up the next strand job if this job
-    # was at the front
-    def tickle_strand
-      if strand.present?
-        self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
-      end
-    end
-
-    def create_and_lock!(worker_name)
-      raise "job already exists" unless new_record?
-      lock_in_redis!(worker_name)
-    end
-
-    def fail!
-      self.failed_at = self.class.db_time_now
-      save!
-      redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
-      tickle_strand
-      self
-    end
-
-    protected
-
-    def update_queues
-      if failed_at
-        self.class.functions.fail_job(id)
-      elsif locked_at
-        self.class.functions.set_running(id)
-      elsif singleton
-        job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
-        # if create_singleton returns a different job id, that means this job got
-        # deleted because there was already that other job on the strand. so
-        # replace this job with the other for returning.
-        if job_id != self.id
-          singleton = self.class.find(job_id)
-
-          self.on_conflict ||= :use_earliest
-          singleton.run_at =
-            case self.on_conflict
-            when :use_earliest
-              [singleton.run_at, run_at].min
-            when :overwrite
-              run_at
-            when :loose
-              singleton.run_at
-            end
-          singleton.handler = self.handler if self.on_conflict == :overwrite
-          singleton.save! if singleton.changed?
-          COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
-        end
-      else
-        self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
-      end
-    end
-
-    def create
-      self.id ||= SecureRandom.hex(16)
-      self.created_at = self.updated_at = Time.now.utc
-      save_job_to_redis
-      update_queues
-
-      @new_record = false
-      self.id
-    end
-
-    def update
-      self.updated_at = Time.now.utc
-      save_job_to_redis
-      update_queues
-      true
-    end
-
-    def queue_score
-      "#{priority}.#{run_at.to_i}".to_f
-    end
-
-    def save_job_to_redis
-      to_delete = []
-      attrs = {}
-      COLUMNS.each do |k|
-        v = send(k)
-        if v.nil?
-          to_delete << k if !new_record? && changed.include?(k.to_s)
-        elsif v.is_a?(ActiveSupport::TimeWithZone) || v.is_a?(Time)
-          attrs[k] = v.utc.to_f
-        else
-          attrs[k] = v.as_json
-        end
-      end
-      key = Keys::JOB[id]
-      redis.mapped_hmset(key, attrs)
-      redis.hdel(key, to_delete) unless to_delete.empty?
-    end
-
-    def self.instantiate_from_attrs(redis_attrs)
-      if redis_attrs['id'].present?
-        attrs = redis_attrs.with_indifferent_access
-        TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
-        INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
-        instantiate(attrs)
-      else
-        nil
-      end
-    end
-
-    def global_id
-      id
-    end
-
-    class Failed < Job
-      include Delayed::Backend::Base
-      def self.key_for_job_id(job_id)
-        Keys::FAILED_JOB[job_id]
-      end
-
-      def original_job_id
-        id
-      end
-    end
-  end
-end
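
As of 2.4.0 everything above is gone and only the ActiveRecord backend remains. For reference, wiring up the removed backend under 2.2.x looked roughly like the sketch below; the redis= setter comes straight from the cattr_accessor above, while the Delayed.select_backend call is an assumption about the configuration hook and is not shown in this diff:

    # config/initializers/delayed_job.rb (2.2.x, sketch)
    require 'redis'

    Delayed::Backend::Redis::Job.redis = Redis.new(url: ENV['REDIS_URL'])
    Delayed.select_backend(Delayed::Backend::Redis::Job)   # assumed hook, verify against your version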