inst-jobs 2.3.2 → 2.4.2
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +12 -0
- data/db/migrate/20210812210128_add_singleton_column.rb +203 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +182 -148
- data/lib/delayed/backend/base.rb +79 -74
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +42 -51
- data/lib/delayed/performable_method.rb +5 -7
- data/lib/delayed/periodic.rb +66 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -193
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -18
- data/lib/delayed/work_queue/parent_process/client.rb +54 -55
- data/lib/delayed/work_queue/parent_process/server.rb +215 -209
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +21 -12
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -265
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -38
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +128 -135
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +8 -8
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -7
- data/spec/delayed/work_queue/parent_process/client_spec.rb +15 -11
- data/spec/delayed/work_queue/parent_process/server_spec.rb +43 -40
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -21
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +51 -49
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/gemfiles/52.gemfile +5 -3
- data/spec/gemfiles/52.gemfile.lock +240 -0
- data/spec/gemfiles/60.gemfile +5 -3
- data/spec/gemfiles/60.gemfile.lock +1 -1
- data/spec/gemfiles/61.gemfile +5 -3
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +534 -441
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +155 -147
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +43 -40
- metadata +75 -56
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -528
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/redis_job_spec.rb +0 -148
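
The entries just above (data/lib/delayed/backend/redis/* and data/spec/redis_job_spec.rb) are the Redis job backend, removed in this release and leaving the ActiveRecord backend; its full contents follow as deletion-only hunks. For context, the sketch below shows roughly how an application on 2.3.x handed that backend its Redis connection, and therefore what stops working after the upgrade. This is a sketch only: the `redis` class accessor is real (it is declared with `cattr_accessor :redis` in the deleted job.rb below), but the initializer path and the Redis URL are illustrative.

# config/initializers/delayed_jobs.rb -- hypothetical app initializer on inst-jobs 2.3.x
require "redis"
require "inst-jobs"

# The removed backend expected the application to supply a Redis client during
# jobs configuration (cattr_accessor :redis in the deleted job.rb below).
Delayed::Backend::Redis::Job.redis = Redis.new(url: "redis://localhost:6379/0")

# After this upgrade, Delayed::Backend::Redis::Job no longer exists.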
--- a/data/lib/delayed/backend/redis/enqueue.lua
+++ /dev/null
@@ -1,29 +0,0 @@
-local job_id, queue, strand, now, for_singleton = unpack(ARGV)
-local strand_key = Keys.strand(strand)
-
--- if this is a singleton job, only queue it up if another doesn't exist on the strand
--- otherwise, delete it and return the other job id
-if for_singleton then
-  local job_ids = redis.call('LRANGE', strand_key, 0, 1)
-  local job_to_check = 1
-  if job_exists(job_ids[1]) and redis.call('HGET', Keys.job(job_ids[1]), 'locked_at') then
-    job_to_check = 2
-  end
-
-  local job_to_check_id = job_ids[job_to_check]
-  if job_exists(job_to_check_id) then
-    -- delete the new job, we found a match
-    redis.call('DEL', Keys.job(job_id))
-    return job_to_check_id
-  end
-end
-
--- if this job is in a strand, add it to the strand queue first
--- if it's not at the front of the strand, we won't enqueue it below
-if strand_key then
-  add_to_strand(job_id, strand)
-end
-
-add_to_queues(job_id, queue, now)
-
-return job_id
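
The singleton branch above is driven by a fifth ARGV flag: Functions#create_singleton (in the functions.rb hunk below) appends `true` to the same :enqueue script that Functions#enqueue runs without it. A rough sketch of the two call paths; the ids, queue, and strand are placeholders, and `redis` is assumed to be an already-configured Redis client.

functions = Delayed::Backend::Redis::Functions.new(redis)

# Plain enqueue: ARGV = [job_id, queue, strand, now]
functions.enqueue("0123abcd", "default", "course_1", Time.now)

# Singleton enqueue: same script with a trailing `true`, so if a pending job is
# already sitting on the strand, the new job is deleted and the existing job's
# id is returned instead.
functions.create_singleton("4567efgh", "default", "course_1", Time.now)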
--- a/data/lib/delayed/backend/redis/functions.rb
+++ /dev/null
@@ -1,59 +0,0 @@
-# frozen_string_literal: true
-
-require 'redis/scripting'
-
-# This module handles loading the Lua functions into Redis and running them
-module Delayed::Backend::Redis
-  class Functions < ::Redis::Scripting::Module
-    def initialize(redis)
-      super(redis, File.dirname(__FILE__))
-    end
-
-    def run_script(script, keys, argv)
-      result = nil
-      ms = Benchmark.ms { result = super }
-      line = 'Redis Jobs Timing: %s (%.1fms)' % [script.name, ms]
-      ActiveRecord::Base.logger.debug(line)
-      result
-    end
-
-    def find_available(queue, limit, offset, min_priority, max_priority, now)
-      run(:find_available, [], [queue, limit, offset, min_priority, max_priority, now.utc.to_f])
-    end
-
-    def get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
-      attrs = run(:get_and_lock_next_available, [], [queue, min_priority, max_priority, worker_name, now.utc.to_f])
-      Hash[*attrs]
-    end
-
-    def enqueue(job_id, queue, strand, now)
-      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f])
-    end
-
-    def create_singleton(job_id, queue, strand, now)
-      run(:enqueue, [], [job_id, queue, strand, now.utc.to_f, true])
-    end
-
-    def destroy_job(job_id, now)
-      run(:destroy_job, [], [job_id, now.utc.to_f])
-    end
-
-    def tickle_strand(job_id, strand, now)
-      run(:tickle_strand, [], [job_id, strand, now.utc.to_f])
-    end
-
-    def fail_job(job_id)
-      run(:fail_job, [], [job_id])
-    end
-
-    def set_running(job_id)
-      run(:set_running, [], [job_id])
-    end
-
-    def bulk_update(action, ids, flavor, query, now)
-      ids = (ids || []).join(",")
-      run(:bulk_update, [], [action, ids, flavor, query, now.utc.to_f])
-    end
-
-  end
-end
--- a/data/lib/delayed/backend/redis/get_and_lock_next_available.lua
+++ /dev/null
@@ -1,17 +0,0 @@
-local queue, min_priority, max_priority, worker_name, now = unpack(ARGV)
-local job_id = find_available(queue, 1, 0, min_priority, max_priority, now)[1]
-
-if job_exists(job_id) then
-  -- update the job with locked_by and locked_at
-  redis.call('HMSET', Keys.job(job_id), 'locked_by', worker_name, 'locked_at', now)
-
-  -- add the job to the running_jobs set
-  redis.call('ZADD', Keys.running_jobs(), now, job_id)
-  -- remove the job from the pending jobs queue
-  redis.call('ZREM', Keys.queue(queue), job_id)
-
-  -- return the list of job attributes
-  return redis.call('HGETALL', Keys.job(job_id))
-else
-  return {}
-end
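
Together with the Functions wrapper above, this script implemented the worker-side fetch: find one runnable job, stamp locked_by/locked_at, move it into running_jobs, and hand back the full attribute hash so Ruby needs no second round trip (the wrapper turns the flat HGETALL reply into a Hash). A sketch of that poll, with placeholder worker and queue names; `functions` is a Delayed::Backend::Redis::Functions instance as in the earlier sketch.

attrs = functions.get_and_lock_next_available(
  "worker-host:1234",   # locked_by value written by the script
  "default",            # queue name
  Delayed::MIN_PRIORITY,
  Delayed::MAX_PRIORITY,
  Time.now
)

if attrs.empty?
  # nothing runnable right now; a real worker sleeps and polls again
else
  attrs["id"]        # => the locked job's id
  attrs["locked_by"] # => "worker-host:1234"
end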
--- a/data/lib/delayed/backend/redis/includes/jobs_common.lua
+++ /dev/null
@@ -1,203 +0,0 @@
--- Keys holds the various functions to map to redis keys
--- These are duplicated from job.rb
-local Keys = {}
-
-Keys.job = function(id)
-  return "job/" .. id
-end
-
-Keys.running_jobs = function()
-  return "running_jobs"
-end
-
-Keys.failed_jobs = function()
-  return "failed_jobs"
-end
-
-Keys.queue = function(queue)
-  return "queue/" .. (queue or '')
-end
-
-Keys.future_queue = function(queue)
-  return Keys.queue(queue) .. "/future"
-end
-
-Keys.strand = function(strand_name)
-  if strand_name and string.len(strand_name) > 0 then
-    return "strand/" .. strand_name
-  else
-    return nil
-  end
-end
-
-Keys.tag_counts = function(flavor)
-  return "tag_counts/" .. flavor
-end
-
-Keys.tag = function(tag)
-  return "tag/" .. tag
-end
-
-Keys.waiting_strand_job_priority = function()
-  return 2000000
-end
-
--- remove the given job from the various queues
-local remove_from_queues = function(job_id, queue, strand)
-  local tag = unpack(redis.call('HMGET', Keys.job(job_id), 'tag'))
-
-  redis.call("SREM", Keys.tag(tag), job_id)
-
-  local current_delta = -redis.call('ZREM', Keys.queue(queue), job_id)
-  redis.call('ZREM', Keys.running_jobs(), job_id)
-  local future_delta = -redis.call('ZREM', Keys.future_queue(queue), job_id)
-
-  if current_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
-  end
-
-  local total_delta = current_delta + future_delta
-
-  if total_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
-  end
-
-  local strand_key = Keys.strand(strand)
-  if strand_key then
-    redis.call('LREM', strand_key, 1, job_id)
-  end
-end
-
--- returns the id for the first job on the strand, or nil if none
-local strand_next_job_id = function(strand)
-  local strand_key = Keys.strand(strand)
-  if not strand_key then return nil end
-  return redis.call('LRANGE', strand_key, 0, 0)[1]
-end
-
--- returns next_in_strand -- whether this added job is at the front of the strand
-local add_to_strand = function(job_id, strand)
-  local strand_key = Keys.strand(strand)
-  if not strand_key then return end
-  redis.call('RPUSH', strand_key, job_id) -- add to strand list
-  local next_id = strand_next_job_id(strand)
-  return next_id == job_id
-end
-
--- add this given job to the correct queues based on its state and the current time
--- also updates the tag counts and tag job lists
-local add_to_queues = function(job_id, queue, now)
-  local run_at, priority, tag, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'run_at', 'priority', 'tag', 'strand'))
-
-  redis.call("SADD", Keys.tag(tag), job_id)
-
-  if strand then
-    local next_job_id = strand_next_job_id(strand)
-    if next_job_id and next_job_id ~= job_id then
-      priority = Keys.waiting_strand_job_priority()
-    end
-  end
-
-  local current_delta = 0
-  local future_delta = 0
-
-  if run_at > now then
-    future_delta = future_delta + redis.call('ZADD', Keys.future_queue(queue), run_at, job_id)
-    current_delta = current_delta - redis.call('ZREM', Keys.queue(queue), job_id)
-  else
-    -- floor the run_at so we don't have a float in our float
-    local sort_key = priority .. '.' .. math.floor(run_at)
-    current_delta = current_delta + redis.call('ZADD', Keys.queue(queue), sort_key, job_id)
-    future_delta = future_delta - redis.call('ZREM', Keys.future_queue(queue), job_id)
-  end
-
-  if current_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
-  end
-
-  local total_delta = current_delta + future_delta
-
-  if total_delta ~= 0 then
-    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
-  end
-end
-
-local job_exists = function(job_id)
-  return job_id and redis.call('HGET', Keys.job(job_id), 'id')
-end
-
--- find jobs available for running
--- checks the future queue too, and moves and now-ready jobs
--- into the current queue
-local find_available = function(queue, limit, offset, min_priority, max_priority, now)
-  local ready_future_jobs = redis.call('ZRANGEBYSCORE', Keys.future_queue(queue), 0, now, 'limit', 0, limit)
-  for i, job_id in ipairs(ready_future_jobs) do
-    add_to_queues(job_id, queue, now)
-  end
-
-  if not min_priority or min_priority == '' then
-    min_priority = '0'
-  end
-
-  if not max_priority or max_priority == '' then
-    max_priority = "(" .. Keys.waiting_strand_job_priority()
-  else
-    max_priority = "(" .. (max_priority + 1)
-  end
-  local job_ids = redis.call('ZRANGEBYSCORE', Keys.queue(queue), min_priority, max_priority, 'limit', offset, limit)
-  for idx = table.getn(job_ids), 1, -1 do
-    local job_id = job_ids[idx]
-    if not job_exists(job_id) then
-      table.remove(job_ids, idx)
-      redis.call('ZREM', Keys.queue(queue), job_id)
-    end
-  end
-  return job_ids
-end
-
--- "tickle" the strand, removing the given job_id and setting the job at the
--- front of the strand as eligible to run, if it's not already
-local tickle_strand = function(job_id, strand, now)
-  local strand_key = Keys.strand(strand)
-
-  -- this LREM could be (relatively) slow if the strand is very large and this
-  -- job isn't near the front. however, in normal usage, we only delete from the
-  -- front. also the linked list is in memory, so even with thousands of jobs on
-  -- the strand it'll be quite fast.
-  --
-  -- alternatively we could make strands sorted sets, which would avoid a
-  -- linear search to delete this job. jobs need to be sorted on insertion
-  -- order though, and we're using GUIDs for keys here rather than an
-  -- incrementing integer, so we'd have to use an artificial counter as the
-  -- sort key (through `incrby strand_name` probably).
-  redis.call('LREM', strand_key, 1, job_id)
-  -- normally this loop will only run once, but we loop so that if there's any
-  -- job ids on the strand that don't actually exist anymore, we'll throw them
-  -- out and keep searching until we find a legit job or the strand is empty
-  while true do
-    local next_id = redis.call('LRANGE', strand_key, 0, 0)[1]
-    if next_id == nil then
-      break
-    elseif job_exists(next_id) then
-      -- technically jobs on the same strand can be in different queues,
-      -- though that functionality isn't currently used
-      local queue = redis.call('HGET', Keys.job(next_id), 'queue')
-      add_to_queues(next_id, queue, now)
-      break
-    else
-      redis.call('LPOP', strand_key)
-    end
-  end
-end
-
-local destroy_job = function(job_id, now)
-  local queue, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'queue', 'strand'))
-  remove_from_queues(job_id, queue, strand)
-
-  if Keys.strand(strand) then
-    tickle_strand(job_id, strand, now)
-  end
-
-  redis.call('ZREM', Keys.failed_jobs(), job_id)
-  redis.call('DEL', Keys.job(job_id))
-end
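
The current-queue score built in add_to_queues above is the "<priority>.<floor(run_at)>" string treated as a float (the deleted job.rb builds the same value in queue_score), so a single ZRANGEBYSCORE walks jobs in priority order and, within one priority, in run_at order. A small standalone illustration of how those scores compare; the timestamp is a placeholder.

run_at = Time.utc(2021, 8, 1, 12, 0, 0).to_i  # 1627819200

high_priority_score = "#{0}.#{run_at}".to_f   # => 0.16278192
low_priority_score  = "#{10}.#{run_at}".to_f  # => 10.16278192

# ZRANGEBYSCORE returns ascending scores, so priority 0 sorts ahead of
# priority 10, and within one priority an earlier run_at gives a smaller
# fractional part.
high_priority_score < low_priority_score # => true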
--- a/data/lib/delayed/backend/redis/job.rb
+++ /dev/null
@@ -1,528 +0,0 @@
-# frozen_string_literal: true
-
-# This can't currently be made compatible with redis cluster, because the Lua functions
-# access keys that aren't in their keys argument list (since they pop jobs off
-# a queue and then update the job with that id).
-
-# still TODO:
-# * a consequence of our ignore-redis-failures code is that if redis is unavailable, creating delayed jobs silently fails, which is probably not what we want
-# * need a way to migrate between jobs backends
-# * we need some auditors:
-# * fail jobs in running_jobs if they've timed out
-# * have pools audit their workers and immediately fail jobs locked by dead workers (make sure this handles the restart case where two pools are running)
-# * have a master auditor that fails jobs if a whole pool dies
-# * audit strands ocasionally, look for any stuck strands where the strand queue isn't empty but there's no strand job running or queued
-module Delayed::Backend::Redis
-  require 'delayed/backend/redis/functions'
-
-  class Job
-    extend ActiveModel::Callbacks
-    define_model_callbacks :create, :save
-    include ActiveModel::Dirty
-    include Delayed::Backend::Base
-    # This redis instance needs to be set by the application during jobs configuration
-    cattr_accessor :redis
-
-    # An overview of where and when things are stored in redis:
-    #
-    # Jobs are given a UUID for an id, rather than an incrementing integer id.
-    # The job attributes are then stored in a redis hash at job/<id>. Attribute
-    # values are generally stored as their json representation, except for
-    # timestamps, which as stored as floating point utc-time-since-unix-epoch
-    # values, so that we can compare timestamps in Lua without a date parser.
-    #
-    # Jobs that are schedule to run immediately (in the present/past) are
-    # inserted into the queue named queue/<queue_name>. The queue is a sorted
-    # set, with the value being the job id and the weight being a floating point
-    # value, <priority>.<run_at>. This formatting is key to efficient
-    # querying of the next job to run.
-    #
-    # Jobs that are scheduled to run in the future are not inserted into the
-    # queue, but rather a future queue named queue/<queue_name>/future. This
-    # queue is also a sorted set, with the value being the job id, but the weight
-    # is just the <run_at> value.
-    #
-    # If the job is on a strand, the flow is different. First, it's inserted into
-    # a list named strand/<strand>. When strand jobs are inserted into the
-    # current jobs queue, we check if they're next to run in the strand. If not,
-    # we give them a special priority that is greater than MAX_PRIORITY, so that
-    # they won't run. When a strand job is finished, failed or deleted,
-    # "tickle_strand" is called, which removes that job from the list and if that
-    # job was at the front of the list, changes the priority on the next job so
-    # that it's eligible to run.
-    #
-    # For singletons, the flow is the same as for other strand jobs, except that
-    # the job is thrown out if there are already any non-running jobs in the
-    # strand list.
-    #
-    # If a job fails, it's removed from the normal queues and inserted into the
-    # failed_jobs sorted set, with job id as the value and failure time as the
-    # key. The hash of job attributes is also renamed from job/<id> to
-    # failed_job/<id> -- use Delayed::Job::Failed to query those jobs, same as
-    # with AR jobs.
-    #
-    # We also insert into some other data structures for admin functionality.
-    # tag_counts/current and tag_counts/all are sorted sets storing the count of
-    # jobs for each tag. tag/<tag> is a set of existing job ids that have that tag.
-    #
-    # Most all of this happens in Lua functions, for atomicity. See the other
-    # files in this directory -- functions.rb is a wrapper to call the lua
-    # functions, and the individual functions are defined in .lua files in this
-    # directory.
-
-    # these key mappings are duplicated in the redis lua code, in include.lua
-    module Keys
-      RUNNING_JOBS = "running_jobs"
-      FAILED_JOBS = "failed_jobs"
-      JOB = proc { |id| "job/#{id}" }
-      FAILED_JOB = proc { |id| "failed_job/#{id}" }
-      QUEUE = proc { |name| "queue/#{name}" }
-      FUTURE_QUEUE = proc { |name| "#{QUEUE[name]}/future" }
-      STRAND = proc { |strand| strand ? "strand/#{strand}" : nil }
-      TAG_COUNTS = proc { |flavor| "tag_counts/#{flavor}" }
-      TAG = proc { |tag| "tag/#{tag}" }
-    end
-
-    WAITING_STRAND_JOB_PRIORITY = 2000000
-    if WAITING_STRAND_JOB_PRIORITY <= Delayed::MAX_PRIORITY
-      # if you change this, note that the value is duplicated in include.lua
-      raise("Delayed::MAX_PRIORITY must be less than #{WAITING_STRAND_JOB_PRIORITY}")
-    end
-
-    COLUMNS = []
-
-    # We store time attributes in redis as floats so we don't have to do
-    # timestamp parsing in lua.
-    TIMESTAMP_COLUMNS = []
-    INTEGER_COLUMNS = []
-
-    def self.column(name, type)
-      COLUMNS << name
-
-      if type == :timestamp
-        TIMESTAMP_COLUMNS << name
-      elsif type == :integer
-        INTEGER_COLUMNS << name
-      end
-
-      attr_reader(name)
-      define_attribute_methods([name])
-      # Custom attr_writer that updates the dirty status.
-      class_eval(<<-EOS, __FILE__, __LINE__ + 1)
-        def #{name}=(new_value)
-          #{name}_will_change! unless new_value == self.#{name}
-          @#{name} = new_value
-        end
-      EOS
-    end
-
-    column(:id, :string)
-    column(:priority, :integer)
-    column(:attempts, :integer)
-    column(:handler, :string)
-    column(:last_error, :string)
-    column(:queue, :string)
-    column(:run_at, :timestamp)
-    column(:locked_at, :timestamp)
-    column(:failed_at, :timestamp)
-    column(:locked_by, :string)
-    column(:created_at, :timestamp)
-    column(:updated_at, :timestamp)
-    column(:tag, :string)
-    column(:max_attempts, :integer)
-    column(:strand, :string)
-    column(:source, :string)
-    column(:expires_at, :timestamp)
-
-    def initialize(attrs = {})
-      attrs.each { |k, v| self.send("#{k}=", v) }
-      self.priority ||= 0
-      self.attempts ||= 0
-      @new_record = true
-    end
-
-    def self.instantiate(attrs)
-      result = new(attrs)
-      result.instance_variable_set(:@new_record, false)
-      result
-    end
-
-    def self.create(attrs = {})
-      result = new(attrs)
-      result.save
-      result
-    end
-
-    def self.create!(attrs = {})
-      result = new(attrs)
-      result.save!
-      result
-    end
-
-    def [](key)
-      send(key)
-    end
-
-    def []=(key, value)
-      send("#{key}=", value)
-    end
-
-    def self.find(ids)
-      if Array === ids
-        find_some(ids, {})
-      else
-        find_one(ids, {})
-      end
-    end
-
-    def new_record?
-      !!@new_record
-    end
-
-    def destroyed?
-      !!@destroyed
-    end
-
-    def ==(other)
-      other.is_a?(self.class) && id == other.id
-    end
-
-    def hash
-      id.hash
-    end
-
-    def self.reconnect!
-      # redis cluster responds to reconnect directly,
-      # but individual redis needs it to be called on client
-      redis.respond_to?(:reconnect) ?
-        redis.reconnect :
-        redis.client.reconnect
-    end
-
-    def self.functions
-      @@functions ||= Delayed::Backend::Redis::Functions.new(redis)
-    end
-
-    def self.find_one(id, options)
-      job = self.get_with_ids([id]).first
-      job || raise(ActiveRecord::RecordNotFound, "Couldn't find Job with ID=#{id}")
-    end
-
-    def self.find_some(ids, options)
-      self.get_with_ids(ids).compact
-    end
-
-    def self.get_with_ids(ids)
-      ids.map { |id| self.instantiate_from_attrs(redis.hgetall(key_for_job_id(id))) }
-    end
-
-    def self.key_for_job_id(job_id)
-      Keys::JOB[job_id]
-    end
-
-    def self.get_and_lock_next_available(worker_name,
-                                         queue = Delayed::Settings.queue,
-                                         min_priority = Delayed::MIN_PRIORITY,
-                                         max_priority = Delayed::MAX_PRIORITY,
-                                         prefetch: nil,
-                                         prefetch_owner: nil,
-                                         forced_latency: nil)
-
-      check_queue(queue)
-      check_priorities(min_priority, max_priority)
-      if worker_name.is_a?(Array)
-        multiple_workers = true
-        worker_name = worker_name.first
-      end
-
-      # as an optimization this lua function returns the hash of job attributes,
-      # rather than just a job id, saving a round trip
-      now = db_time_now
-      now -= forced_latency if forced_latency
-      job_attrs = functions.get_and_lock_next_available(worker_name, queue, min_priority, max_priority, now)
-      job = instantiate_from_attrs(job_attrs) # will return nil if the attrs are blank
-      if multiple_workers
-        if job.nil?
-          job = {}
-        else
-          job = { worker_name => job }
-        end
-      end
-      job
-    end
-
-    def self.find_available(limit,
-                            queue = Delayed::Settings.queue,
-                            min_priority = Delayed::MIN_PRIORITY,
-                            max_priority = Delayed::MAX_PRIORITY)
-
-      check_queue(queue)
-      check_priorities(min_priority, max_priority)
-
-      self.find(functions.find_available(queue, limit, 0, min_priority, max_priority, db_time_now))
-    end
-
-    # get a list of jobs of the given flavor in the given queue
-    # flavor is :current, :future, :failed, :strand or :tag
-    # depending on the flavor, query has a different meaning:
-    # for :current and :future, it's the queue name (defaults to Delayed::Settings.queue)
-    # for :strand it's the strand name
-    # for :tag it's the tag name
-    # for :failed it's ignored
-    def self.list_jobs(flavor,
-                       limit,
-                       offset = 0,
-                       query = nil)
-      case flavor.to_s
-      when 'current'
-        query ||= Delayed::Settings.queue
-        check_queue(query)
-        self.find(functions.find_available(query, limit, offset, 0, "+inf", db_time_now))
-      when 'future'
-        query ||= Delayed::Settings.queue
-        check_queue(query)
-        self.find(redis.zrangebyscore(Keys::FUTURE_QUEUE[query], 0, "+inf", :limit => [offset, limit]))
-      when 'failed'
-        Failed.find(redis.zrevrangebyscore(Keys::FAILED_JOBS, "+inf", 0, :limit => [offset, limit]))
-      when 'strand'
-        self.find(redis.lrange(Keys::STRAND[query], offset, offset + limit - 1))
-      when 'tag'
-        # This is optimized for writing, since list_jobs(:tag) will only ever happen in the admin UI
-        ids = redis.smembers(Keys::TAG[query])
-        self.find(ids[offset, limit])
-      else
-        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-      end
-    end
-
-    # get the total job count for the given flavor
-    # flavor is :current, :future or :failed
-    # for the :failed flavor, queue is currently ignored
-    def self.jobs_count(flavor,
-                        queue = Delayed::Settings.queue)
-      case flavor.to_s
-      when 'current'
-        check_queue(queue)
-        redis.zcard(Keys::QUEUE[queue])
-      when 'future'
-        check_queue(queue)
-        redis.zcard(Keys::FUTURE_QUEUE[queue])
-      when 'failed'
-        redis.zcard(Keys::FAILED_JOBS)
-      else
-        raise ArgumentError, "invalid flavor: #{flavor.inspect}"
-      end
-    end
-
-    def self.strand_size(strand)
-      redis.llen(Keys::STRAND[strand])
-    end
-
-    def self.running_jobs()
-      self.find(redis.zrangebyscore(Keys::RUNNING_JOBS, 0, "+inf"))
-    end
-
-    def self.clear_locks!(worker_name)
-      self.running_jobs.each do |job|
-        # TODO: mark the job as failed one attempt
-        job.unlock! if job.locked_by == worker_name
-      end
-      nil
-    end
-
-    # returns a list of hashes { :tag => tag_name, :count => current_count }
-    # in descending count order
-    # flavor is :current or :all
-    def self.tag_counts(flavor,
-                        limit,
-                        offset = 0)
-      raise(ArgumentError, "invalid flavor: #{flavor.inspect}") unless %w(current all).include?(flavor.to_s)
-      key = Keys::TAG_COUNTS[flavor]
-      redis.zrevrangebyscore(key, '+inf', 1, :limit => [offset, limit], :withscores => true).map { |tag, count| { :tag => tag, :count => count } }
-    end
-
-    # perform a bulk update of a set of jobs
-    # action is :hold, :unhold, or :destroy
-    # to specify the jobs to act on, either pass opts[:ids] = [list of job ids]
-    # or opts[:flavor] = <some flavor> to perform on all jobs of that flavor
-    #
-    # see the list_jobs action for the list of available flavors and the meaning
-    # of opts[:query] for each
-    def self.bulk_update(action, opts)
-      if %w(current future).include?(opts[:flavor].to_s)
-        opts[:query] ||= Delayed::Settings.queue
-      end
-      functions.bulk_update(action, opts[:ids], opts[:flavor], opts[:query], db_time_now)
-    end
-
-    def self.create_singleton(options)
-      self.create!(options.merge(:singleton => true))
-    end
-
-    def self.unlock(jobs)
-      jobs.each(&:unlock!)
-      jobs.length
-    end
-
-    # not saved, just used as a marker when creating
-    attr_accessor :singleton, :on_conflict
-
-    def transfer_lock!(from:, to:)
-      lock_in_redis!(to)
-    end
-
-    def lock_in_redis!(worker_name)
-      self.locked_at = self.class.db_time_now
-      self.locked_by = worker_name
-      save
-    end
-
-    def unlock!
-      unlock
-      save!
-    end
-
-    def save(*a)
-      return false if destroyed?
-      result = run_callbacks(:save) do
-        if new_record?
-          run_callbacks(:create) { create }
-        else
-          update
-        end
-      end
-      changes_applied
-      result
-    end
-
-    def save!(*a)
-      save(*a) || raise(RecordNotSaved)
-    end
-
-    def destroy
-      self.class.functions.destroy_job(id, self.class.db_time_now)
-      @destroyed = true
-      freeze
-    end
-
-    # take this job off the strand, and queue up the next strand job if this job
-    # was at the front
-    def tickle_strand
-      if strand.present?
-        self.class.functions.tickle_strand(id, strand, self.class.db_time_now)
-      end
-    end
-
-    def create_and_lock!(worker_name)
-      raise "job already exists" unless new_record?
-      lock_in_redis!(worker_name)
-    end
-
-    def fail!
-      self.failed_at = self.class.db_time_now
-      save!
-      redis.rename Keys::JOB[id], Keys::FAILED_JOB[id]
-      tickle_strand
-      self
-    end
-
-    protected
-
-    def update_queues
-      if failed_at
-        self.class.functions.fail_job(id)
-      elsif locked_at
-        self.class.functions.set_running(id)
-      elsif singleton
-        job_id = self.class.functions.create_singleton(id, queue, strand, self.class.db_time_now)
-        # if create_singleton returns a different job id, that means this job got
-        # deleted because there was already that other job on the strand. so
-        # replace this job with the other for returning.
-        if job_id != self.id
-          singleton = self.class.find(job_id)
-
-          self.on_conflict ||= :use_earliest
-          singleton.run_at =
-            case self.on_conflict
-            when :use_earliest
-              [singleton.run_at, run_at].min
-            when :overwrite
-              run_at
-            when :loose
-              singleton.run_at
-            end
-          singleton.handler = self.handler if self.on_conflict == :overwrite
-          singleton.save! if singleton.changed?
-          COLUMNS.each { |c| send("#{c}=", singleton.send(c)) }
-        end
-      else
-        self.class.functions.enqueue(id, queue, strand, self.class.db_time_now)
-      end
-    end
-
-    def create
-      self.id ||= SecureRandom.hex(16)
-      self.created_at = self.updated_at = Time.now.utc
-      save_job_to_redis
-      update_queues
-
-      @new_record = false
-      self.id
-    end
-
-    def update
-      self.updated_at = Time.now.utc
-      save_job_to_redis
-      update_queues
-      true
-    end
-
-    def queue_score
-      "#{priority}.#{run_at.to_i}".to_f
-    end
-
-    def save_job_to_redis
-      to_delete = []
-      attrs = {}
-      COLUMNS.each do |k|
-        v = send(k)
-        if v.nil?
-          to_delete << k if !new_record? && changed.include?(k.to_s)
-        elsif v.is_a?(ActiveSupport::TimeWithZone) || v.is_a?(Time)
-          attrs[k] = v.utc.to_f
-        else
-          attrs[k] = v.as_json
-        end
-      end
-      key = Keys::JOB[id]
-      redis.mapped_hmset(key, attrs)
-      redis.hdel(key, to_delete) unless to_delete.empty?
-    end
-
-    def self.instantiate_from_attrs(redis_attrs)
-      if redis_attrs['id'].present?
-        attrs = redis_attrs.with_indifferent_access
-        TIMESTAMP_COLUMNS.each { |k| attrs[k] = Time.zone.at(attrs[k].to_f) if attrs[k] }
-        INTEGER_COLUMNS.each { |k| attrs[k] = attrs[k].to_i if attrs[k] }
-        instantiate(attrs)
-      else
-        nil
-      end
-    end
-
-    def global_id
-      id
-    end
-
-    class Failed < Job
-      include Delayed::Backend::Base
-      def self.key_for_job_id(job_id)
-        Keys::FAILED_JOB[job_id]
-      end
-
-      def original_job_id
-        id
-      end
-    end
-  end
-end
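
When update_queues above detects a singleton collision (create_singleton returns a different job id), the surviving job's run_at is chosen by on_conflict, which defaults to :use_earliest. A standalone illustration of that resolution; the times and the chosen strategy are placeholders.

existing_run_at = Time.utc(2021, 8, 1, 12) # run_at of the job already on the strand
new_run_at      = Time.utc(2021, 8, 1, 11) # run_at of the job that just collided
on_conflict     = :use_earliest            # the default in the deleted code

resolved_run_at =
  case on_conflict
  when :use_earliest then [existing_run_at, new_run_at].min
  when :overwrite    then new_run_at      # :overwrite also copies the handler over
  when :loose        then existing_run_at # keep the existing job untouched
  end
# => 2021-08-01 11:00:00 UTC with :use_earliest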