inst-jobs 2.3.3 → 2.4.11
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/db/migrate/20101216224513_create_delayed_jobs.rb +9 -7
- data/db/migrate/20110531144916_cleanup_delayed_jobs_indexes.rb +8 -13
- data/db/migrate/20110610213249_optimize_delayed_jobs.rb +8 -8
- data/db/migrate/20110831210257_add_delayed_jobs_next_in_strand.rb +25 -25
- data/db/migrate/20120510004759_delayed_jobs_delete_trigger_lock_for_update.rb +4 -8
- data/db/migrate/20120531150712_drop_psql_jobs_pop_fn.rb +1 -3
- data/db/migrate/20120607164022_delayed_jobs_use_advisory_locks.rb +11 -15
- data/db/migrate/20120607181141_index_jobs_on_locked_by.rb +1 -1
- data/db/migrate/20120608191051_add_jobs_run_at_index.rb +2 -2
- data/db/migrate/20120927184213_change_delayed_jobs_handler_to_text.rb +1 -1
- data/db/migrate/20140505215510_copy_failed_jobs_original_id.rb +2 -3
- data/db/migrate/20150807133223_add_max_concurrent_to_jobs.rb +9 -13
- data/db/migrate/20151210162949_improve_max_concurrent.rb +4 -8
- data/db/migrate/20161206323555_add_back_default_string_limits_jobs.rb +3 -2
- data/db/migrate/20181217155351_speed_up_max_concurrent_triggers.rb +13 -17
- data/db/migrate/20200330230722_add_id_to_get_delayed_jobs_index.rb +8 -8
- data/db/migrate/20200824222232_speed_up_max_concurrent_delete_trigger.rb +72 -77
- data/db/migrate/20200825011002_add_strand_order_override.rb +93 -97
- data/db/migrate/20210809145804_add_n_strand_index.rb +3 -3
- data/db/migrate/20210812210128_add_singleton_column.rb +200 -0
- data/db/migrate/20210917232626_add_delete_conflicting_singletons_before_unlock_trigger.rb +27 -0
- data/db/migrate/20210928174754_fix_singleton_condition_in_before_insert.rb +56 -0
- data/exe/inst_jobs +3 -2
- data/lib/delayed/backend/active_record.rb +204 -150
- data/lib/delayed/backend/base.rb +106 -82
- data/lib/delayed/batch.rb +11 -9
- data/lib/delayed/cli.rb +98 -84
- data/lib/delayed/core_ext/kernel.rb +4 -2
- data/lib/delayed/daemon.rb +70 -74
- data/lib/delayed/job_tracking.rb +26 -25
- data/lib/delayed/lifecycle.rb +27 -24
- data/lib/delayed/log_tailer.rb +17 -17
- data/lib/delayed/logging.rb +13 -16
- data/lib/delayed/message_sending.rb +43 -52
- data/lib/delayed/performable_method.rb +6 -8
- data/lib/delayed/periodic.rb +72 -65
- data/lib/delayed/plugin.rb +2 -4
- data/lib/delayed/pool.rb +198 -193
- data/lib/delayed/server/helpers.rb +6 -6
- data/lib/delayed/server.rb +51 -54
- data/lib/delayed/settings.rb +93 -81
- data/lib/delayed/testing.rb +21 -22
- data/lib/delayed/version.rb +1 -1
- data/lib/delayed/work_queue/in_process.rb +21 -18
- data/lib/delayed/work_queue/parent_process/client.rb +54 -55
- data/lib/delayed/work_queue/parent_process/server.rb +219 -208
- data/lib/delayed/work_queue/parent_process.rb +52 -53
- data/lib/delayed/worker/consul_health_check.rb +21 -19
- data/lib/delayed/worker/health_check.rb +29 -22
- data/lib/delayed/worker/null_health_check.rb +3 -1
- data/lib/delayed/worker/process_helper.rb +8 -9
- data/lib/delayed/worker.rb +271 -265
- data/lib/delayed/yaml_extensions.rb +12 -10
- data/lib/delayed_job.rb +37 -38
- data/lib/inst-jobs.rb +1 -1
- data/spec/active_record_job_spec.rb +129 -136
- data/spec/delayed/cli_spec.rb +7 -7
- data/spec/delayed/daemon_spec.rb +10 -9
- data/spec/delayed/message_sending_spec.rb +16 -9
- data/spec/delayed/periodic_spec.rb +13 -12
- data/spec/delayed/server_spec.rb +38 -38
- data/spec/delayed/settings_spec.rb +26 -25
- data/spec/delayed/work_queue/in_process_spec.rb +7 -8
- data/spec/delayed/work_queue/parent_process/client_spec.rb +17 -12
- data/spec/delayed/work_queue/parent_process/server_spec.rb +70 -41
- data/spec/delayed/work_queue/parent_process_spec.rb +21 -23
- data/spec/delayed/worker/consul_health_check_spec.rb +22 -22
- data/spec/delayed/worker/health_check_spec.rb +60 -52
- data/spec/delayed/worker_spec.rb +28 -25
- data/spec/sample_jobs.rb +45 -15
- data/spec/shared/delayed_batch.rb +74 -67
- data/spec/shared/delayed_method.rb +143 -102
- data/spec/shared/performable_method.rb +39 -38
- data/spec/shared/shared_backend.rb +547 -441
- data/spec/shared/testing.rb +14 -14
- data/spec/shared/worker.rb +155 -147
- data/spec/shared_jobs_specs.rb +13 -13
- data/spec/spec_helper.rb +46 -41
- metadata +79 -55
- data/lib/delayed/backend/redis/bulk_update.lua +0 -50
- data/lib/delayed/backend/redis/destroy_job.lua +0 -2
- data/lib/delayed/backend/redis/enqueue.lua +0 -29
- data/lib/delayed/backend/redis/fail_job.lua +0 -5
- data/lib/delayed/backend/redis/find_available.lua +0 -3
- data/lib/delayed/backend/redis/functions.rb +0 -59
- data/lib/delayed/backend/redis/get_and_lock_next_available.lua +0 -17
- data/lib/delayed/backend/redis/includes/jobs_common.lua +0 -203
- data/lib/delayed/backend/redis/job.rb +0 -528
- data/lib/delayed/backend/redis/set_running.lua +0 -5
- data/lib/delayed/backend/redis/tickle_strand.lua +0 -2
- data/spec/gemfiles/52.gemfile +0 -7
- data/spec/gemfiles/60.gemfile +0 -7
- data/spec/gemfiles/60.gemfile.lock +0 -246
- data/spec/gemfiles/61.gemfile +0 -7
- data/spec/redis_job_spec.rb +0 -148
@@ -1,203 +0,0 @@
|
|
1
|
-
-- Keys holds the various functions to map to redis keys
-- These are duplicated from job.rb
local Keys = {}

-- Hash holding a single job's attributes.
function Keys.job(id)
  return "job/" .. id
end

-- Sorted set of jobs currently locked by workers.
function Keys.running_jobs()
  return "running_jobs"
end

-- Sorted set of permanently failed jobs.
function Keys.failed_jobs()
  return "failed_jobs"
end

-- Sorted set of runnable jobs for the named queue ('' when unnamed).
function Keys.queue(queue)
  return "queue/" .. (queue or '')
end

-- Sorted set of jobs scheduled to run later for the named queue.
function Keys.future_queue(queue)
  return Keys.queue(queue) .. "/future"
end

-- List of job ids on a strand; nil when no strand name is given.
function Keys.strand(strand_name)
  if strand_name and string.len(strand_name) > 0 then
    return "strand/" .. strand_name
  end
  return nil
end

-- Per-tag job-count sorted set; flavor is 'current' or 'all'.
function Keys.tag_counts(flavor)
  return "tag_counts/" .. flavor
end

-- Set of job ids sharing a tag.
function Keys.tag(tag)
  return "tag/" .. tag
end

-- Sentinel priority given to jobs queued behind another job on their
-- strand, keeping them out of the normal pop range.
function Keys.waiting_strand_job_priority()
  return 2000000
end
|
44
|
-
|
45
|
-
-- remove the given job from the various queues
-- Deletes job_id from its tag set, the current/running/future queues and,
-- when present, its strand list, decrementing the per-tag counters to match.
-- NOTE(review): assumes the job hash still exists so 'tag' can be read --
-- confirm callers only invoke this before deleting the hash.
local remove_from_queues = function(job_id, queue, strand)
  local tag = unpack(redis.call('HMGET', Keys.job(job_id), 'tag'))

  redis.call("SREM", Keys.tag(tag), job_id)

  -- ZREM returns the number of members removed (0 or 1); negate it to get
  -- the delta to apply to the tag counters
  local current_delta = -redis.call('ZREM', Keys.queue(queue), job_id)
  redis.call('ZREM', Keys.running_jobs(), job_id)
  local future_delta = -redis.call('ZREM', Keys.future_queue(queue), job_id)

  if current_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
  end

  -- 'all' counts both current and future jobs for the tag
  local total_delta = current_delta + future_delta

  if total_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
  end

  -- jobs on a strand are also tracked in a per-strand list
  local strand_key = Keys.strand(strand)
  if strand_key then
    redis.call('LREM', strand_key, 1, job_id)
  end
end
|
70
|
-
|
71
|
-
-- returns the id for the first job on the strand, or nil if none
local strand_next_job_id = function(strand)
  local strand_key = Keys.strand(strand)
  if strand_key then
    -- LRANGE 0,0 yields a zero- or one-element table; index 1 is the head
    return redis.call('LRANGE', strand_key, 0, 0)[1]
  end
  return nil
end
|
77
|
-
|
78
|
-
-- returns next_in_strand -- whether this added job is at the front of the strand
local add_to_strand = function(job_id, strand)
  local strand_key = Keys.strand(strand)
  if not strand_key then return end
  -- append to the strand's list, then report whether we landed at the head
  redis.call('RPUSH', strand_key, job_id)
  return strand_next_job_id(strand) == job_id
end
|
86
|
-
|
87
|
-
-- add this given job to the correct queues based on its state and the current time
-- also updates the tag counts and tag job lists
-- NOTE(review): run_at/priority come back from HMGET as strings; the
-- `run_at > now` comparison relies on the caller passing a compatible value
-- for now -- confirm against the calling scripts.
local add_to_queues = function(job_id, queue, now)
  local run_at, priority, tag, strand = unpack(redis.call('HMGET', Keys.job(job_id), 'run_at', 'priority', 'tag', 'strand'))

  redis.call("SADD", Keys.tag(tag), job_id)

  -- a job queued behind another job on its strand gets the sentinel priority,
  -- so it won't be popped until the strand is tickled
  if strand then
    local next_job_id = strand_next_job_id(strand)
    if next_job_id and next_job_id ~= job_id then
      priority = Keys.waiting_strand_job_priority()
    end
  end

  local current_delta = 0
  local future_delta = 0

  if run_at > now then
    -- not yet runnable: park in the future queue keyed by run_at
    -- (ZADD returns 1 only when the member is newly added, so re-adding
    -- an already-queued job contributes 0 to the counters)
    future_delta = future_delta + redis.call('ZADD', Keys.future_queue(queue), run_at, job_id)
    current_delta = current_delta - redis.call('ZREM', Keys.queue(queue), job_id)
  else
    -- runnable now: score is priority.run_at so equal priorities run oldest-first
    -- floor the run_at so we don't have a float in our float
    local sort_key = priority .. '.' .. math.floor(run_at)
    current_delta = current_delta + redis.call('ZADD', Keys.queue(queue), sort_key, job_id)
    future_delta = future_delta - redis.call('ZREM', Keys.future_queue(queue), job_id)
  end

  if current_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('current'), current_delta, tag)
  end

  -- 'all' counts both current and future jobs for the tag
  local total_delta = current_delta + future_delta

  if total_delta ~= 0 then
    redis.call('ZINCRBY', Keys.tag_counts('all'), total_delta, tag)
  end
end
|
124
|
-
|
125
|
-
-- Truthy (the stored id) when the job hash for job_id still exists;
-- nil/false when job_id is absent or the hash is gone.
local job_exists = function(job_id)
  if not job_id then return job_id end
  return redis.call('HGET', Keys.job(job_id), 'id')
end
|
128
|
-
|
129
|
-
-- find jobs available for running
-- checks the future queue too, and moves any now-ready jobs
-- into the current queue
--
-- queue: queue name ('' for the unnamed queue)
-- limit/offset: pagination over the candidate job ids
-- min_priority/max_priority: inclusive priority bounds; nil/'' means unbounded
--   (the upper bound is always capped below the waiting-strand sentinel so
--   jobs queued behind a strand are never returned)
-- now: current time, used to promote ready future jobs
-- returns: array of runnable job ids; stale ids (whose job hash is gone) are
--   pruned from the result and scrubbed from the queue as a side effect
local find_available = function(queue, limit, offset, min_priority, max_priority, now)
  -- promote jobs whose run_at has arrived out of the future queue
  local ready_future_jobs = redis.call('ZRANGEBYSCORE', Keys.future_queue(queue), 0, now, 'limit', 0, limit)
  for _, job_id in ipairs(ready_future_jobs) do
    add_to_queues(job_id, queue, now)
  end

  if not min_priority or min_priority == '' then
    min_priority = '0'
  end

  -- '(' marks an exclusive bound: "(max+1" keeps max_priority itself included
  -- while always excluding the waiting-strand sentinel priority
  if not max_priority or max_priority == '' then
    max_priority = "(" .. Keys.waiting_strand_job_priority()
  else
    max_priority = "(" .. (max_priority + 1)
  end
  local job_ids = redis.call('ZRANGEBYSCORE', Keys.queue(queue), min_priority, max_priority, 'limit', offset, limit)
  -- iterate backwards so table.remove doesn't skip entries
  -- (# replaces the deprecated table.getn, which was removed in Lua 5.2)
  for idx = #job_ids, 1, -1 do
    local job_id = job_ids[idx]
    if not job_exists(job_id) then
      table.remove(job_ids, idx)
      redis.call('ZREM', Keys.queue(queue), job_id)
    end
  end
  return job_ids
end
|
157
|
-
|
158
|
-
-- "tickle" the strand, removing the given job_id and setting the job at the
|
159
|
-
-- front of the strand as eligible to run, if it's not already
|
160
|
-
local tickle_strand = function(job_id, strand, now)
|
161
|
-
local strand_key = Keys.strand(strand)
|
162
|
-
|
163
|
-
-- this LREM could be (relatively) slow if the strand is very large and this
|
164
|
-
-- job isn't near the front. however, in normal usage, we only delete from the
|
165
|
-
-- front. also the linked list is in memory, so even with thousands of jobs on
|
166
|
-
-- the strand it'll be quite fast.
|
167
|
-
--
|
168
|
-
-- alternatively we could make strands sorted sets, which would avoid a
|
169
|
-
-- linear search to delete this job. jobs need to be sorted on insertion
|
170
|
-
-- order though, and we're using GUIDs for keys here rather than an
|
171
|
-
-- incrementing integer, so we'd have to use an artificial counter as the
|
172
|
-
-- sort key (through `incrby strand_name` probably).
|
173
|
-
redis.call('LREM', strand_key, 1, job_id)
|
174
|
-
-- normally this loop will only run once, but we loop so that if there's any
|
175
|
-
-- job ids on the strand that don't actually exist anymore, we'll throw them
|
176
|
-
-- out and keep searching until we find a legit job or the strand is empty
|
177
|
-
while true do
|
178
|
-
local next_id = redis.call('LRANGE', strand_key, 0, 0)[1]
|
179
|
-
if next_id == nil then
|
180
|
-
break
|
181
|
-
elseif job_exists(next_id) then
|
182
|
-
-- technically jobs on the same strand can be in different queues,
|
183
|
-
-- though that functionality isn't currently used
|
184
|
-
local queue = redis.call('HGET', Keys.job(next_id), 'queue')
|
185
|
-
add_to_queues(next_id, queue, now)
|
186
|
-
break
|
187
|
-
else
|
188
|
-
redis.call('LPOP', strand_key)
|
189
|
-
end
|
190
|
-
end
|
191
|
-
end
|
192
|
-
|
193
|
-
-- Remove every trace of a job: dequeue it everywhere, advance its strand
-- (if any), drop it from the failed set, and delete the job hash itself.
local destroy_job = function(job_id, now)
  local job_key = Keys.job(job_id)
  local attrs = redis.call('HMGET', job_key, 'queue', 'strand')
  local queue, strand = attrs[1], attrs[2]

  remove_from_queues(job_id, queue, strand)

  -- if the job sat on a strand, let the next job on that strand become runnable
  if Keys.strand(strand) then
    tickle_strand(job_id, strand, now)
  end

  redis.call('ZREM', Keys.failed_jobs(), job_id)
  redis.call('DEL', job_key)
end
|