sidekiq-unique-jobs 6.0.25 → 7.1.33
Sign up to get free protection for your applications and to get access to all the features.
- checksums.yaml +4 -4
- data/CHANGELOG.md +1157 -41
- data/README.md +825 -291
- data/lib/sidekiq_unique_jobs/batch_delete.rb +124 -0
- data/lib/sidekiq_unique_jobs/changelog.rb +78 -0
- data/lib/sidekiq_unique_jobs/cli.rb +57 -29
- data/lib/sidekiq_unique_jobs/config.rb +319 -0
- data/lib/sidekiq_unique_jobs/connection.rb +6 -5
- data/lib/sidekiq_unique_jobs/constants.rb +46 -25
- data/lib/sidekiq_unique_jobs/core_ext.rb +80 -0
- data/lib/sidekiq_unique_jobs/deprecation.rb +65 -0
- data/lib/sidekiq_unique_jobs/digests.rb +70 -102
- data/lib/sidekiq_unique_jobs/exceptions.rb +88 -12
- data/lib/sidekiq_unique_jobs/expiring_digests.rb +14 -0
- data/lib/sidekiq_unique_jobs/job.rb +46 -12
- data/lib/sidekiq_unique_jobs/json.rb +47 -0
- data/lib/sidekiq_unique_jobs/key.rb +98 -0
- data/lib/sidekiq_unique_jobs/lock/base_lock.rb +111 -82
- data/lib/sidekiq_unique_jobs/lock/client_validator.rb +28 -0
- data/lib/sidekiq_unique_jobs/lock/server_validator.rb +27 -0
- data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb +40 -15
- data/lib/sidekiq_unique_jobs/lock/until_executed.rb +30 -7
- data/lib/sidekiq_unique_jobs/lock/until_executing.rb +26 -2
- data/lib/sidekiq_unique_jobs/lock/until_expired.rb +27 -15
- data/lib/sidekiq_unique_jobs/lock/validator.rb +96 -0
- data/lib/sidekiq_unique_jobs/lock/while_executing.rb +26 -12
- data/lib/sidekiq_unique_jobs/lock/while_executing_reject.rb +3 -3
- data/lib/sidekiq_unique_jobs/lock.rb +342 -0
- data/lib/sidekiq_unique_jobs/lock_args.rb +127 -0
- data/lib/sidekiq_unique_jobs/lock_config.rb +126 -0
- data/lib/sidekiq_unique_jobs/lock_digest.rb +79 -0
- data/lib/sidekiq_unique_jobs/lock_info.rb +68 -0
- data/lib/sidekiq_unique_jobs/lock_timeout.rb +62 -0
- data/lib/sidekiq_unique_jobs/lock_ttl.rb +77 -0
- data/lib/sidekiq_unique_jobs/lock_type.rb +37 -0
- data/lib/sidekiq_unique_jobs/locksmith.rb +305 -101
- data/lib/sidekiq_unique_jobs/logging/middleware_context.rb +44 -0
- data/lib/sidekiq_unique_jobs/logging.rb +202 -33
- data/lib/sidekiq_unique_jobs/lua/delete.lua +51 -0
- data/lib/sidekiq_unique_jobs/lua/delete_by_digest.lua +42 -0
- data/lib/sidekiq_unique_jobs/lua/delete_job_by_digest.lua +38 -0
- data/lib/sidekiq_unique_jobs/lua/find_digest_in_queues.lua +26 -0
- data/lib/sidekiq_unique_jobs/lua/lock.lua +99 -0
- data/lib/sidekiq_unique_jobs/lua/lock_until_expired.lua +92 -0
- data/lib/sidekiq_unique_jobs/lua/locked.lua +35 -0
- data/lib/sidekiq_unique_jobs/lua/queue.lua +87 -0
- data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +122 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +40 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_current_time.lua +8 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_queue.lua +22 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_sorted_set.lua +18 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_process_set.lua +53 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_queues.lua +43 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_sorted_set.lua +24 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_hgetall.lua +13 -0
- data/lib/sidekiq_unique_jobs/lua/shared/_upgrades.lua +3 -0
- data/lib/sidekiq_unique_jobs/lua/unlock.lua +107 -0
- data/lib/sidekiq_unique_jobs/lua/update_version.lua +40 -0
- data/lib/sidekiq_unique_jobs/lua/upgrade.lua +68 -0
- data/lib/sidekiq_unique_jobs/middleware/client.rb +42 -0
- data/lib/sidekiq_unique_jobs/middleware/server.rb +31 -0
- data/lib/sidekiq_unique_jobs/middleware.rb +29 -43
- data/lib/sidekiq_unique_jobs/normalizer.rb +4 -4
- data/lib/sidekiq_unique_jobs/on_conflict/log.rb +9 -5
- data/lib/sidekiq_unique_jobs/on_conflict/null_strategy.rb +1 -1
- data/lib/sidekiq_unique_jobs/on_conflict/raise.rb +1 -1
- data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +63 -17
- data/lib/sidekiq_unique_jobs/on_conflict/replace.rb +54 -14
- data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +16 -5
- data/lib/sidekiq_unique_jobs/on_conflict/strategy.rb +25 -6
- data/lib/sidekiq_unique_jobs/on_conflict.rb +23 -10
- data/lib/sidekiq_unique_jobs/options_with_fallback.rb +39 -36
- data/lib/sidekiq_unique_jobs/orphans/lua_reaper.rb +29 -0
- data/lib/sidekiq_unique_jobs/orphans/manager.rb +241 -0
- data/lib/sidekiq_unique_jobs/orphans/null_reaper.rb +24 -0
- data/lib/sidekiq_unique_jobs/orphans/observer.rb +42 -0
- data/lib/sidekiq_unique_jobs/orphans/reaper.rb +114 -0
- data/lib/sidekiq_unique_jobs/orphans/reaper_resurrector.rb +170 -0
- data/lib/sidekiq_unique_jobs/orphans/ruby_reaper.rb +298 -0
- data/lib/sidekiq_unique_jobs/redis/entity.rb +112 -0
- data/lib/sidekiq_unique_jobs/redis/hash.rb +56 -0
- data/lib/sidekiq_unique_jobs/redis/list.rb +32 -0
- data/lib/sidekiq_unique_jobs/redis/set.rb +32 -0
- data/lib/sidekiq_unique_jobs/redis/sorted_set.rb +86 -0
- data/lib/sidekiq_unique_jobs/redis/string.rb +51 -0
- data/lib/sidekiq_unique_jobs/redis.rb +11 -0
- data/lib/sidekiq_unique_jobs/reflectable.rb +26 -0
- data/lib/sidekiq_unique_jobs/reflections.rb +79 -0
- data/lib/sidekiq_unique_jobs/rspec/matchers/have_valid_sidekiq_options.rb +51 -0
- data/lib/sidekiq_unique_jobs/rspec/matchers.rb +26 -0
- data/lib/sidekiq_unique_jobs/script/caller.rb +127 -0
- data/lib/sidekiq_unique_jobs/script.rb +15 -0
- data/lib/sidekiq_unique_jobs/server.rb +61 -0
- data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +114 -65
- data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +252 -36
- data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +47 -32
- data/lib/sidekiq_unique_jobs/testing.rb +102 -29
- data/lib/sidekiq_unique_jobs/timer_task.rb +299 -0
- data/lib/sidekiq_unique_jobs/timing.rb +58 -0
- data/lib/sidekiq_unique_jobs/unlockable.rb +20 -4
- data/lib/sidekiq_unique_jobs/update_version.rb +25 -0
- data/lib/sidekiq_unique_jobs/upgrade_locks.rb +155 -0
- data/lib/sidekiq_unique_jobs/version.rb +3 -1
- data/lib/sidekiq_unique_jobs/version_check.rb +23 -4
- data/lib/sidekiq_unique_jobs/web/helpers.rb +138 -13
- data/lib/sidekiq_unique_jobs/web/views/_paging.erb +4 -4
- data/lib/sidekiq_unique_jobs/web/views/changelogs.erb +54 -0
- data/lib/sidekiq_unique_jobs/web/views/lock.erb +110 -0
- data/lib/sidekiq_unique_jobs/web/views/locks.erb +54 -0
- data/lib/sidekiq_unique_jobs/web.rb +82 -32
- data/lib/sidekiq_unique_jobs.rb +54 -7
- data/lib/tasks/changelog.rake +16 -16
- metadata +134 -177
- data/lib/sidekiq_unique_jobs/client/middleware.rb +0 -56
- data/lib/sidekiq_unique_jobs/scripts.rb +0 -118
- data/lib/sidekiq_unique_jobs/server/middleware.rb +0 -46
- data/lib/sidekiq_unique_jobs/timeout/calculator.rb +0 -63
- data/lib/sidekiq_unique_jobs/timeout.rb +0 -8
- data/lib/sidekiq_unique_jobs/unique_args.rb +0 -150
- data/lib/sidekiq_unique_jobs/util.rb +0 -103
- data/lib/sidekiq_unique_jobs/web/views/unique_digest.erb +0 -28
- data/lib/sidekiq_unique_jobs/web/views/unique_digests.erb +0 -46
- data/redis/acquire_lock.lua +0 -21
- data/redis/convert_legacy_lock.lua +0 -13
- data/redis/delete.lua +0 -14
- data/redis/delete_by_digest.lua +0 -23
- data/redis/delete_job_by_digest.lua +0 -60
- data/redis/lock.lua +0 -62
- data/redis/release_stale_locks.lua +0 -90
- data/redis/unlock.lua +0 -35
@@ -0,0 +1,87 @@
|
|
1
|
+
-------- BEGIN keys ---------
|
2
|
+
local digest = KEYS[1]
|
3
|
+
local queued = KEYS[2]
|
4
|
+
local primed = KEYS[3]
|
5
|
+
local locked = KEYS[4]
|
6
|
+
local info = KEYS[5]
|
7
|
+
local changelog = KEYS[6]
|
8
|
+
local digests = KEYS[7]
|
9
|
+
-------- END keys ---------
|
10
|
+
|
11
|
+
|
12
|
+
-------- BEGIN lock arguments ---------
|
13
|
+
local job_id = ARGV[1] -- The job_id that was previously primed
|
14
|
+
local pttl = tonumber(ARGV[2])
|
15
|
+
local lock_type = ARGV[3]
|
16
|
+
local limit = tonumber(ARGV[4])
|
17
|
+
-------- END lock arguments -----------
|
18
|
+
|
19
|
+
|
20
|
+
-------- BEGIN injected arguments --------
|
21
|
+
local current_time = tonumber(ARGV[5])
|
22
|
+
local debug_lua = ARGV[6] == "true"
|
23
|
+
local max_history = tonumber(ARGV[7])
|
24
|
+
local script_name = tostring(ARGV[8]) .. ".lua"
|
25
|
+
--------- END injected arguments ---------
|
26
|
+
|
27
|
+
|
28
|
+
-------- BEGIN Variables --------
|
29
|
+
local queued_count = redis.call("LLEN", queued)
|
30
|
+
local locked_count = redis.call("HLEN", locked)
|
31
|
+
local within_limit = limit > locked_count
|
32
|
+
local limit_exceeded = not within_limit
|
33
|
+
-------- END Variables --------
|
34
|
+
|
35
|
+
|
36
|
+
-------- BEGIN local functions --------
|
37
|
+
<%= include_partial "shared/_common.lua" %>
|
38
|
+
---------- END local functions ----------
|
39
|
+
|
40
|
+
|
41
|
+
-------- BEGIN queue.lua --------
|
42
|
+
log_debug("BEGIN queue with key:", digest, "for job:", job_id)
|
43
|
+
|
44
|
+
if redis.call("HEXISTS", locked, job_id) == 1 then
|
45
|
+
log_debug("HEXISTS", locked, job_id, "== 1")
|
46
|
+
log("Duplicate")
|
47
|
+
return job_id
|
48
|
+
end
|
49
|
+
|
50
|
+
local prev_jid = redis.call("GET", digest)
|
51
|
+
log_debug("job_id:", job_id, "prev_jid:", prev_jid)
|
52
|
+
if not prev_jid or prev_jid == false then
|
53
|
+
log_debug("SET", digest, job_id)
|
54
|
+
redis.call("SET", digest, job_id)
|
55
|
+
elseif prev_jid == job_id then
|
56
|
+
log_debug(digest, "already queued with job_id:", job_id)
|
57
|
+
log("Duplicate")
|
58
|
+
return job_id
|
59
|
+
else
|
60
|
+
-- TODO: Consider constraining the total count of both locked and queued?
|
61
|
+
if within_limit and queued_count < limit then
|
62
|
+
log_debug("Within limit:", digest, "(", locked_count, "of", limit, ")", "queued (", queued_count, "of", limit, ")")
|
63
|
+
log_debug("SET", digest, job_id, "(was", prev_jid, ")")
|
64
|
+
redis.call("SET", digest, job_id)
|
65
|
+
else
|
66
|
+
log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
|
67
|
+
log("Limit exceeded", prev_jid)
|
68
|
+
return prev_jid
|
69
|
+
end
|
70
|
+
end
|
71
|
+
|
72
|
+
log_debug("LPUSH", queued, job_id)
|
73
|
+
redis.call("LPUSH", queued, job_id)
|
74
|
+
|
75
|
+
-- The Sidekiq client should only set pttl for until_expired
|
76
|
+
-- The Sidekiq server should set pttl for all other jobs
|
77
|
+
if pttl and pttl > 0 then
|
78
|
+
log_debug("PEXPIRE", digest, pttl)
|
79
|
+
redis.call("PEXPIRE", digest, pttl)
|
80
|
+
log_debug("PEXPIRE", queued, pttl)
|
81
|
+
redis.call("PEXPIRE", queued, pttl)
|
82
|
+
end
|
83
|
+
|
84
|
+
log("Queued")
|
85
|
+
log_debug("END queue with key:", digest, "for job:", job_id)
|
86
|
+
return job_id
|
87
|
+
-------- END queue.lua --------
|
@@ -0,0 +1,122 @@
redis.replicate_commands()

-------- BEGIN keys ---------
local digests_set          = KEYS[1]
local expiring_digests_set = KEYS[2]
local schedule_set         = KEYS[3]
local retry_set            = KEYS[4]
-------- END keys ---------

-------- BEGIN argv ---------
local reaper_count = tonumber(ARGV[1])
local threshold    = tonumber(ARGV[2])
-------- END argv ---------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[3])
local debug_lua    = ARGV[4] == "true"
local max_history  = tonumber(ARGV[5])
local script_name  = ARGV[6] .. ".lua"
local redisversion = ARGV[7]
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
<%= include_partial "shared/_find_digest_in_queues.lua" %>
<%= include_partial "shared/_find_digest_in_sorted_set.lua" %>
<%= include_partial "shared/_find_digest_in_process_set.lua" %>
---------- END local functions ----------

-------- BEGIN delete_orphaned.lua --------
log_debug("BEGIN")
local found     = false
local per       = 50
local total     = redis.call("ZCARD", digests_set)
local index     = 0
local del_count = 0
local redis_ver = toversion(redisversion)
local del_cmd   = "DEL"

-- UNLINK (lazy free) only exists from redis 4.0 onwards.
if tonumber(redis_ver["major"]) >= 4 then del_cmd = "UNLINK"; end

-- Delete every key that belongs to a single lock digest
-- (the base digest and its :RUN runtime variant).
local function delete_lock_keys(digest)
  redis.call(del_cmd, digest,
             digest .. ":QUEUED",
             digest .. ":PRIMED",
             digest .. ":LOCKED",
             digest .. ":INFO",
             digest .. ":RUN",
             digest .. ":RUN:QUEUED",
             digest .. ":RUN:PRIMED",
             digest .. ":RUN:LOCKED",
             digest .. ":RUN:INFO")
end

-- Pass 1: reap digests that cannot be found anywhere in sidekiq
-- (not scheduled, not in retries, not enqueued, not held by a process).
repeat
  log_debug("Iterating through:", digests_set, "for orphaned locks")
  -- NOTE(review): members are ZREM'd below while paginating by rank,
  -- which can shift later entries out of the current page; skipped
  -- digests are presumably caught by the next reaper run — confirm.
  local digests = redis.call("ZREVRANGE", digests_set, index, index + per - 1)

  for _, digest in pairs(digests) do
    log_debug("Searching for digest:", digest, "in", schedule_set)
    found = find_digest_in_sorted_set(schedule_set, digest)

    if found ~= true then
      log_debug("Searching for digest:", digest, "in", retry_set)
      found = find_digest_in_sorted_set(retry_set, digest)
    end

    if found ~= true then
      log_debug("Searching for digest:", digest, "in all queues")
      local queue = find_digest_in_queues(digest)

      if queue then
        log_debug("found digest:", digest, "in queue:", queue)
        found = true
      end
    end

    -- TODO: Add check for jobs checked out by process
    if found ~= true then
      log_debug("Searching for digest:", digest, "in process sets")
      found = find_digest_in_process_set(digest, threshold)
    end

    if found ~= true then
      delete_lock_keys(digest)
      redis.call("ZREM", digests_set, digest)
      del_count = del_count + 1
    end
  end

  index = index + per
until index >= total or del_count >= reaper_count

-- Pass 2: reap expiring digests whose score (expiry time) has passed.
if del_count < reaper_count then
  index = 0
  total = redis.call("ZCOUNT", expiring_digests_set, 0, current_time)
  repeat
    -- BUGFIX: ZRANGEBYSCORE's LIMIT clause is <offset> <count> — the
    -- third value is the page size, not an end index. The old code
    -- passed `index + per - 1`, a count that grew with each page.
    local digests = redis.call("ZRANGEBYSCORE", expiring_digests_set, 0, current_time, "LIMIT", index, per)

    for _, digest in pairs(digests) do
      delete_lock_keys(digest)
      redis.call("ZREM", expiring_digests_set, digest)
      del_count = del_count + 1
    end

    index = index + per
  until index >= total or del_count >= reaper_count
end

log_debug("END")
return del_count
@@ -0,0 +1,40 @@
-- Parse a dotted version string (e.g. "6.2.1") into a table holding the
-- original string plus numeric major/minor/patch components. Components
-- come back nil when the string does not contain "X.Y.Z".
local function toversion(version)
  local major, minor, patch = string.match(version, "(%d+)%.(%d+)%.(%d+)")

  return {
    ["version"] = version,
    ["major"]   = tonumber(major),
    ["minor"]   = tonumber(minor),
    ["patch"]   = tonumber(patch),
  }
end
-- Coerce a redis-style truthy value ("1", "true", true, 1, ...) into a
-- real Lua boolean; everything else yields false.
local function toboolean(val)
  local str = tostring(val)
  return str == "1" or str == "true"
end
-- Emit a DEBUG-level line to the redis log, but only when the script was
-- invoked with debug_lua == "true" (an argument injected by the caller).
-- All arguments are stringified and space-joined after the script name.
local function log_debug( ... )
  if debug_lua ~= true then return end

  -- BUGFIX/portability: the old loop iterated `ipairs(arg)` — the legacy
  -- Lua 5.0 vararg table, which is compat-only in 5.1, absent in 5.2+,
  -- and stops short at nil holes. select() over `...` handles every
  -- argument, including nils (rendered as "nil").
  local result = ""
  for i = 1, select("#", ...) do
    result = result .. " " .. tostring(select(i, ...))
  end
  redis.log(redis.LOG_DEBUG, script_name .. " -" .. result)
end
-- Record a structured changelog entry for this lock operation and publish
-- it on the changelog channel, trimming the sorted set so it never grows
-- beyond max_history entries. No-op when history keeping is disabled.
-- Relies on locals defined by the including script:
--   digest, job_id, changelog, current_time, max_history, script_name.
local function log(message, prev_jid)
  if not max_history or max_history == 0 then return end

  local entry = cjson.encode({
    digest   = digest,
    job_id   = job_id,
    script   = script_name,
    message  = message,
    time     = current_time,
    prev_jid = prev_jid,
  })

  log_debug("ZADD", changelog, current_time, entry);
  redis.call("ZADD", changelog, current_time, entry);

  local total_entries   = redis.call("ZCARD", changelog)
  local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, 0, -1 * max_history)

  if removed_entries > 0 then
    log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
  end

  log_debug("PUBLISH", changelog, entry);
  redis.call("PUBLISH", changelog, entry);
end
@@ -0,0 +1,22 @@
-- Scan a sidekiq queue (a redis list) in pages of 50 and LREM the first
-- job payload whose JSON contains the given digest.
-- Returns the removed payload string, or nil when nothing matched.
local function delete_from_queue(queue, digest)
  local per    = 50
  local total  = redis.call("LLEN", queue)
  local index  = 0
  local result = nil

  while (index < total) do
    local items = redis.call("LRANGE", queue, index, index + per - 1)
    if #items == 0 then
      break
    end
    for _, item in pairs(items) do
      -- BUGFIX: plain-text search (4th arg true). The digest must not be
      -- interpreted as a Lua pattern, or magic characters in it
      -- (e.g. "-") would make the match misbehave.
      if string.find(item, digest, 1, true) then
        redis.call("LREM", queue, 1, item)
        result = item
        break
      end
    end
    index = index + per
  end
  return result
end
@@ -0,0 +1,18 @@
-- Page through a sorted set 50 members at a time and ZREM the first
-- member whose payload contains the given digest.
-- Returns the removed member string, or nil when nothing matched.
local function delete_from_sorted_set(name, digest)
  local per   = 50
  local total = redis.call("zcard", name)
  local index = 0
  local result

  while (index < total) do
    local items = redis.call("ZRANGE", name, index, index + per - 1)
    for _, item in pairs(items) do
      -- BUGFIX: plain-text search (4th arg true) so the digest is not
      -- interpreted as a Lua pattern.
      if string.find(item, digest, 1, true) then
        redis.call("ZREM", name, item)
        result = item
        break
      end
    end
    index = index + per
  end
  return result
end
@@ -0,0 +1,53 @@
-- Search every sidekiq process' work hash for a job whose payload
-- references the digest. A job created after `threshold` is also treated
-- as found, so recently checked-out work is never reaped.
-- Returns true when the digest is considered busy, false otherwise.
-- (Removed the unused locals `job_cursor` and `pattern` from the old
-- implementation.)
local function find_digest_in_process_set(digest, threshold)
  local process_cursor = 0
  local found          = false

  log_debug("Searching in process list",
            "for digest:", digest,
            "cursor:", process_cursor)

  repeat
    local process_paginator   = redis.call("SSCAN", "processes", process_cursor, "MATCH", "*")
    local next_process_cursor = process_paginator[1]
    local processes           = process_paginator[2]
    log_debug("Found number of processes:", #processes, "next cursor:", next_process_cursor)

    for _, process in ipairs(processes) do
      local workers_key = process .. ":work"
      log_debug("searching in process set:", process,
                "for digest:", digest,
                "cursor:", process_cursor)

      -- HGETALL replies as a flat array: field1, value1, field2, value2, ...
      local jobs = redis.call("HGETALL", workers_key)

      if #jobs == 0 then
        log_debug("No entries in:", workers_key)
      else
        for i = 1, #jobs, 2 do
          local jobstr = jobs[i + 1]
          -- Strip any :RUN suffix from both sides so a runtime lock
          -- matches its base digest (and vice versa).
          if string.find(string.gsub(jobstr, ':RUN', ''), string.gsub(digest, ':RUN', '')) then
            log_debug("Found digest", digest, "in:", workers_key)
            found = true
            break
          end

          -- NOTE(review): assumes every work-hash value decodes to JSON
          -- containing payload.created_at — confirm against sidekiq's
          -- work-set format before reusing this elsewhere.
          local job = cjson.decode(jobstr)
          if job.payload.created_at > threshold then
            found = true
            break
          end
        end
      end

      if found == true then
        break
      end
    end

    process_cursor = next_process_cursor
  until found == true or process_cursor == "0"

  return found
end
@@ -0,0 +1,43 @@
-- SCAN every `queue:*` list for a job payload containing the digest.
-- Returns the queue name taken from the first matching payload, or nil
-- when no queue references the digest.
local function find_digest_in_queues(digest)
  local cursor = "0"
  local count  = 50
  local result = nil
  local found  = false

  repeat
    log_debug("searching all queues for a matching digest:", digest)
    local pagination  = redis.call("SCAN", cursor, "MATCH", "queue:*", "COUNT", count)
    local next_cursor = pagination[1]
    local queues      = pagination[2]

    for _, queue in ipairs(queues) do
      local per   = 50
      local total = redis.call("LLEN", queue)
      local index = 0

      log_debug("searching in:", queue,
                "for digest:", digest,
                "from:", index,
                "to:", total,
                "(per: " .. per .. ")",
                "cursor:", cursor)

      -- BUGFIX: the old `break` only left the inner items loop, so the
      -- scan kept walking the remaining pages and queues after a match —
      -- wasted work, and a later queue could overwrite `result`. Stop as
      -- soon as the digest is found. Also use a plain-text find so the
      -- digest is not treated as a Lua pattern.
      while (index < total) and found ~= true do
        local items = redis.call("LRANGE", queue, index, index + per - 1)
        for _, item in pairs(items) do
          if string.find(item, digest, 1, true) then
            log_debug("Found digest:", digest, "in queue:", queue)
            result = cjson.decode(item).queue
            found  = true
            break
          end
        end
        index = index + per
      end

      if found == true then
        break
      end
    end

    cursor = next_cursor
  until found == true or cursor == "0"

  return result
end
@@ -0,0 +1,24 @@
-- Incrementally ZSCAN the sorted set `name` for any member containing the
-- digest. Returns true as soon as a page of matches comes back, false
-- once the whole set has been scanned without a hit.
local function find_digest_in_sorted_set(name, digest)
  local scan_cursor   = 0
  local batch_size    = 5
  local match_pattern = "*" .. digest .. "*"
  local matched       = false

  log_debug("searching in:", name,
            "for digest:", digest,
            "cursor:", scan_cursor)
  repeat
    local reply       = redis.call("ZSCAN", name, scan_cursor, "MATCH", match_pattern, "COUNT", batch_size)
    local next_cursor = reply[1]
    local members     = reply[2]

    if #members > 0 then
      log_debug("Found digest", digest, "in zset:", name)
      matched = true
    end

    scan_cursor = next_cursor
  until matched == true or scan_cursor == "0"

  return matched
end
@@ -0,0 +1,107 @@
-------- BEGIN keys ---------
local digest    = KEYS[1]
local queued    = KEYS[2]
local primed    = KEYS[3]
local locked    = KEYS[4]
local info      = KEYS[5]
local changelog = KEYS[6]
local digests   = KEYS[7]
-------- END keys ---------


-------- BEGIN lock arguments ---------
local job_id    = ARGV[1]
local pttl      = tonumber(ARGV[2])
local lock_type = ARGV[3]
local limit     = tonumber(ARGV[4])
-------- END lock arguments -----------


-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[5])
local debug_lua    = ARGV[6] == "true"
local max_history  = tonumber(ARGV[7])
local script_name  = tostring(ARGV[8]) .. ".lua"
local redisversion = ARGV[9]
--------- END injected arguments ---------


-------- BEGIN Variables --------
local queued_count = redis.call("LLEN", queued)
local primed_count = redis.call("LLEN", primed)
local locked_count = redis.call("HLEN", locked)
--------- END Variables ---------


-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


--------- Begin unlock.lua ---------
log_debug("BEGIN unlock digest:", digest, "(job_id: " .. job_id ..")")

-- A job that does not hold the lock may only proceed when every lock
-- structure is already empty (an orphaned lock); otherwise it yields to
-- the current holders.
log_debug("HEXISTS", locked, job_id)
if redis.call("HEXISTS", locked, job_id) == 0 then
  -- TODO: Improve orphaned lock detection
  if queued_count == 0 and primed_count == 0 and locked_count == 0 then
    log_debug("Orphaned lock")
  else
    local holders = redis.call("HKEYS", locked)
    for i = 1, #holders do
      holders[i] = holders[i] .. ","
    end
    local result = locked .. " (" .. table.concat(holders) .. ")"
    log("Yielding to: " .. result)
    log_debug("Yielding to", result, locked, "by job", job_id)
    return nil
  end
end

-- Just in case something went wrong
log_debug("LREM", queued, -1, job_id)
redis.call("LREM", queued, -1, job_id)

log_debug("LREM", primed, -1, job_id)
redis.call("LREM", primed, -1, job_id)

local redis_version = toversion(redisversion)
local del_cmd       = "DEL"

-- UNLINK (lazy free) only exists from redis 4.0 onwards.
if tonumber(redis_version["major"]) >= 4 then del_cmd = "UNLINK"; end

-- until_expired locks keep their digest around; the TTL removes it.
if lock_type ~= "until_expired" then
  log_debug(del_cmd, digest, info)
  redis.call(del_cmd, digest, info)

  log_debug("HDEL", locked, job_id)
  redis.call("HDEL", locked, job_id)
end

local locked_count = redis.call("HLEN", locked)

if locked_count and locked_count < 1 then
  log_debug(del_cmd, locked)
  redis.call(del_cmd, locked)
end

if redis.call("LLEN", primed) == 0 then
  log_debug(del_cmd, primed)
  redis.call(del_cmd, primed)
end

if limit and limit <= 1 and locked_count and locked_count <= 1 then
  log_debug("ZREM", digests, digest)
  redis.call("ZREM", digests, digest)
end

-- Signal waiters by pushing a token onto the queued list, kept alive
-- only briefly.
log_debug("LPUSH", queued, "1")
redis.call("LPUSH", queued, "1")

log_debug("PEXPIRE", queued, 5000)
redis.call("PEXPIRE", queued, 5000)

log("Unlocked")
log_debug("END unlock digest:", digest, "(job_id: " .. job_id ..")")
return job_id
--------- END unlock.lua ---------
@@ -0,0 +1,40 @@
-------- BEGIN keys ---------
local live_key = KEYS[1]
local dead_key = KEYS[2]
-------- END keys ---------

-------- BEGIN lock arguments ---------
local version = ARGV[1]
-------- END lock arguments -----------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[2])
local debug_lua    = ARGV[3] == "true"
local max_history  = tonumber(ARGV[4])
local script_name  = tostring(ARGV[5]) .. ".lua"
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


-------- BEGIN update_version.lua --------
log_debug("BEGIN setting version:", version)

-- Swap the live version key to the new value while remembering what was
-- there before. When the version changed, the previous value is parked
-- under dead_key so upgrade scripts can tell where we came from.
local changed     = false
local old_version = redis.call("GETSET", live_key, version)

if not old_version then
  log_debug("No previous version found")
  changed = true
elseif old_version ~= version then
  log_debug("Old version:", old_version, "differs from:", version)
  redis.call("SET", dead_key, old_version)
  changed = true
end

return changed
-------- END update_version.lua --------

@@ -0,0 +1,68 @@
redis.replicate_commands()
-------- BEGIN keys ---------
local live_version = KEYS[1]
local dead_version = KEYS[2]
-------- END keys ---------

-------- BEGIN injected arguments --------
-- NOTE(review): the injected arguments start at ARGV[5] here, presumably
-- to match the caller's shared argument layout — confirm against the
-- script caller before renumbering.
local current_time = tonumber(ARGV[5])
local debug_lua    = ARGV[6] == "true"
local max_history  = tonumber(ARGV[7])
local script_name  = tostring(ARGV[8]) .. ".lua"
local redisversion = ARGV[9]
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
<%= include_partial "shared/_upgrades.lua" %>
---------- END local functions ----------


local new_version   = redis.call("GET", live_version)
local old_version   = redis.call("GET", dead_version)
local redis_version = toversion(redisversion)
local upgraded      = 0
local del_cmd       = "DEL"

-- UNLINK (lazy free) only exists from redis 4.0 onwards.
if redis_version["major"] >= 4 then del_cmd = "UNLINK"; end
-------- BEGIN upgrade.lua --------

log_debug("BEGIN upgrading from: ", old_version, "to:", new_version)

-- Migrate every legacy *:GRABBED hash to its *:LOCKED equivalent.
local cursor = "0"
local per    = 50
repeat
  local pagination   = redis.call("SCAN", cursor, "MATCH", "*:GRABBED", "COUNT", per)
  local next_cursor  = pagination[1]
  local grabbed_keys = pagination[2]

  for _, grabbed in ipairs(grabbed_keys) do
    -- BUGFIX: the old code called `grabbed.gsub(":GRABBED", ":LOCKED")`.
    -- That is a dot call (no self), so gsub's subject was the literal
    -- string ":GRABBED" and every migration targeted the single key
    -- ":GRABBED" instead of the per-digest :LOCKED key. Call gsub on the
    -- actual key.
    local locked_key = string.gsub(grabbed, ":GRABBED", ":LOCKED")
    local locks      = redis.call("HGETALL", grabbed)

    if #locks == 0 then
      log_debug("No entries in:", grabbed)
    else
      log_debug("HMSET", locked_key, unpack(locks))
      redis.call("HMSET", locked_key, unpack(locks))
    end

    log_debug("DEL", grabbed)
    redis.call("DEL", grabbed)

    upgraded = upgraded + 1
  end

  cursor = next_cursor
  if cursor == "0" then
    log_debug("Looped through all grabbed keys, stopping iteration")
  end
until cursor == "0"


log_debug("END upgrading from: ", old_version, "to:", new_version)

return
-------- END upgrade.lua --------