sidekiq-unique-jobs 6.0.25 → 7.0.0.beta2

Sign up to get free protection for your applications and to get access to all the features.

Potentially problematic release.


This version of sidekiq-unique-jobs might be problematic. Click here for more details.

Files changed (113) hide show
  1. checksums.yaml +4 -4
  2. data/CHANGELOG.md +155 -20
  3. data/README.md +349 -112
  4. data/lib/sidekiq-unique-jobs.rb +2 -0
  5. data/lib/sidekiq_unique_jobs.rb +43 -6
  6. data/lib/sidekiq_unique_jobs/batch_delete.rb +121 -0
  7. data/lib/sidekiq_unique_jobs/changelog.rb +71 -0
  8. data/lib/sidekiq_unique_jobs/cli.rb +20 -29
  9. data/lib/sidekiq_unique_jobs/config.rb +193 -0
  10. data/lib/sidekiq_unique_jobs/connection.rb +5 -4
  11. data/lib/sidekiq_unique_jobs/constants.rb +36 -24
  12. data/lib/sidekiq_unique_jobs/core_ext.rb +38 -0
  13. data/lib/sidekiq_unique_jobs/digests.rb +78 -93
  14. data/lib/sidekiq_unique_jobs/exceptions.rb +152 -8
  15. data/lib/sidekiq_unique_jobs/job.rb +3 -3
  16. data/lib/sidekiq_unique_jobs/json.rb +34 -0
  17. data/lib/sidekiq_unique_jobs/key.rb +93 -0
  18. data/lib/sidekiq_unique_jobs/lock.rb +295 -0
  19. data/lib/sidekiq_unique_jobs/lock/base_lock.rb +49 -43
  20. data/lib/sidekiq_unique_jobs/lock/client_validator.rb +28 -0
  21. data/lib/sidekiq_unique_jobs/lock/server_validator.rb +27 -0
  22. data/lib/sidekiq_unique_jobs/lock/until_and_while_executing.rb +8 -17
  23. data/lib/sidekiq_unique_jobs/lock/until_executed.rb +5 -5
  24. data/lib/sidekiq_unique_jobs/lock/until_expired.rb +1 -23
  25. data/lib/sidekiq_unique_jobs/lock/validator.rb +65 -0
  26. data/lib/sidekiq_unique_jobs/lock/while_executing.rb +12 -8
  27. data/lib/sidekiq_unique_jobs/lock/while_executing_reject.rb +1 -1
  28. data/lib/sidekiq_unique_jobs/lock_config.rb +95 -0
  29. data/lib/sidekiq_unique_jobs/lock_info.rb +68 -0
  30. data/lib/sidekiq_unique_jobs/locksmith.rb +255 -99
  31. data/lib/sidekiq_unique_jobs/logging.rb +148 -22
  32. data/lib/sidekiq_unique_jobs/logging/middleware_context.rb +44 -0
  33. data/lib/sidekiq_unique_jobs/lua/delete.lua +51 -0
  34. data/lib/sidekiq_unique_jobs/lua/delete_by_digest.lua +46 -0
  35. data/lib/sidekiq_unique_jobs/lua/delete_job_by_digest.lua +38 -0
  36. data/lib/sidekiq_unique_jobs/lua/find_digest_in_queues.lua +26 -0
  37. data/lib/sidekiq_unique_jobs/lua/find_digest_in_sorted_set.lua +24 -0
  38. data/lib/sidekiq_unique_jobs/lua/lock.lua +91 -0
  39. data/lib/sidekiq_unique_jobs/lua/locked.lua +35 -0
  40. data/lib/sidekiq_unique_jobs/lua/queue.lua +83 -0
  41. data/lib/sidekiq_unique_jobs/lua/reap_orphans.lua +86 -0
  42. data/lib/sidekiq_unique_jobs/lua/shared/_common.lua +40 -0
  43. data/lib/sidekiq_unique_jobs/lua/shared/_current_time.lua +8 -0
  44. data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_queue.lua +19 -0
  45. data/lib/sidekiq_unique_jobs/lua/shared/_delete_from_sorted_set.lua +18 -0
  46. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_queues.lua +46 -0
  47. data/lib/sidekiq_unique_jobs/lua/shared/_find_digest_in_sorted_set.lua +24 -0
  48. data/lib/sidekiq_unique_jobs/lua/shared/_hgetall.lua +13 -0
  49. data/lib/sidekiq_unique_jobs/lua/shared/_upgrades.lua +3 -0
  50. data/lib/sidekiq_unique_jobs/lua/shared/find_digest_in_sorted_set.lua +24 -0
  51. data/lib/sidekiq_unique_jobs/lua/unlock.lua +99 -0
  52. data/lib/sidekiq_unique_jobs/lua/update_version.lua +40 -0
  53. data/lib/sidekiq_unique_jobs/lua/upgrade.lua +68 -0
  54. data/lib/sidekiq_unique_jobs/middleware.rb +62 -31
  55. data/lib/sidekiq_unique_jobs/middleware/client.rb +42 -0
  56. data/lib/sidekiq_unique_jobs/middleware/server.rb +27 -0
  57. data/lib/sidekiq_unique_jobs/normalizer.rb +3 -3
  58. data/lib/sidekiq_unique_jobs/on_conflict.rb +22 -9
  59. data/lib/sidekiq_unique_jobs/on_conflict/log.rb +8 -4
  60. data/lib/sidekiq_unique_jobs/on_conflict/reject.rb +59 -13
  61. data/lib/sidekiq_unique_jobs/on_conflict/replace.rb +42 -13
  62. data/lib/sidekiq_unique_jobs/on_conflict/reschedule.rb +4 -4
  63. data/lib/sidekiq_unique_jobs/on_conflict/strategy.rb +24 -5
  64. data/lib/sidekiq_unique_jobs/options_with_fallback.rb +47 -23
  65. data/lib/sidekiq_unique_jobs/orphans/manager.rb +100 -0
  66. data/lib/sidekiq_unique_jobs/orphans/observer.rb +42 -0
  67. data/lib/sidekiq_unique_jobs/orphans/reaper.rb +201 -0
  68. data/lib/sidekiq_unique_jobs/profiler.rb +51 -0
  69. data/lib/sidekiq_unique_jobs/redis.rb +11 -0
  70. data/lib/sidekiq_unique_jobs/redis/entity.rb +94 -0
  71. data/lib/sidekiq_unique_jobs/redis/hash.rb +56 -0
  72. data/lib/sidekiq_unique_jobs/redis/list.rb +32 -0
  73. data/lib/sidekiq_unique_jobs/redis/set.rb +32 -0
  74. data/lib/sidekiq_unique_jobs/redis/sorted_set.rb +59 -0
  75. data/lib/sidekiq_unique_jobs/redis/string.rb +49 -0
  76. data/lib/sidekiq_unique_jobs/rspec/matchers.rb +19 -0
  77. data/lib/sidekiq_unique_jobs/rspec/matchers/have_valid_sidekiq_options.rb +43 -0
  78. data/lib/sidekiq_unique_jobs/{scripts.rb → script.rb} +43 -29
  79. data/lib/sidekiq_unique_jobs/script/caller.rb +125 -0
  80. data/lib/sidekiq_unique_jobs/script/template.rb +41 -0
  81. data/lib/sidekiq_unique_jobs/sidekiq_unique_ext.rb +92 -65
  82. data/lib/sidekiq_unique_jobs/sidekiq_unique_jobs.rb +166 -28
  83. data/lib/sidekiq_unique_jobs/sidekiq_worker_methods.rb +10 -11
  84. data/lib/sidekiq_unique_jobs/testing.rb +47 -15
  85. data/lib/sidekiq_unique_jobs/time_calculator.rb +103 -0
  86. data/lib/sidekiq_unique_jobs/timing.rb +58 -0
  87. data/lib/sidekiq_unique_jobs/unique_args.rb +19 -21
  88. data/lib/sidekiq_unique_jobs/unlockable.rb +11 -2
  89. data/lib/sidekiq_unique_jobs/update_version.rb +25 -0
  90. data/lib/sidekiq_unique_jobs/upgrade_locks.rb +151 -0
  91. data/lib/sidekiq_unique_jobs/version.rb +3 -1
  92. data/lib/sidekiq_unique_jobs/version_check.rb +1 -1
  93. data/lib/sidekiq_unique_jobs/web.rb +25 -19
  94. data/lib/sidekiq_unique_jobs/web/helpers.rb +98 -6
  95. data/lib/sidekiq_unique_jobs/web/views/lock.erb +108 -0
  96. data/lib/sidekiq_unique_jobs/web/views/locks.erb +52 -0
  97. data/lib/tasks/changelog.rake +4 -3
  98. metadata +70 -35
  99. data/lib/sidekiq_unique_jobs/client/middleware.rb +0 -56
  100. data/lib/sidekiq_unique_jobs/server/middleware.rb +0 -46
  101. data/lib/sidekiq_unique_jobs/timeout.rb +0 -8
  102. data/lib/sidekiq_unique_jobs/timeout/calculator.rb +0 -63
  103. data/lib/sidekiq_unique_jobs/util.rb +0 -103
  104. data/lib/sidekiq_unique_jobs/web/views/unique_digest.erb +0 -28
  105. data/lib/sidekiq_unique_jobs/web/views/unique_digests.erb +0 -46
  106. data/redis/acquire_lock.lua +0 -21
  107. data/redis/convert_legacy_lock.lua +0 -13
  108. data/redis/delete.lua +0 -14
  109. data/redis/delete_by_digest.lua +0 -23
  110. data/redis/delete_job_by_digest.lua +0 -60
  111. data/redis/lock.lua +0 -62
  112. data/redis/release_stale_locks.lua +0 -90
  113. data/redis/unlock.lua +0 -35
@@ -0,0 +1,91 @@
-------- BEGIN keys ---------
local digest    = KEYS[1]
local queued    = KEYS[2]
local primed    = KEYS[3]
local locked    = KEYS[4]
local info      = KEYS[5]
local changelog = KEYS[6]
local digests   = KEYS[7]
-------- END keys ---------

-------- BEGIN lock arguments ---------
local job_id    = ARGV[1]
local pttl      = tonumber(ARGV[2])
local lock_type = ARGV[3]
local limit     = tonumber(ARGV[4])
-------- END lock arguments -----------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[5])
local debug_lua    = ARGV[6] == "true"
local max_history  = tonumber(ARGV[7])
local script_name  = tostring(ARGV[8]) .. ".lua"
local redisversion = ARGV[9]
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


--------- BEGIN lock.lua ---------
-- Takes the lock on `digest` for `job_id`.  Returns the winning job_id
-- (this one, on success or on re-lock by the same job), or nil when the
-- concurrency limit has already been reached.
log_debug("BEGIN lock digest:", digest, "job_id:", job_id)

-- Re-locking by the same job_id counts as success: just clear this job's
-- queued/primed bookkeeping entries and return.
if redis.call("HEXISTS", locked, job_id) == 1 then
  log_debug(locked, "already locked with job_id:", job_id)
  log("Duplicate")

  log_debug("LREM", queued, -1, job_id)
  redis.call("LREM", queued, -1, job_id)

  log_debug("LREM", primed, 1, job_id)
  redis.call("LREM", primed, 1, job_id)

  return job_id
end

-- Refuse the lock when the number of current holders has reached the limit.
local locked_count = redis.call("HLEN", locked)
if locked_count >= limit then
  log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
  log("Limited")
  return nil
end

-- Record the lock: digest goes into the digests index, job_id into the
-- locked hash; then drop the job from the queued and primed lists.
log_debug("ZADD", digests, current_time, digest)
redis.call("ZADD", digests, current_time, digest)

log_debug("HSET", locked, job_id, current_time)
redis.call("HSET", locked, job_id, current_time)

log_debug("LREM", queued, -1, job_id)
redis.call("LREM", queued, -1, job_id)

log_debug("LREM", primed, 1, job_id)
redis.call("LREM", primed, 1, job_id)

-- The Sidekiq client should only set pttl for until_expired
-- The Sidekiq server should set pttl for all other jobs
if lock_type == "until_expired" and pttl and pttl > 0 then
  for _, key in ipairs({ digest, queued, primed, locked, info }) do
    log_debug("PEXPIRE", key, pttl)
    redis.call("PEXPIRE", key, pttl)
  end
end

log("Locked")
log_debug("END lock digest:", digest, "job_id:", job_id)
return job_id
---------- END lock.lua ----------
@@ -0,0 +1,35 @@
-------- BEGIN keys ---------
local digest    = KEYS[1]
local queued    = KEYS[2]
local primed    = KEYS[3]
local locked    = KEYS[4]
local info      = KEYS[5]
local changelog = KEYS[6]
local digests   = KEYS[7]
-------- END keys ---------

-------- BEGIN lock arguments ---------
local job_id = ARGV[1]
-------- END lock arguments -----------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[2])
local debug_lua    = ARGV[3] == "true"
local max_history  = tonumber(ARGV[4])
local script_name  = tostring(ARGV[5]) .. ".lua"
--------- END injected arguments ---------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
--------- END local functions ---------


-------- BEGIN locked.lua --------
-- Reports whether job_id currently holds the lock on digest.
-- Returns 1 when locked, -1 otherwise.
local is_locked = redis.call("HEXISTS", locked, job_id) == 1

if is_locked then
  log_debug("Locked", digest, "job_id:", job_id)
  return 1
end

log_debug("NOT Locked", digest, "job_id:", job_id)
return -1
--------- END locked.lua ---------
@@ -0,0 +1,83 @@
-------- BEGIN keys ---------
local digest    = KEYS[1]
local queued    = KEYS[2]
local primed    = KEYS[3]
local locked    = KEYS[4]
local info      = KEYS[5]
local changelog = KEYS[6]
local digests   = KEYS[7]
-------- END keys ---------

-------- BEGIN lock arguments ---------
local job_id    = ARGV[1] -- The job_id that was previously primed
local pttl      = tonumber(ARGV[2])
local lock_type = ARGV[3]
local limit     = tonumber(ARGV[4])
-------- END lock arguments -----------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[5])
local debug_lua    = ARGV[6] == "true"
local max_history  = tonumber(ARGV[7])
local script_name  = tostring(ARGV[8]) .. ".lua"
--------- END injected arguments ---------

-------- BEGIN Variables --------
local queued_count = redis.call("LLEN", queued)
local locked_count = redis.call("HLEN", locked)
local within_limit = limit > locked_count
-------- END Variables --------

-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
---------- END local functions ----------


-------- BEGIN queue.lua --------
-- Registers job_id as a candidate for the lock on `digest` by claiming the
-- digest key and pushing the job onto the queued list.  Returns job_id when
-- the job was queued (or is already locked/queued), or the previously
-- queued job_id when the concurrency limit blocks this job.
log_debug("BEGIN queue with key:", digest, "for job:", job_id)

if redis.call("HEXISTS", locked, job_id) == 1 then
  log_debug("HEXISTS", locked, job_id, "== 1")
  log("Duplicate")
  return job_id
end

local prev_jid = redis.call("GET", digest)
log_debug("job_id:", job_id, "prev_jid:", prev_jid)
-- In Redis Lua scripts a missing key is returned as false, so the single
-- falsy check below covers both nil and false.
if not prev_jid then
  log_debug("SET", digest, job_id)
  redis.call("SET", digest, job_id)
elseif prev_jid == job_id then
  log_debug(digest, "already queued with job_id:", job_id)
  log("Duplicate")
  return job_id
else
  -- TODO: Consider constraining the total count of both locked and queued?
  if within_limit and queued_count < limit then
    log_debug("Within limit:", digest, "(", locked_count, "of", limit, ")", "queued (", queued_count, "of", limit, ")")
    log_debug("SET", digest, job_id, "(was", prev_jid, ")")
    redis.call("SET", digest, job_id)
  else
    log_debug("Limit exceeded:", digest, "(", locked_count, "of", limit, ")")
    log("Limit exceeded", prev_jid)
    return prev_jid
  end
end

log_debug("LPUSH", queued, job_id)
redis.call("LPUSH", queued, job_id)

-- The Sidekiq client should only set pttl for until_expired
-- The Sidekiq server should set pttl for all other jobs
-- BUGFIX: also check that pttl is non-nil before comparing — tonumber(ARGV[2])
-- returns nil for a missing/non-numeric argument, which would raise an error
-- here.  This matches the equivalent guard in lock.lua.
if lock_type == "until_expired" and pttl and pttl > 0 then
  log_debug("PEXPIRE", digest, pttl)
  redis.call("PEXPIRE", digest, pttl)
  log_debug("PEXPIRE", queued, pttl)
  redis.call("PEXPIRE", queued, pttl)
end

log("Queued")
log_debug("END queue with key:", digest, "for job:", job_id)
return job_id
-------- END queue.lua --------
@@ -0,0 +1,86 @@
redis.replicate_commands()

-------- BEGIN keys ---------
local digests_set  = KEYS[1]
local schedule_set = KEYS[2]
local retry_set    = KEYS[3]
-------- END keys ---------

-------- BEGIN argv ---------
local reaper_count = tonumber(ARGV[1])
-------- END argv ---------

-------- BEGIN injected arguments --------
local current_time = tonumber(ARGV[2])
local debug_lua    = ARGV[3] == "true"
local max_history  = tonumber(ARGV[4])
local script_name  = ARGV[5] .. ".lua"
local redisversion = ARGV[6]
--------- END injected arguments ---------


-------- BEGIN local functions --------
<%= include_partial "shared/_common.lua" %>
<%= include_partial "shared/_find_digest_in_queues.lua" %>
<%= include_partial "shared/_find_digest_in_sorted_set.lua" %>
---------- END local functions ----------


-------- BEGIN delete_orphaned.lua --------
-- Walks the digests sorted set in pages of `per` and deletes every digest
-- that no longer has a matching job in the schedule set, the retry set, or
-- any queue.  Stops after `reaper_count` deletions.  Returns the number of
-- digests deleted.
log_debug("BEGIN")
local found     = false
local per       = 50
local total     = redis.call("ZCARD", digests_set)
local index     = 0
local del_count = 0
local redis_ver = toversion(redisversion)
-- UNLINK (asynchronous delete) is only available from Redis 4.0 onwards.
local del_cmd   = "DEL"

-- toversion already returns numeric components, so no extra tonumber needed.
if redis_ver["major"] >= 4 then del_cmd = "UNLINK"; end

repeat
  log_debug("Iterating through:", digests_set, "for orphaned locks")
  local digests = redis.call("ZREVRANGE", digests_set, index, index + per - 1)

  for _, digest in pairs(digests) do
    log_debug("Searching for digest:", digest, "in", schedule_set)
    found = find_digest_in_sorted_set(schedule_set, digest)

    if found ~= true then
      log_debug("Searching for digest:", digest, "in", retry_set)
      found = find_digest_in_sorted_set(retry_set, digest)
    end

    if found ~= true then
      log_debug("Searching for digest:", digest, "in all queues")
      local queue = find_digest_in_queues(digest)

      if queue then
        log_debug("found digest:", digest, "in queue:", queue)
        found = true
      end
    end

    if found ~= true then
      -- Orphaned: delete the digest key plus every per-lock bookkeeping key.
      local queued     = digest .. ":QUEUED"
      local primed     = digest .. ":PRIMED"
      local locked     = digest .. ":LOCKED"
      local info       = digest .. ":INFO"
      local run_digest = digest .. ":RUN"
      local run_queued = digest .. ":RUN:QUEUED"
      local run_primed = digest .. ":RUN:PRIMED"
      local run_locked = digest .. ":RUN:LOCKED"
      local run_info   = digest .. ":RUN:INFO"

      redis.call(del_cmd, digest, queued, primed, locked, info, run_digest, run_queued, run_primed, run_locked, run_info)

      redis.call("ZREM", digests_set, digest)
      del_count = del_count + 1
    end
  end

  index = index + per
until index >= total or del_count >= reaper_count

log_debug("END")
return del_count
@@ -0,0 +1,40 @@
-- Parse a "major.minor.patch" version string into a table with numeric
-- major/minor/patch components (nil components if the string doesn't match).
local function toversion(version)
  local _, _, maj, min, pat = string.find(version, "(%d+)%.(%d+)%.(%d+)")

  return {
    ["version"] = version,
    ["major"]   = tonumber(maj),
    ["minor"]   = tonumber(min),
    ["patch"]   = tonumber(pat)
  }
end

-- Interpret "1"/"true" (of any type coercible to string) as true.
local function toboolean(val)
  val = tostring(val)
  return val == "1" or val == "true"
end

-- Emit a DEBUG-level line to the Redis log when debug_lua is enabled.
-- Relies on debug_lua and script_name being in scope in the including script.
local function log_debug( ... )
  if debug_lua ~= true then return end

  local result = ""
  -- BUGFIX: iterate varargs via select instead of the deprecated Lua 5.0
  -- `arg` table; select("#", ...) also handles nil arguments correctly,
  -- where ipairs(arg) would stop at the first nil.
  for i = 1, select("#", ...) do
    result = result .. " " .. tostring(select(i, ...))
  end
  redis.log(redis.LOG_DEBUG, script_name .. " -" .. result)
end

-- Append a JSON entry to the changelog sorted set (trimmed to max_history)
-- and publish it.  Relies on digest, job_id, changelog, current_time,
-- max_history and script_name being in scope in the including script.
local function log(message, prev_jid)
  if not max_history or max_history == 0 then return end
  local entry = cjson.encode({digest = digest, job_id = job_id, script = script_name, message = message, time = current_time, prev_jid = prev_jid })

  log_debug("ZADD", changelog, current_time, entry);
  redis.call("ZADD", changelog, current_time, entry);
  local total_entries = redis.call("ZCARD", changelog)
  local removed_entries = redis.call("ZREMRANGEBYRANK", changelog, max_history, -1)
  if removed_entries > 0 then
    log_debug("Removing", removed_entries , "entries from changelog (total entries", total_entries, "exceeds max_history:", max_history ..")");
  end
  log_debug("PUBLISH", changelog, entry);
  redis.call("PUBLISH", changelog, entry);
end
@@ -0,0 +1,8 @@
-- Build a fractional timestamp from the Redis TIME command by joining
-- its seconds and microseconds parts with a decimal point.
local function current_time()
  local time = redis.call("time")
  return tonumber(time[1] .. "." .. time[2])
end
@@ -0,0 +1,19 @@
-- Remove from `queue` (a Redis list) the first item on each scanned page
-- whose payload contains `digest`.  Returns the last removed item, or nil
-- when nothing matched.
-- NOTE(review): the outer loop keeps paging after a hit, so a match on a
-- later page is also removed — presumably intended for duplicates; confirm.
local function delete_from_queue(queue, digest)
  local per    = 50
  local total  = redis.call("LLEN", queue)
  local index  = 0
  local result = nil

  while (index < total) do
    local items = redis.call("LRANGE", queue, index, index + per - 1)
    for _, item in pairs(items) do
      -- BUGFIX: plain find (4th argument true) so the digest is matched
      -- literally instead of being interpreted as a Lua pattern, where
      -- characters such as "-" have special meaning.
      if string.find(item, digest, 1, true) then
        redis.call("LREM", queue, 1, item)
        result = item
        break
      end
    end
    index = index + per
  end
  return result
end
@@ -0,0 +1,18 @@
-- Remove from `name` (a Redis sorted set) the first member on each scanned
-- page that contains `digest`.  Returns the last removed member, or nil
-- when nothing matched.
local function delete_from_sorted_set(name, digest)
  local per   = 50
  local total = redis.call("zcard", name)
  local index = 0
  local result
  while (index < total) do
    local items = redis.call("ZRANGE", name, index, index + per - 1)
    for _, item in pairs(items) do
      -- BUGFIX: plain find (4th argument true) so the digest is matched
      -- literally instead of being interpreted as a Lua pattern.
      if string.find(item, digest, 1, true) then
        redis.call("ZREM", name, item)
        result = item
        break
      end
    end
    index = index + per
  end
  return result
end
@@ -0,0 +1,46 @@
-- Scan all Sidekiq queue keys ("*queue:*") for a job payload containing
-- `digest`.  Returns the queue name decoded from the matching payload, or
-- nil when no queue contains the digest.
local function find_digest_in_queues(digest)
  local cursor = "0"
  local count = 50
  local result = nil
  local found = false

  repeat
    log_debug("searching all queues for a matching digest:", digest)
    local pagination = redis.call("SCAN", cursor, "MATCH", "*queue:*", "COUNT", count)
    local next_cursor = pagination[1]
    local queues = pagination[2]

    for _, queue in ipairs(queues) do
      local per = 50
      local total = redis.call("LLEN", queue)
      local index = 0

      log_debug("searching in:", queue,
                "for digest:", digest,
                "from:", index,
                "to:", total,
                "(per: " .. per .. ")",
                "cursor:", cursor)

      while (index < total) and not found do
        local items = redis.call("LRANGE", queue, index, index + per - 1)
        for _, item in pairs(items) do
          -- plain find: match the digest literally, not as a Lua pattern
          if string.find(item, digest, 1, true) then
            log_debug("Found digest:", digest, "in queue:", queue)
            result = cjson.decode(item).queue
            found = true
            break
          end
        end
        index = index + per
      end

      if found then break end
    end

    -- BUGFIX: advance the cursor once per SCAN page, outside the queue loop.
    -- Previously the assignment sat inside the for-loop, so a SCAN page that
    -- matched zero keys never updated the cursor, looping forever.
    cursor = next_cursor
    if cursor == "0" then
      log_debug("Looped through all queues, stopping iteration")
    end
  until found == true or cursor == "0"

  return result
end
@@ -0,0 +1,24 @@
-- Report whether any member of the sorted set `name` contains `digest`.
-- Returns true on the first ZSCAN page with a match, false otherwise.
local function find_digest_in_sorted_set(name, digest)
  -- BUGFIX: SCAN-family cursors are strings; start at "0" so the type is
  -- consistent with the terminating comparison below (the original numeric
  -- 0 only worked because the repeat body runs before the first check).
  local cursor = "0"
  local count = 5
  local pattern = "*" .. digest .. "*"
  local found = false

  log_debug("searching in:", name,
            "for digest:", digest,
            "cursor:", cursor)
  repeat
    local pagination = redis.call("ZSCAN", name, cursor, "MATCH", pattern, "COUNT", count)
    local next_cursor = pagination[1]
    local items = pagination[2]

    if #items > 0 then
      log_debug("Found digest", digest, "in zset:", name)
      found = true
    end

    cursor = next_cursor
  until found == true or cursor == "0"

  return found
end